input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the meat of the logic for most of the scripts
that interact with marathon. There's config parsers, url composers,
and a number of other things used by other components in order to
make the PaaSTA stack work.
"""
import logging
import os
import pipes
import re
import socket
from time import sleep
from marathon import MarathonClient
from marathon import NotFoundError
import json
import service_configuration_lib
from paasta_tools.mesos_tools import get_local_slave_state
from paasta_tools.mesos_tools import get_mesos_slaves_grouped_by_attribute
from paasta_tools.utils import deploy_blacklist_to_constraints
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import get_code_sha_from_dockerurl
from paasta_tools.utils import get_config_hash
from paasta_tools.utils import get_default_branch
from paasta_tools.utils import get_docker_url
from paasta_tools.utils import get_service_instance_list
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import load_deployments_json
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import PaastaNotConfiguredError
from paasta_tools.utils import PATH_TO_SYSTEM_PAASTA_CONFIG_DIR
from paasta_tools.utils import timeout
# Port inside the container that Marathon maps onto a random host port.
CONTAINER_PORT = 8888
# Reuse the soa-configs default location from service_configuration_lib.
DEFAULT_SOA_DIR = service_configuration_lib.DEFAULT_SOA_DIR
# Marathon creates Mesos tasks with an id composed of the app's full name, a
# spacer, and a UUID. This variable is that spacer. Note that we don't control
# this spacer, i.e. you can't change it here and expect the world to change
# with you. We need to know what it is so we can decompose Mesos task ids.
MESOS_TASK_SPACER = '.'
# System-wide marathon credentials/endpoint file, alongside other PaaSTA config.
PATH_TO_MARATHON_CONFIG = os.path.join(PATH_TO_SYSTEM_PAASTA_CONFIG_DIR, 'marathon.json')
# Directory holding puppet-managed nerve service registrations.
PUPPET_SERVICE_DIR = '/etc/nerve/puppet_services.d'
log = logging.getLogger('__main__')
# The marathon client library is noisy at INFO; restrict it to warnings and up.
logging.getLogger('marathon').setLevel(logging.WARNING)
def load_marathon_config(path=PATH_TO_MARATHON_CONFIG):
    """Load the system-wide marathon JSON config and wrap it in a MarathonConfig.

    :param path: path to the marathon config file
    :returns: a MarathonConfig built from the parsed file
    :raises PaastaNotConfiguredError: when the file cannot be opened
    """
    try:
        with open(path) as config_file:
            parsed = json.load(config_file)
        return MarathonConfig(parsed, path)
    except IOError as e:
        raise PaastaNotConfiguredError("Could not load marathon config file %s: %s" % (e.filename, e.strerror))
class MarathonNotConfigured(Exception):
    # Raised by MarathonConfig getters when a required key ('url', 'user',
    # 'password') is missing from the system marathon config file.
    pass
class MarathonConfig(dict):
    """A dict of the system marathon config plus the path it was loaded from.

    Getter methods raise MarathonNotConfigured (with the source path in the
    message) when a required key is absent.
    """

    def __init__(self, config, path):
        self.path = path
        super(MarathonConfig, self).__init__(config)

    def _lookup(self, field):
        # Shared fetch-or-raise for the required top-level keys.
        try:
            return self[field]
        except KeyError:
            raise MarathonNotConfigured(
                'Could not find marathon %s in system marathon config: %s' % (field, self.path)
            )

    def get_url(self):
        """Get the Marathon API url.

        :returns: The Marathon API endpoint"""
        return self._lookup('url')

    def get_username(self):
        """Get the Marathon API username.

        :returns: The Marathon API username"""
        return self._lookup('user')

    def get_password(self):
        """Get the Marathon API password.

        :returns: The Marathon API password"""
        return self._lookup('password')
def load_marathon_service_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read a service instance's configuration for marathon.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: whether to also load the service's deployments.json
    :param soa_dir: The SOA configuration directory to read from
    :returns: a MarathonServiceConfig for the requested instance
    :raises NoConfigurationForServiceError: if the instance is not in the
        cluster's marathon config file
    """
    log.info("Reading service configuration files from dir %s/ in %s" % (service, soa_dir))
    log.info("Reading general configuration file: service.yaml")
    general_config = service_configuration_lib.read_service_configuration(service, soa_dir=soa_dir)

    marathon_conf_file = "marathon-%s" % cluster
    log.info("Reading marathon configuration file: %s.yaml", marathon_conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service, marathon_conf_file, soa_dir=soa_dir)

    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, marathon_conf_file)
        )

    # Instance-specific settings win over the service-wide defaults.
    general_config.update(instance_configs[instance])

    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = general_config.get('branch', get_default_branch(cluster, instance))
        branch_dict = deployments_json.get_branch_dict(service, branch)

    return MarathonServiceConfig(service, instance, general_config, branch_dict)
class InvalidMarathonConfig(Exception):
    # Raised when a marathon app configuration is malformed.
    pass
class MarathonServiceConfig(InstanceConfig):
def __init__(self, service, instance, config_dict, branch_dict):
super(MarathonServiceConfig, self).__init__(config_dict, branch_dict)
self.service = service
self.instance = instance
self.config_dict = config_dict
self.branch_dict = branch_dict
def __repr__(self):
return "MarathonServiceConfig(%r, %r, %r, %r)" % (
self.service,
self.instance,
self.config_dict,
self.branch_dict
)
def copy(self):
return self.__class__(self.service, self.instance, dict(self.config_dict), dict(self.branch_dict))
def get_instances(self):
"""Get the number of instances specified in the service's marathon configuration.
Defaults to 0 if not specified in the config.
:param service_config: The service instance's configuration dictionary
:returns: The number of instances specified in the config, 0 if not
specified or if desired_state is not 'start'."""
if self.get_desired_state() == 'start':
instances = self.config_dict.get('instances', 1)
return int(instances)
else:
return 0
def get_bounce_method(self):
"""Get the bounce method specified in the service's marathon configuration.
:param service_config: The service instance's configuration dictionary
:returns: The bounce method specified in the config, or 'crossover' if not specified"""
return self.config_dict.get('bounce_method', 'crossover')
def get_drain_method(self, service_namespace_config):
"""Get the drain method specified in the service's marathon configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain method specified in the config, or 'noop' if not specified"""
default = 'noop'
# Default to hacheck draining if the service is in smartstack
if service_namespace_config.is_in_smartstack():
default = 'hacheck'
return self.config_dict.get('drain_method', default)
def get_drain_method_params(self, service_namespace_config):
"""Get the drain method parameters specified in the service's marathon configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain_method_params dictionary specified in the config, or {} if not specified"""
default = {}
if service_namespace_config.is_in_smartstack():
default = {'delay': 30}
return self.config_dict.get('drain_method_params', default)
def get_constraints(self, service_namespace_config):
"""Gets the constraints specified in the service's marathon configuration.
These are Marathon app constraints. See
https://mesosphere.github.io/marathon/docs/constraints.html
Defaults to `GROUP_BY region`. If the service's smartstack configuration
specifies a `discover` key, then defaults to `GROUP_BY <value of discover>` instead.
:param service_namespace_config: The service instance's configuration dictionary
:returns: The constraints specified in the config, or defaults described above
"""
if 'constraints' in self.config_dict:
return self.config_dict.get('constraints')
else:
discover_level = service_namespace_config.get_discover()
locations = get_mesos_slaves_grouped_by_attribute(
attribute=discover_level, blacklist=self.get_deploy_blacklist())
deploy_constraints = deploy_blacklist_to_constraints(self.get_deploy_blacklist())
routing_constraints = [[discover_level, "GROUP_BY", str(len(locations))]]
return routing_constraints + deploy_constraints
def format_marathon_app_dict(self, app_id, docker_url, docker_volumes, service_namespace_config):
"""Create the configuration that will be passed to the Marathon REST API.
Currently compiles the following keys into one nice dict:
- id: the ID of the image in Marathon
- container: a dict containing the docker url and docker launch options. Needed by deimos.
- uris: blank.
- ports: an array containing the port.
- env: environment variables for the container.
- mem: the amount of memory required.
- cpus: the number of cpus required.
- constraints: the constraints on the Marathon app.
- instances: the number of instances required.
- cmd: the command to be executed.
- args: an alternative to cmd that requires the docker container to have an entrypoint.
The last 7 keys are retrieved using the get_<key> functions defined above.
:param app_id: The app id
:param docker_url: The url to the docker image the app will actually execute
:param docker_volumes: The docker volumes to run the image with, via the
marathon configuration file
:param service_namespace_config: The service instance's configuration dict
:returns: A dict containing all of the keys listed above"""
complete_config = {
'id': app_id,
'container': {
'docker': {
'image': docker_url,
'network': 'BRIDGE',
'portMappings': [
{
'containerPort': CONTAINER_PORT,
'hostPort': 0,
'protocol': 'tcp',
},
],
},
'type': 'DOCKER',
'volumes': docker_volumes,
},
'uris': ['file:///root/.dockercfg', ],
'backoff_seconds': 1,
'backoff_factor': 2,
'health_checks': self.get_healthchecks(service_namespace_config),
'env': self.get_env(),
'mem': float(self.get_mem()),
'cpus': float(self.get_cpus()),
'constraints': self.get_constraints(service_namespace_config),
'instances': self.get_instances(),
'cmd': self.get_cmd(),
'args': self.get_args(),
}
log.debug("Complete configuration for instance is: %s", complete_config)
return complete_config
def get_healthchecks(self, service_namespace_config):
"""Returns a list of healthchecks per `the Marathon docs`_.
If you have an http service, it uses the default endpoint that smartstack uses.
(/status currently)
Otherwise these do *not* use the same thresholds as smartstack in order to not
produce a negative feedback loop, where mesos agressivly kills tasks because they
are slow, which causes other things to be slow, etc.
If the mode of the service is None, indicating that it was not specified in the service config
and smartstack is not used by the service, no healthchecks are passed to Marathon. This ensures that
it falls back to Mesos' knowledge of the task state as described in `the Marathon docs`_.
In this case, we provide an empty array of healthchecks per `the Marathon API docs`_
(scroll down to the healthChecks subsection).
.. _the Marathon docs: https://mesosphere.github.io/marathon/docs/health-checks.html
.. _the Marathon API docs: https://mesosphere.github.io/marathon/docs/rest-api.html#post-/v2/apps
| |
<reponame>BlueBrain/covid-19-find<gh_stars>0
import matplotlib.pyplot as plt
import covidlib as cl
import pandas as pd
import math
import numpy as np
import sys
import os
import json
import cdata as cd
import datetime as dt
# NOTE(review): 'a' looks unused in this module -- confirm before removing.
a=10
# Bounds, in days, on the length of a single intervention phase.
minphaselength = 14
maxphaselength = 28
lag = 26
sample =30
#shiftlag=56
# Days after a trigger before its effect is fully reflected in the data.
shiftlag = lag+sample
#used to be fifty - 25 does NOT work
# Days to simulate past the last trigger when scoring a candidate fit.
horizon = 50
#countryname = 'Switzerland'
#countrycode = 'CH'
#country_df = cd.getcountrydata(countrycode)
#country_df = cl.getcountrydata('Switzerland.csv')[['Date','accumulated_deaths','tests','accumulated_cases']]
# Tolerance when deciding two fit scores are "the same".
epsilon = 0.0001
def setlengths(minp, maxp, hor):
    """Override the module-level phase-length bounds and fit horizon.

    :param minp: new minimum phase length (days)
    :param maxp: new maximum phase length (days)
    :param hor: new scoring horizon (days)
    """
    global minphaselength, maxphaselength, horizon
    minphaselength = minp
    maxphaselength = maxp
    horizon = hor
def aligntotest(dfactual, dfsimdeaths):
    """Align simulated deaths against the actual-data frame.

    The simulation starts 60 days before the first actual date, so 60 empty
    rows are prepended to the actuals; both series are then truncated to a
    common length and the simulated totals stored in 'sim_total_deaths'.

    :param dfactual: actual data with at least a 'Date' column
    :param dfsimdeaths: simulation output with a 'total_deaths' column
    :returns: the combined, aligned dataframe
    """
    simdeaths = dfsimdeaths['total_deaths'].tolist()
    start = dt.datetime.strptime(dfactual.iloc[0]['Date'], "%Y-%m-%d") - dt.timedelta(days=60)
    padding = cl.create_empty_country_df(start, 60)
    combined = pd.concat([padding, dfactual], ignore_index=True)[0:len(simdeaths)]
    combined['sim_total_deaths'] = simdeaths[0:len(combined)]
    return combined
def getsimdeaths(dfx, sev, trig):
    """Run the epidemic simulation for the current global country.

    Fixes vs. the previous version: compare country params with ``is None``
    instead of ``== None``, reuse the already-fetched params instead of
    calling cd.getcountryparams a second time, and drop a dead
    try/except FileNotFoundError around os.path.join (which cannot raise it).

    :param dfx: actual-data frame; only its first 'Date' is used to anchor day 1
    :param sev: list of phase severities applied so far
    :param trig: list of phase trigger days applied so far
    :returns: dataframe with per-day 'total_deaths' and 'new_deaths'
    """
    test_directory = 'scratch2'
    fixed_params = cl.get_system_params(test_directory)
    country_params = cd.getcountryparams(countrycode)
    if country_params is not None:
        fixed_params.update(country_params)
    else:
        print('Country files need to be updated')
        print('Please run bbpsetup.py')
        sys.exit()
    fixed_params['test_directory'] = test_directory
    fixed_params['past_severities'] = sev
    fixed_params['past_dates'] = trig
    fixed_params['expert_mode'] = False
    fixed_params['save_results'] = "False"
    fixed_params['run_multiple_test_scenarios'] = False
    # The simulation starts 60 days before the first actual data point.
    day1 = dt.datetime.strptime(dfx.iloc[0]['Date'], "%Y-%m-%d") - dt.timedelta(days=60)
    simphase, lastday = cl.computetoday(day1, trig)
    # Simulate 'horizon' days past the last trigger, capped at lastday.
    span = lookahead(trig[-1], horizon, lastday)
    fixed_params['num_days'] = span
    end_day = span
    scenario_params = cl.get_next_phases_scenarios(fixed_params['test_directory'])
    filename = os.path.join(fixed_params['test_directory'], 'parameters.json')
    cl.write_parameters(filename, fixed_params, scenario_params)
    dataframes, test_df, results_dict = cl.run_simulation(
        country_df, fixed_params, scenarios=scenario_params, end_day=end_day)
    # Collapse the per-compartment frame into per-day death totals.
    firstdf = dataframes[1].rename(columns={'deaths': 'total_deaths', 'newdeaths': 'new_deaths'}, inplace=False)
    dfsum = firstdf.groupby(['days']).sum().reset_index()
    return dfsum[['total_deaths', 'new_deaths']]
def scorealignment(result, span):
    """Score how well the simulation matches the actual data.

    Computes the mean absolute deviation over the first *span* rows for
    total deaths and new deaths, each normalised by that series' peak, and
    blends them 20/80 (total/new).  A series with a non-positive peak
    contributes zero.

    :param result: dataframe with 'total_deaths', 'new_deaths', 'absdiff'
                   and 'absdiff_new_deaths' columns
    :param span: number of leading rows to score
    :returns: the blended relative-deviation score (lower is better)
    """
    weight = 0.2

    def _relative_deviation(diff_col, ref_col):
        peak = result[ref_col].max()
        if peak > 0:
            return result[diff_col].head(span).mean() / peak
        return 0

    dev_total = _relative_deviation('absdiff', 'total_deaths')
    dev_new = _relative_deviation('absdiff_new_deaths', 'new_deaths')
    return dev_total * weight + dev_new * (1 - weight)
# =============================================================================
# def scorealignment(result,span):
# # meandev1 = result['absdiff'].head(span).mean()
# meanreldev1 = result['absdiff'].head(span).mean()/result['total_deaths'].head(span).mean()
# meanreldev2 = result['absdiff_new_deaths'].head(span).mean()/result['new_deaths'].head(span).mean()
# return (meanreldev1*0.7+meanreldev2*0.3)
# =============================================================================
# =============================================================================
# def scorealignment(result,span):
# # meandev1 = result['absdiff'].head(span).mean()
# if result['total_deaths'].head(span).mean()>0:
# meanreldev1 = result['absdiff'].head(span).mean()/result['total_deaths'].head(span).mean()
# else:
# meanreldev1=0
# if result['new_deaths'].head(span).mean()>0:
# meanreldev2 = result['absdiff_new_deaths'].head(span).mean()/result['new_deaths'].head(span).mean()
# else:
# meanreldev2=0
# return (meanreldev1*0.9+meanreldev2*0.1)
# =============================================================================
# =============================================================================
# def scorealignment(result,span):
# #totweight is temporary. John defines it as a global in his code.
# totweight=0.7
# denom1 = result['total_deaths'].head(span).mean()
# if abs(denom1) < 1:
# denom1 = 1
# meanreldev1 = result['absdiff'].head(span).mean()/denom1
# denom2 = result['new_deaths'].head(span).mean()
# if abs(denom2) < 1:
# denom2 = 1
# meanreldev2 = result['absdiff_new_deaths'].head(span).mean()/denom2
# return (meanreldev1*totweight+meanreldev2*(1-totweight))
# =============================================================================
def runandalignsim(dfx, sev, trig):
    """Simulate with the given phases, align against actuals and attach the
    deviation columns used for scoring.

    :param dfx: actual-data frame
    :param sev: phase severities
    :param trig: phase trigger days
    :returns: aligned dataframe with sim_* and absdiff* columns added
    """
    sim_df = getsimdeaths(dfx, sev, trig)
    aligned = aligntotest(dfx, sim_df)
    daily = aligned['sim_total_deaths'].diff().fillna(aligned['sim_total_deaths'].iloc[0])
    aligned['sim_new_deaths'] = daily.rolling(7).mean()
    aligned['sim_growth'] = getgrowthrate(aligned['sim_total_deaths'], 7)
    aligned['absdiff'] = abs(aligned.total_deaths - aligned.sim_total_deaths)
    aligned['absdiff_new_deaths'] = abs(aligned.new_deaths - aligned.sim_new_deaths)
    # Kept for parity with earlier output, though nothing downstream reads them.
    aligned['roll'] = aligned['absdiff'].rolling(3).mean()
    aligned['roll_new_deaths'] = aligned['absdiff_new_deaths'].rolling(3).mean()
    return aligned
def growth(x, y, roll):
    """Per-day geometric growth rate from *x* to *y* over *roll* days.

    Returns 0 when either the base value or the window is zero, avoiding
    division by zero.
    """
    if roll == 0 or x == 0:
        return 0
    ratio = y / x
    return ratio ** (1 / roll) - 1
def getgrowthrate(deaths, roll):
    """Per-day growth rates of a cumulative series over a *roll*-day window.

    :param deaths: a pandas Series (anything with .tolist()) of cumulative counts
    :param roll: window length in days
    :returns: a list of growth rates, one per input element
    """
    vals = deaths.tolist()
    rates = []
    for i, v in enumerate(vals):
        if i < roll:
            # Not enough history yet: grow from day 0 over i days.
            rates.append(growth(vals[0], v, i))
        else:
            rates.append(growth(vals[i - roll], v, roll))
    return rates
def getactualdeaths(countryname):
    """Build a smoothed actual-deaths frame from the module-level country_df.

    New deaths are smoothed with a 28-day rolling mean and re-accumulated so
    the totals match the smoothed series; a 7-day growth column is added.

    :param countryname: unused here; data comes from the country_df global
    :returns: frame with Date, orig_new_deaths, new_deaths, total_deaths, growth
    """
    renamed = country_df.rename(columns={'accumulated_deaths': 'total_deaths'}, inplace=False)
    raw_new = renamed['total_deaths'].diff().fillna(renamed['total_deaths'].iloc[0]).astype(float)
    dfx = pd.DataFrame()
    dfx['Date'] = renamed['Date']
    dfx['orig_new_deaths'] = raw_new
    dfx['new_deaths'] = raw_new.rolling(28).mean()
    dfx['total_deaths'] = dfx['new_deaths'].cumsum()
    dfx['growth'] = getgrowthrate(dfx['total_deaths'], 7)
    return dfx.fillna(0)
def lookahead(base, inc, bound):
    """Return base + inc, capped at bound."""
    return min(base + inc, bound)
def findnexttrig(dfx, sev, trig, trignum):
    """Grid-search severity and start day for intervention phase *trignum*.

    Appends a placeholder phase to sev/trig, then tries severities 0.00..1.00
    in steps of 0.05 and trigger days between min/max phase length after the
    previous trigger (step 2), scoring each candidate with a full simulation.
    The best-scoring (severity, day) pair is written back into sev/trig.

    :param dfx: actual-data frame
    :param sev: severities of the phases fixed so far (mutated in place)
    :param trig: trigger days of the phases fixed so far (mutated in place)
    :param trignum: index of the phase being searched
    :returns: (best score, sev, trig)
    """
    print('.')
    # lastday = len(dfx)+60
    # No reason for this - correct last day is really +60
    # (+60 matches the 60 empty days aligntotest prepends to the actuals)
    lastday = len(dfx)+60
    sev.append(0.00)
    trig.append(lastday)
    sevsteps = 20
    sevmult = 1.0/(sevsteps)
    bestscore = 100000
    bests = 0
    besttrig = lastday # doesnt matter
    lowerbound = trig[trignum-1]+minphaselength
    upperbound = trig[trignum-1]+maxphaselength # should not go beyond lastday-shiftlag+30
    if upperbound > lastday-shiftlag+30:
        upperbound = lastday-shiftlag+30
    # print("trigger index:",trignum)
    # print("try from",lowerbound,"to",upperbound)
    span = lastday
    for s in range(0,sevsteps+1):
        currsev = round(s*sevmult,2)
        # print(">severity:",currsev)
        scorerun = 0
        score = 0
        for t in range(lowerbound,upperbound,2):
            lastscore = score
            sev[trignum] = currsev
            trig[trignum] = t
            #have taken 14 off this on John's suggestion
            # span = lookahead(t,horizon,lastday-14)
            # as an experiment put it back to see if we now get better result for Peru
            span = lookahead(t,horizon,lastday)
            result = runandalignsim(dfx,sev,trig)
            score = scorealignment(result,span) # span vs lastday
            # Bail out of this severity after 5 consecutive near-equal scores:
            # the score has plateaued, so later days won't improve it.
            if abs(score - lastscore)<epsilon:
                scorerun = scorerun + 1
            else:
                scorerun = 0
            if scorerun == 5:
                break
            # print(currsev,t,score)
            # <= (not <) so that, on ties, the latest candidate wins.
            if score <= bestscore:
                bestscore = score
                bests = currsev
                besttrig = t
                # print(bestscore,bests,besttrig)
    # print(">>best:",bests,besttrig,bestscore,"*",span)
    sev[trignum] = bests
    trig[trignum] = besttrig
    return bestscore, sev, trig
def findnexttrig_finetune(dfx, sev, trig, trignum, sevguide, trigguide):
    """Fine-tune phase *trignum* around previously-found guide values.

    Like findnexttrig, but searches a small neighbourhood: severity within
    +/-0.05 (steps of 0.01) of sevguide[trignum], trigger day within +/-1 of
    trigguide[trignum].  Writes the best pair back into sev/trig.

    :param dfx: actual-data frame
    :param sev: severities fixed so far (mutated in place)
    :param trig: trigger days fixed so far (mutated in place)
    :param trignum: index of the phase being refined
    :param sevguide: severities from the coarse fit
    :param trigguide: trigger days from the coarse fit
    :returns: (best score, sev, trig)
    """
    # NOTE(review): unlike findnexttrig, no +60 offset here -- confirm intended.
    lastday = len(dfx)
    sev.append(0.00)
    trig.append(lastday)
    sevsteps = 5
    sevmult = 0.01
    trigsteps = 1
    bestscore = 100000
    bests = 0
    besttrig = lastday # doesnt matter
    lowerbound = trigguide[trignum]-trigsteps
    upperbound = trigguide[trignum]+trigsteps # should not go beyond lastday-shiftlag
    if upperbound > lastday-shiftlag:
        upperbound = lastday-shiftlag
    print("trigger index:",trignum)
    midsev = sevguide[trignum]
    # NOTE(review): midtrig is assigned but never read.
    midtrig = trigguide[trignum]
    for s in range(-sevsteps,sevsteps+1):
        currsev = round(midsev+s*sevmult,2)
        # Severities are only meaningful in [0, 1].
        if currsev < 0:
            continue
        if currsev > 1:
            continue
        print(">severity:",currsev)
        scorerun = 0
        score = 0
        for t in range(lowerbound,upperbound+1):
            lastscore = score
            sev[trignum] = currsev
            trig[trignum] = t
            span = lookahead(t,horizon,lastday)
            result = runandalignsim(dfx,sev,trig)
            score = scorealignment(result,span) # span vs lastday
            # Stop after 5 consecutive near-equal scores: the score plateaued.
            if abs(score - lastscore)<epsilon:
                scorerun = scorerun + 1
            else:
                scorerun = 0
            if scorerun == 5:
                break
            print(currsev,t,score)
            # <= so that, on ties, the latest candidate wins.
            if score <= bestscore:
                bestscore = score
                bests = currsev
                besttrig = t
                # print(bestscore,bests,besttrig)
    print(">>best:",bests,besttrig,bestscore)
    sev[trignum] = bests
    trig[trignum] = besttrig
    return bestscore, sev, trig
def getbestfit(dfx, sev, trig):
    """Append phases until the last trigger reaches the end of the fit window.

    :param dfx: actual-data frame
    :param sev: severities fitted so far (mutated/extended in place)
    :param trig: trigger days fitted so far (mutated/extended in place)
    :returns: (last score, sev, trig)
    """
    sc = 0
    # +60 compensates for the 60 empty days prepended to the actual data.
    lastday = len(dfx) + 60
    phase = len(sev)
    print(trig[-1],(lastday-shiftlag))
    while trig[-1] < (lastday - shiftlag):
        sc, sev, trig = findnexttrig(dfx, sev, trig, phase)
        phase += 1
    return sc, sev, trig
def getbestfit_finetune(dfx, sev, trig, sevguide, trigguide):
    """Fine-tune every phase after the first around the coarse-fit guides.

    (Removed an unused 'lastday' local that was computed but never read.)

    :param dfx: actual-data frame
    :param sev: starting severities (mutated/extended in place)
    :param trig: starting trigger days (mutated/extended in place)
    :param sevguide: severities from the coarse fit
    :param trigguide: trigger days from the coarse fit
    :returns: (last score, sev, trig)
    """
    sc = 0
    for i in range(1, len(trigguide)):
        sc, sev, trig = findnexttrig_finetune(dfx, sev, trig, i, sevguide, trigguide)
    return sc, sev, trig
#-----------------------------------
# Many phases may have the same severity. Compress these
# Always keep the data for the last phase
# This ensures the output graph and the score will take account of the whole period
# ----------------------------------
def packseverities(sev, trig):
    """Collapse runs of consecutive phases that share the same severity.

    The final phase is always kept, even if its severity repeats, so that the
    output graph and the score cover the whole fitted period.

    :param sev: list of phase severities
    :param trig: list of matching trigger days
    :returns: (packed severities, packed trigger days)
    """
    packed_sev = [sev[0]]
    packed_trig = [trig[0]]
    last = len(sev) - 1
    for i in range(1, len(sev)):
        # Keep an entry when its severity changes, or when it is the last one.
        if sev[i] != packed_sev[-1] or i == last:
            packed_sev.append(sev[i])
            packed_trig.append(trig[i])
    return packed_sev, packed_trig
def setcountry(ccode):
    """Point the module-level country globals at the given country code.

    Sets countrycode, countryname and country_df (the country's time series)
    for the rest of the module to use.  (Removed an unused 'n_records' local.)

    :param ccode: two-letter country code
    """
    global countryname
    global country_df
    global countrycode
    countrycode = ccode
    countryname = cd.getcountryname(countrycode)
    country_df = cd.getcountrydata(countrycode)
def checkenoughdeaths(ccode):
    """Load a country's data and flag whether it has enough deaths to fit.

    :param ccode: two-letter country code
    :returns: (actual-deaths frame,
               True if the first shiftlag days average ~zero deaths,
               True if fewer than 20 total new deaths)
    """
    setcountry(ccode)
    dfx = getactualdeaths(countryname)
    no_early_deaths = dfx['total_deaths'].head(shiftlag).mean() < epsilon
    under_twenty = dfx['new_deaths'].sum() < 20
    return dfx, no_early_deaths, under_twenty
def computephases(ccode):
    """Fit intervention phases for a country from a neutral starting guess.

    :param ccode: two-letter country code
    :returns: (score, dfx, packed sev, packed trig, full sev, full trig)
    """
    # The data-sufficiency flags are currently informational only.
    dfx, zerodeaths, lessthantwenty = checkenoughdeaths(ccode)
    return extendphases(ccode, [0.0], [1])
# =============================================================================
# def computephases(ccode):
# score, dfx, sev, trig, longsev, longtrig = extendphases(ccode, [0.0], [1])
# return score, dfx, sev, trig, longsev, longtrig
# =============================================================================
def extendphases(ccode, sev, trig):
    """Extend an existing phase list to cover the country's full data window.

    Runs the coarse fit, rescores it over the whole series, packs equal
    consecutive severities, and rescores the packed phases (the returned
    score is the packed one).

    :param ccode: two-letter country code
    :param sev: starting severities (e.g. [0.0])
    :param trig: starting trigger days (e.g. [1])
    :returns: (score, dfx, packed sev, packed trig, full sev, full trig)
    """
    setcountry(ccode)
    dfx = getactualdeaths(countryname)
    score, sev, trig = getbestfit(dfx, sev, trig)
    print('LAST SCORE',sev,trig,score)
    # Re-score over the full data length rather than the fit window.
    result = runandalignsim(dfx,sev,trig)
    score = scorealignment(result,len(dfx))
    print('RESCORE',sev,trig,score)
    nsev, ntrig = packseverities(sev, trig)
    result = runandalignsim(dfx,nsev,ntrig)
    score = scorealignment(result,len(dfx))
    #when data on tests is bad we sometimes get string of zeros in severities
    # this is a sign of error. This signals error to front end which will then signal unreliable result
    # Date should be related to today
    # =============================================================================
    #     if ntrig[-1]<550:
    #         score=2.0
    # =============================================================================
    print('PACKED SCORE',nsev,ntrig,score)
    # Guard against NaN / non-float scores leaking to callers.
    if np.isnan(score) or not isinstance(score,float):
        score=0.0
    return score, dfx, nsev, ntrig, sev, trig
def finetune(ccode, sevguide, trigguide):
    """Refine a previously-fitted phase list and rescore it.

    :param ccode: two-letter country code
    :param sevguide: severities from the coarse fit
    :param trigguide: trigger days from the coarse fit
    :returns: (score, packed sev, packed trig)
    """
    sev = [sevguide[0]]    # always 0.0
    trig = [trigguide[0]]  # always 1
    setcountry(ccode)
    dfx = getactualdeaths(countryname)
    score, sev, trig = getbestfit_finetune(dfx, sev, trig, sevguide, trigguide)
    sev, trig = packseverities(sev, trig)
    final_score = scorealignment(runandalignsim(dfx, sev, trig), len(dfx))
    return final_score, sev, trig
def finetune1(ccode, origsev, origtrig):
sev = [0.0]
trig = [1]
setcountry(ccode)
dfx = getactualdeaths(countryname)
lastday = len(dfx)
sevsteps = 20
sevmult = 0.01
trigsteps = 10
n = | |
consumers of
this API, but is indirectly used via the different public interfaces.
see :func:`attribute_exists` (key) for details.
Registered keys which have never been explicitly set to a value do not
exist for the purpose of this call.
"""
# make sure interface is ready to use
d = self._attributes_t_init ()
# check if we know about that attribute
if key in d['attributes'] :
if 'exists' in d['attributes'][key] :
if d['attributes'][key]['exists'] :
return True
return False
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
rus.one_of (_UP, _DOWN))
@rus.returns (bool)
def _attributes_i_is_extended (self, key, flow) :
"""
This internal method should not be explicitly called by consumers of
this API, but is indirectly used via the different public interfaces.
This method will check if the given key is extended, i.e. was registered
on the fly, vs. registered explicitly.
This method is not used by, and not exposed via the public API, yet.
"""
# make sure interface is ready to use
d = self._attributes_t_init (key)
return d['attributes'][key]['extended']
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
rus.one_of (_UP, _DOWN))
@rus.returns (bool)
def _attributes_i_is_private (self, key, flow) :
"""
This internal method should not be explicitly called by consumers of
this API, but is indirectly used via the different public interfaces.
This method will check if the given key is private, i.e. starts with an
underscore and 'allow_private' is enabled.
This method is not used by, and not exposed via the public API, yet.
"""
# make sure interface is ready to use
d = self._attributes_t_init (key)
return d['attributes'][key]['private']
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
rus.one_of (_UP, _DOWN))
@rus.returns (bool)
def _attributes_i_is_readonly (self, key, flow) :
"""
This internal method should not be explicitly called by consumers of
this API, but is indirectly used via the different public interfaces.
see L{attribute_is_readonly} (key) for details.
This method will check if the given key is readonly, i.e. cannot be
'set'. The call will also return 'True' if the attribute is final
"""
# make sure interface is ready to use
d = self._attributes_t_init (key)
# check if we know about that attribute
if d['attributes'][key]['mode'] == FINAL or \
d['attributes'][key]['mode'] == READONLY :
return True
return False
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
rus.one_of (_UP, _DOWN))
@rus.returns (bool)
def _attributes_i_is_writeable (self, key, flow) :
"""
This internal method should not be explicitly called by consumers of
this API, but is indirectly used via the different public interfaces.
see :func:`attribute_is_writable` (key) for details.
This method will check if the given key is writeable - i.e. not readonly.
"""
return not self._attributes_i_is_readonly (key, flow=flow)
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
rus.one_of (_UP, _DOWN))
@rus.returns (bool)
def _attributes_i_is_removable (self, key, flow) :
"""
This internal method should not be explicitly called by consumers of
this API, but is indirectly used via the different public interfaces.
see :func:`attribute_is_removable` (key) for details.
'True' if the attrib is writeable and Extended.
"""
if self._attributes_i_is_writeable (key, flow=flow) and \
self._attributes_i_is_extended (key, flow=flow) :
return True
return False
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
rus.one_of (_UP, _DOWN))
@rus.returns (bool)
def _attributes_i_is_vector (self, key, flow) :
"""
This internal method should not be explicitly called by consumers of
this API, but is indirectly used via the different public interfaces.
see :func:`attribute_is_vector` (key) for details.
"""
# make sure interface is ready to use
d = self._attributes_t_init (key)
# check if we know about that attribute
if d['attributes'][key]['flavor'] == VECTOR :
return True
return False
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
rus.one_of (_UP, _DOWN))
@rus.returns (bool)
def _attributes_i_is_final (self, key, flow) :
"""
This internal method should not be explicitly called by consumers of
this API, but is indirectly used via the different public interfaces.
This method will query the 'final' flag for an attribute, which signals
that the attribute will never change again.
This method is not used by, and not exposed via the public API, yet.
"""
# make sure interface is ready to use
d = self._attributes_t_init (key)
if FINAL == d['attributes'][key]['mode'] :
return True
# no final flag found -- assume non-finality!
return False
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
callable,
rus.one_of (_UP, _DOWN))
@rus.returns (int)
def _attributes_i_add_cb (self, key, cb, flow) :
"""
This internal method should not be explicitly called by consumers of
this API, but is indirectly used via the different public interfaces.
see :func:`add_callback` (key, cb) for details.
"""
# make sure interface is ready to use
d = self._attributes_t_init (key)
d['attributes'][key]['callbacks'].append (cb)
id = len (d['attributes'][key]['callbacks']) - 1
if flow==self._DOWN :
self._attributes_t_call_caller (key, id, cb)
return id
# --------------------------------------------------------------------------
#
@rus.takes('Attributes',
           str,
           rus.optional(int),
           rus.one_of(_UP, _DOWN))
@rus.returns(rus.nothing)
def _attributes_i_del_cb(self, key, id=None, flow=_DOWN):
    """
    Internal helper -- not part of the public consumer API, but used
    indirectly via the different public interfaces.

    Remove the callback registered under cookie ``id`` for ``key``.  If
    ``id`` is None, remove all callbacks for that attribute.
    See :func:`remove_callback` (key, cb) for details.

    :raises se.BadParameter: if ``id`` is not a valid callback cookie.
    """
    # make sure interface is ready to use
    d = self._attributes_t_init(key)
    if flow == self._DOWN:
        self._attributes_t_call_caller(key, id, None)
    # id is None: remove all callbacks.
    # BUGFIX: cookie 0 (the first registered callback) is a valid id, so we
    # must test for None explicitly instead of relying on truthiness --
    # 'if not id' used to wipe ALL callbacks when asked to remove cookie 0.
    if id is None:
        d['attributes'][key]['callbacks'] = []
    else:
        # valid cookies are 0 .. len-1 (BUGFIX: the old 'len < id' check let
        # id == len slip through to an IndexError below)
        if id >= len(d['attributes'][key]['callbacks']):
            raise se.BadParameter("invalid callback cookie for attribute %s" % key)
        else:
            # do not pop from list, that would invalidate the id's!
            d['attributes'][key]['callbacks'][id] = None
# --------------------------------------------------------------------------
#
# This part of the interface is primarily for use in deriving classes, which
# thus provide the Attributes interface.
#
# Keys should be provided as CamelCase (only relevant if camelcasing is
# set).
#
# Naming: _attributes_*
#
@rus.takes ('Attributes',
str,
rus.optional (rus.optional (rus.anything)),
rus.optional (rus.one_of (ANY, URL, INT, FLOAT, STRING, BOOL, ENUM, TIME)),
rus.optional (rus.one_of (ANY, SCALAR, VECTOR, DICT)),
rus.optional (rus.one_of (READONLY, WRITEABLE, ALIAS, FINAL)),
rus.optional (rus.one_of (bool, EXTENDED)),
rus.optional (rus.one_of (_UP, _DOWN)))
@rus.returns (rus.nothing)
def _attributes_register (self, key, default=None, typ=ANY, flavor=ANY,
mode=WRITEABLE, ext=False, flow=_DOWN) :
"""
This interface method is not part of the public consumer API, but can
safely be called from within derived classes.
Register a new attribute.
This function ignores extensible, private, final and readonly flags. It
can also be used to re-register an existing attribute with new
properties -- the old attribute value, callbacks etc. will be lost
though. Using this call that way may result in confusing behaviour on
the public API level.
"""
# FIXME: check for valid mode and flavor settings
# make sure interface is ready to use
d = self._attributes_t_init ()
priv = False
if d['private'] and key[0] == '_' :
priv = True
# we expect keys to be registered as CamelCase (in those cases where
# that matters). But we store attributes in 'under_score' version.
us_key = self._attributes_t_underscore (key)
# retain old values
val = default
exists = False
if default != None :
exists = True
if us_key in d['attributes'] :
val = d['attributes'][us_key]['value']
exists = True
# register the attribute and properties
d['attributes'][us_key] = {}
d['attributes'][us_key]['value'] = val # initial value
d['attributes'][us_key]['default'] = default # default value
d['attributes'][us_key]['type'] = typ # int, float, enum, ...
d['attributes'][us_key]['exists'] = exists # no value set, yet?
d['attributes'][us_key]['flavor'] = flavor # scalar / vector
d['attributes'][us_key]['mode'] = mode # readonly / writeable / final
d['attributes'][us_key]['extended'] = ext # is an extended attribute
d['attributes'][us_key]['private'] = priv # is a private attribute
d['attributes'][us_key]['camelcase'] = key # keep original key name
d['attributes'][us_key]['underscore'] = us_key # keep under_scored name
d['attributes'][us_key]['enums'] = [] # list of valid enum values
d['attributes'][us_key]['checks'] = [] # list of custom value checks
d['attributes'][us_key]['callbacks'] = [] # list of callbacks
d['attributes'][us_key]['recursion'] = False # recursion check for callbacks
d['attributes'][us_key]['setter'] = None # custom attribute setter
d['attributes'][us_key]['getter'] = None # custom attribute getter
d['attributes'][us_key]['last'] = never # time of last refresh (never)
d['attributes'][us_key]['ttl'] = 0.0 # refresh delay (none)
# for enum types, we add a value checker
if typ == ENUM :
| |
self.tree = None
# $ANTLR start "process_expr"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:235:5: process_expr : ( expressions | network_send | network_mcast | transaction );
def process_expr(self, ):
"""ANTLR-generated parser rule for 'process_expr'.

Grammar (ProtoCC.g line 235):
    process_expr : ( expressions | network_send | network_mcast | transaction );

Predicts one of four alternatives from token lookahead, parses it, and
returns its AST wrapped in a process_expr_return scope.  Generated
code -- change the grammar and regenerate, do not edit by hand.
"""
retval = self.process_expr_return()
retval.start = self.input.LT(1)
root_0 = None
# per-alternative sub-rule results (numeric suffixes assigned by the generator)
expressions170 = None
network_send171 = None
network_mcast172 = None
transaction173 = None
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:235:17: ( expressions | network_send | network_mcast | transaction )
# predict the alternative (alt27) from up to three tokens of lookahead
alt27 = 4
LA27 = self.input.LA(1)
if LA27 in {ID}:
LA27_1 = self.input.LA(2)
if (LA27_1 == DOT) :
LA27 = self.input.LA(3)
# NOTE(review): 96-102 are raw token-type numbers from the generated
# token vocabulary -- confirm against ProtoCC.tokens before editing
if LA27 in {ID, NID, 96, 97, 98, 99, 100}:
alt27 = 1
elif LA27 in {102}:
alt27 = 2
elif LA27 in {101}:
alt27 = 3
else:
nvae = NoViableAltException("", 27, 5, self.input)
raise nvae
elif (LA27_1 in {EQUALSIGN, SEMICOLON}) :
alt27 = 1
else:
nvae = NoViableAltException("", 27, 1, self.input)
raise nvae
elif LA27 in {IF, STATE}:
alt27 = 1
elif LA27 in {AWAIT}:
alt27 = 4
else:
nvae = NoViableAltException("", 27, 0, self.input)
raise nvae
if alt27 == 1:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:235:19: expressions
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_expressions_in_process_expr2088)
expressions170 = self.expressions()
self._state.following.pop()
self._adaptor.addChild(root_0, expressions170.tree)
elif alt27 == 2:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:235:33: network_send
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_network_send_in_process_expr2092)
network_send171 = self.network_send()
self._state.following.pop()
self._adaptor.addChild(root_0, network_send171.tree)
elif alt27 == 3:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:235:48: network_mcast
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_network_mcast_in_process_expr2096)
network_mcast172 = self.network_mcast()
self._state.following.pop()
self._adaptor.addChild(root_0, network_mcast172.tree)
elif alt27 == 4:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:235:64: transaction
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_transaction_in_process_expr2100)
transaction173 = self.transaction()
self._state.following.pop()
self._adaptor.addChild(root_0, transaction173.tree)
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
# standard ANTLR error recovery: report, resync, emit an error node
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "process_expr"
# Rule-return scope for 'transaction': carries start/stop token boundaries
# (from ParserRuleReturnScope) plus the rewritten AST in self.tree.
class transaction_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "transaction"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:238:1: transaction : AWAIT OCBRACE ( trans )* CCBRACE -> ^( AWAIT_ ( trans )* ) ;
def transaction(self, ):
"""ANTLR-generated parser rule for 'transaction'.

Grammar (ProtoCC.g line 238):
    transaction : AWAIT OCBRACE ( trans )* CCBRACE -> ^( AWAIT_ ( trans )* ) ;

Matches an 'await { ... }' block and rewrites it into an AWAIT_ subtree
containing the parsed trans clauses.  Generated code -- change the
grammar and regenerate, do not edit by hand.
"""
retval = self.transaction_return()
retval.start = self.input.LT(1)
root_0 = None
AWAIT174 = None
OCBRACE175 = None
CCBRACE177 = None
trans176 = None
AWAIT174_tree = None
OCBRACE175_tree = None
CCBRACE177_tree = None
# token/subtree streams that feed the AST rewrite below
stream_OCBRACE = RewriteRuleTokenStream(self._adaptor, "token OCBRACE")
stream_AWAIT = RewriteRuleTokenStream(self._adaptor, "token AWAIT")
stream_CCBRACE = RewriteRuleTokenStream(self._adaptor, "token CCBRACE")
stream_trans = RewriteRuleSubtreeStream(self._adaptor, "rule trans")
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:238:13: ( AWAIT OCBRACE ( trans )* CCBRACE -> ^( AWAIT_ ( trans )* ) )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:238:15: AWAIT OCBRACE ( trans )* CCBRACE
pass
AWAIT174 = self.match(self.input, AWAIT, self.FOLLOW_AWAIT_in_transaction2110)
stream_AWAIT.add(AWAIT174)
OCBRACE175 = self.match(self.input, OCBRACE, self.FOLLOW_OCBRACE_in_transaction2112)
stream_OCBRACE.add(OCBRACE175)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:238:29: ( trans )*
# zero-or-more 'trans' clauses, each introduced by a WHEN token
while True: #loop28
alt28 = 2
LA28_0 = self.input.LA(1)
if (LA28_0 == WHEN) :
alt28 = 1
if alt28 == 1:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:238:29: trans
pass
self._state.following.append(self.FOLLOW_trans_in_transaction2114)
trans176 = self.trans()
self._state.following.pop()
stream_trans.add(trans176.tree)
else:
break #loop28
CCBRACE177 = self.match(self.input, CCBRACE, self.FOLLOW_CCBRACE_in_transaction2117)
stream_CCBRACE.add(CCBRACE177)
# AST Rewrite
# elements: trans
# token labels:
# rule labels: retval
# token list labels:
# rule list labels:
# wildcard labels:
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
# 238:44: -> ^( AWAIT_ ( trans )* )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:238:47: ^( AWAIT_ ( trans )* )
root_1 = self._adaptor.nil()
root_1 = self._adaptor.becomeRoot(
self._adaptor.createFromType(AWAIT_, "AWAIT_")
, root_1)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:238:56: ( trans )*
while stream_trans.hasNext():
self._adaptor.addChild(root_1, stream_trans.nextTree())
stream_trans.reset();
self._adaptor.addChild(root_0, root_1)
retval.tree = root_0
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
# standard ANTLR error recovery: report, resync, emit an error node
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "transaction"
# Rule-return scope for 'trans': carries start/stop token boundaries
# (from ParserRuleReturnScope) plus the rewritten AST in self.tree.
class trans_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "trans"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:239:5: trans : WHEN ID DDOT ( trans_body )* -> ^( WHEN_ ^( GUARD_ ID ) ( trans_body )* ENDWHEN_ ) ;
def trans(self, ):
"""ANTLR-generated parser rule for 'trans'.

Grammar (ProtoCC.g line 239):
    trans : WHEN ID DDOT ( trans_body )* -> ^( WHEN_ ^( GUARD_ ID ) ( trans_body )* ENDWHEN_ ) ;

Matches a 'when <guard> :' clause and rewrites it into a WHEN_ subtree
holding a GUARD_ child, the body subtrees, and a closing ENDWHEN_
marker.  Generated code -- change the grammar and regenerate.
"""
retval = self.trans_return()
retval.start = self.input.LT(1)
root_0 = None
WHEN178 = None
ID179 = None
DDOT180 = None
trans_body181 = None
WHEN178_tree = None
ID179_tree = None
DDOT180_tree = None
# token/subtree streams that feed the AST rewrite below
stream_WHEN = RewriteRuleTokenStream(self._adaptor, "token WHEN")
stream_DDOT = RewriteRuleTokenStream(self._adaptor, "token DDOT")
stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
stream_trans_body = RewriteRuleSubtreeStream(self._adaptor, "rule trans_body")
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:239:11: ( WHEN ID DDOT ( trans_body )* -> ^( WHEN_ ^( GUARD_ ID ) ( trans_body )* ENDWHEN_ ) )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:239:13: WHEN ID DDOT ( trans_body )*
pass
WHEN178 = self.match(self.input, WHEN, self.FOLLOW_WHEN_in_trans2137)
stream_WHEN.add(WHEN178)
ID179 = self.match(self.input, ID, self.FOLLOW_ID_in_trans2139)
stream_ID.add(ID179)
DDOT180 = self.match(self.input, DDOT, self.FOLLOW_DDOT_in_trans2141)
stream_DDOT.add(DDOT180)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:239:26: ( trans_body )*
# zero-or-more body items; first-token set predicts another iteration
while True: #loop29
alt29 = 2
LA29_0 = self.input.LA(1)
if (LA29_0 in {AWAIT, BREAK, ID, IF, NEXT, STATE}) :
alt29 = 1
if alt29 == 1:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:239:26: trans_body
pass
self._state.following.append(self.FOLLOW_trans_body_in_trans2143)
trans_body181 = self.trans_body()
self._state.following.pop()
stream_trans_body.add(trans_body181.tree)
else:
break #loop29
# AST Rewrite
# elements: ID, trans_body
# token labels:
# rule labels: retval
# token list labels:
# rule list labels:
# wildcard labels:
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
# 239:38: -> ^( WHEN_ ^( GUARD_ ID ) ( trans_body )* ENDWHEN_ )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:239:41: ^( WHEN_ ^( GUARD_ ID ) ( trans_body )* ENDWHEN_ )
root_1 = self._adaptor.nil()
root_1 = self._adaptor.becomeRoot(
self._adaptor.createFromType(WHEN_, "WHEN_")
, root_1)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:239:49: ^( GUARD_ ID )
root_2 = self._adaptor.nil()
root_2 = self._adaptor.becomeRoot(
self._adaptor.createFromType(GUARD_, "GUARD_")
, root_2)
self._adaptor.addChild(root_2,
stream_ID.nextNode()
)
self._adaptor.addChild(root_1, root_2)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:239:62: ( trans_body )*
while stream_trans_body.hasNext():
self._adaptor.addChild(root_1, stream_trans_body.nextTree())
stream_trans_body.reset();
self._adaptor.addChild(root_1,
self._adaptor.createFromType(ENDWHEN_, "ENDWHEN_")
)
self._adaptor.addChild(root_0, root_1)
retval.tree = root_0
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
# standard ANTLR error recovery: report, resync, emit an error node
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "trans"
# Rule-return scope for 'trans_body': carries start/stop token boundaries
# (from ParserRuleReturnScope) plus the parsed AST in self.tree.
class trans_body_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "trans_body"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:240:9: trans_body : ( expressions | next_trans | next_break | transaction | network_send | network_mcast );
def trans_body(self, ):
"""ANTLR-generated parser rule for 'trans_body'.

Grammar (ProtoCC.g line 240):
    trans_body : ( expressions | next_trans | next_break | transaction | network_send | network_mcast );

Predicts one of six alternatives from token lookahead and parses it.
Generated code -- change the grammar and regenerate, do not edit.
"""
retval = self.trans_body_return()
retval.start = self.input.LT(1)
root_0 = None
# per-alternative sub-rule results (numeric suffixes assigned by the generator)
expressions182 = None
next_trans183 = None
next_break184 = None
transaction185 = None
network_send186 = None
network_mcast187 = None
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:240:20: ( expressions | next_trans | next_break | transaction | network_send | network_mcast )
# predict the alternative (alt30) from up to three tokens of lookahead
alt30 = 6
LA30 = self.input.LA(1)
if LA30 in {ID}:
LA30_1 = self.input.LA(2)
if (LA30_1 == DOT) :
LA30 = self.input.LA(3)
# NOTE(review): 96-102 are raw token-type numbers from the generated
# token vocabulary -- confirm against ProtoCC.tokens before editing
if LA30 in {ID, NID, 96, 97, 98, 99, 100}:
alt30 = 1
elif LA30 in {102}:
alt30 = 5
elif LA30 in {101}:
alt30 = 6
else:
nvae = NoViableAltException("", 30, 7, self.input)
raise nvae
elif (LA30_1 in {EQUALSIGN, SEMICOLON}) :
alt30 = 1
else:
nvae = NoViableAltException("", 30, 1, self.input)
raise nvae
elif LA30 in {IF, STATE}:
alt30 = 1
elif LA30 in {NEXT}:
alt30 = 2
elif LA30 in {BREAK}:
alt30 = 3
elif LA30 in {AWAIT}:
alt30 = 4
else:
nvae = NoViableAltException("", 30, 0, self.input)
raise nvae
if alt30 == 1:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:240:22: expressions
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_expressions_in_trans_body2176)
expressions182 = self.expressions()
self._state.following.pop()
self._adaptor.addChild(root_0, expressions182.tree)
elif alt30 == 2:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:240:36: next_trans
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_next_trans_in_trans_body2180)
next_trans183 = self.next_trans()
self._state.following.pop()
self._adaptor.addChild(root_0, next_trans183.tree)
elif alt30 == 3:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:240:49: next_break
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_next_break_in_trans_body2184)
next_break184 = self.next_break()
self._state.following.pop()
self._adaptor.addChild(root_0, next_break184.tree)
elif alt30 == 4:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:240:62: transaction
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_transaction_in_trans_body2188)
transaction185 = self.transaction()
self._state.following.pop()
self._adaptor.addChild(root_0, transaction185.tree)
elif alt30 == 5:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:240:76: network_send
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_network_send_in_trans_body2192)
network_send186 = self.network_send()
self._state.following.pop()
self._adaptor.addChild(root_0, network_send186.tree)
elif alt30 == 6:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:240:91: network_mcast
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_network_mcast_in_trans_body2196)
network_mcast187 = self.network_mcast()
self._state.following.pop()
self._adaptor.addChild(root_0, network_mcast187.tree)
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
# standard ANTLR error recovery: report, resync, emit an error node
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "trans_body"
# Rule-return scope for 'next_trans': carries start/stop token boundaries
# (from ParserRuleReturnScope) plus the rewritten AST in self.tree.
class next_trans_return(ParserRuleReturnScope):
def __init__(self):
super().__init__()
self.tree = None
# $ANTLR start "next_trans"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:241:13: next_trans : NEXT OCBRACE ( trans )* CCBRACE -> ^( NEXT_ ( trans )* ) ;
def next_trans(self, ):
retval = self.next_trans_return()
retval.start = | |
# repo: mtaghiza/tinyos-main-1
#!/usr/bin/env python
# Serial Bootstrap Loader software for the MSP430 embedded proccessor.
#
# (C) 2001-2004 <NAME> <<EMAIL>>
# this is distributed under a free software license, see license.txt
#
# http://mspgcc.sf.net
#
# $Id: msp430-bsl.py,v 1.12 2006/04/23 21:28:24 cliechti Exp $
import sys
from mspgcc.util import curry, hexdump, makeihex
from mspgcc import memory, bsl
# tool version reported in the startup banner and usage text
VERSION = "2.0"
# debug verbosity level; incremented by each -D/--debug flag
DEBUG = 0 #disable debug messages by default
#enumeration of output formats for uploads
HEX = 0
INTELHEX = 1
BINARY = 2
def usage():
"""Print the command line usage/help text to stderr."""
# NOTE(review): the help text advertises '-m' for both --erasecycles and
# --mainerase; the option loop in main() can only dispatch '-m' to one of
# them -- confirm which is intended.
sys.stderr.write("""
USAGE: %s [options] [file]
Version: %s
If "-" is specified as file the data is read from the stdinput.
A file ending with ".txt" is considered to be in TI-Text format,
'.a43' and '.hex' as IntelHex and all other filenames are
considered as ELF files.
General options:
-h, --help Show this help screen.
-c, --comport=port Specify the communication port to be used.
(Default is 0)
0->COM1 / ttyS0
1->COM2 / ttyS1
etc.
-P, --password=file Specify a file with the interrupt vectors that
are used as password. This can be any file that
has previously been used to program the device.
(e.g. -P INT_VECT.TXT).
-f, --framesize=num Max. number of data bytes within one transmitted
frame (16 to 240 in steps of 16) (e.g. -f 240).
-m, --erasecycles=num Number of mass erase cycles (default is 1). Some
old F149 devices need additional erase cycles.
On newer devices it is no longer needed. (e.g. for
an old F149: -m20)
-U, --unpatched Do not download the BSL patch, even when it is
needed. This is used when a program is downloaded
into RAM and executed from there (and where flash
programming is not needed.)
-D, --debug Increase level of debug messages. This won't be
very useful for the average user...
-I, --intelhex Force fileformat to IntelHex
-T, --titext Force fileformat to be TI-Text
-N, --notimeout Don't use timeout on serial port (use with care)
-B, --bsl=bsl.txt Load and use new BSL from the TI Text file
-S, --speed=baud Reconfigure speed, only possible with newer
MSP403-BSL versions (>1.5, read slaa089a.pdf for
details). If the --bsl option is not used, an
internal BSL replacement will be loaded.
Needs a target with at least 2kB RAM!
Possible values are 9600, 19200, 38400
(default 9600)
-1, --f1x Specify CPU family, in case autodetect fails
-4, --f4x Specify CPU family, in case autodetect fails
--F1x and --f2x are only needed when the "change
baudrate" feature is used and the autodetect feature
fails. If the device ID that is uploaded is known, it
has precedence to the command line option.
--invert-reset Invert signal on RST pin (used for some BSL hardware)
--invert-test Invert signal on TEST/TCK pin (used for some BSL
hardware)
--swap-reset-test Swap the TEST/TCK and RST control signals.
--test-on-tx Also toggle TX line for the TEST/TCK signal.
--ignore-answer Ignore answers and ACKs from the BSL (dont use unless
you know what you do)
--no-BSL-download Do not download replacement BSL (disable automatic)
--force-BSL-download Download replacement BSL even if not needed (the one
in the device would have the required features)
Program Flow Specifiers:
-e, --masserase Mass Erase (clear all flash memory)
-m, --mainerase Erase main flash memory only (requires --password)
--erase=address Selectively erase segment at the specified address
(requires --password)
--erase=adr1-adr2 Selectively erase a range of segments
(requires --password)
-E, --erasecheck Erase Check by file
-p, --program Program file
-v, --verify Verify by file
The order of the above options matters! The table is ordered by normal
execution order. For the options "Epv" a file must be specified.
Program flow specifiers default to "pvr" if a file is given.
Don't forget to specify "e" or "eE" when programming flash!
Data retreiving:
-u, --upload=addr Upload a datablock (see also: -s).
-s, --size=num Size of the data block do upload. (Default is 2)
-x, --hex Show a hexadecimal display of the uploaded data.
(Default)
-b, --bin Get binary uploaded data. This can be used
to redirect the output into a file.
Do before exit:
-g, --go=address Start programm execution at specified address.
This implies option --wait.
-r, --reset Reset connected MSP430. Starts application.
This is a normal device reset and will start
the programm that is specified in the reset
vector. (see also -g)
-w, --wait Wait for <ENTER> before closing serial port.
Address parameters for --erase, --upload, --size can be given in
decimal, hexadecimal or octal.
If it says "NAK received" it's probably because you specified no or a
wrong password. NAKs during programming indicate that the flash was not
erased before programming.
""" % (sys.argv[0], VERSION))
#Main:
def main():
global DEBUG
import getopt
filetype = None
filename = None
comPort = 0 #Default setting.
speed = None
unpatched = 0
reset = 0
wait = 0 #wait at the end
goaddr = None
bslobj = bsl.BootStrapLoader()
toinit = []
todo = []
startaddr = None
size = 2
outputformat= HEX
notimeout = 0
bslrepl = None
mayuseBSL = 1
forceBSL = 0
sys.stderr.write("MSP430 Bootstrap Loader Version: %s\n" % VERSION)
try:
opts, args = getopt.getopt(sys.argv[1:],
"hc:P:wf:m:eEpvrg:UDudsxbiITNB:S:V14",
["help", "comport=", "password=", "wait", "framesize=",
"erasecycles=", "masserase", "mainerase", "erasecheck", "program",
"verify", "reset", "go=", "unpatched", "debug",
"upload=", "download=", "size=", "hex", "bin", "ihex",
"intelhex", "titext", "notimeout", "bsl=", "speed=",
"bslversion", "f1x", "f4x", "invert-reset", "invert-test",
"no-BSL-download", "force-BSL-download", "erase=", "slow",
"swap-reset-test", "test-on-tx", "ignore-answer"]
)
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-c", "--comport"):
try:
comPort = int(a) #try to convert decimal
except ValueError:
comPort = a #take the string and let serial driver decide
elif o in ("-P", "--password"):
#extract password from file
bslobj.passwd = memory.Memory(a).getMemrange(0xffe0, 0xffff)
elif o in ("-w", "--wait"):
wait = 1
elif o in ("-f", "--framesize"):
try:
maxData = int(a) #try to convert decimal
except ValueError:
sys.stderr.write("Framesize must be a valid number\n")
sys.exit(2)
#Make sure that conditions for maxData are met:
#( >= 16 and == n*16 and <= MAX_DATA_BYTES!)
if maxData > bsl.BootStrapLoader.MAX_DATA_BYTES:
maxData = bsl.BootStrapLoader.MAX_DATA_BYTES
elif maxData < 16:
maxData = 16
bslobj.maxData = maxData - (maxData % 16)
sys.stderr.write( "Max. number of data bytes within one frame set to %i.\n" % maxData)
elif o in ("-m", "--erasecycles"):
try:
meraseCycles = int(a) #try to convert decimal
except ValueError:
sys.stderr.write("Erasecycles must be a valid number\n")
sys.exit(2)
#sanity check of value
if meraseCycles < 1:
sys.stderr.write("Erasecycles must be a positive number\n")
sys.exit(2)
if meraseCycles > 20:
sys.stderr.write("Warning: erasecycles set to a large number (>20): %d\n" % meraseCycles)
sys.stderr.write( "Number of mass erase cycles set to %i.\n" % meraseCycles)
bslobj.meraseCycles = meraseCycles
elif o in ("-e", "--masserase"):
toinit.append(bslobj.actionMassErase) #Erase entire Flash
elif o in ("-m", "--mainerase"):
toinit.append(bslobj.actionMainErase) #Erase main Flash
elif o == "--erase":
if '-' in a:
adr, adr2 = a.split('-', 1)
try:
adr = int(adr, 0)
except ValueError:
sys.stderr.write("Address range start address must be a valid number in dec, hex or octal\n")
sys.exit(2)
try:
adr2 = int(adr2, 0)
except ValueError:
sys.stderr.write("Address range end address must be a valid number in dec, hex or octal\n")
sys.exit(2)
while adr <= adr2:
if adr < 0x1100:
modulo = 64 # F2xx:64: F1xx, F4xx: 128 (segments get erased twice)
elif adr < 0x1200:
modulo = 256
else:
modulo = 512
adr = adr - (adr % modulo)
toinit.append(bslobj.makeActionSegmentErase(adr))
adr = adr + modulo
else:
try:
seg = int(a, 0)
toinit.append(bslobj.makeActionSegmentErase(seg))
except ValueError:
sys.stderr.write("Segment address must be a valid number in dec, hex or octal or a range adr1-adr2\n")
sys.exit(2)
elif o in ("-E", "--erasecheck"):
toinit.append(bslobj.actionEraseCheck) #Erase Check (by file)
elif o in ("-p", "--programm"):
todo.append(bslobj.actionProgram) #Program file
elif o in ("-v", "--verify"):
todo.append(bslobj.actionVerify) #Verify file
elif o in ("-r", "--reset"):
reset = 1
elif o in ("-g", "--go"):
try:
goaddr = int(a) #try to convert decimal
except ValueError:
try:
goaddr = int(a[2:],16) #try to convert hex
except ValueError:
sys.stderr.write("Go address must be a valid number\n")
sys.exit(2)
wait = 1
elif o in ("-U", "--unpatched"):
unpatched = 1
elif o in ("-D", "--debug"):
DEBUG = DEBUG + 1
bsl.DEBUG = bsl.DEBUG + 1
| |
"""
We implement a power representation for the value function,
together with methods to initialize and update it
"""
import numpy as np
from scipy.optimize import minimize,nnls
import matplotlib.pyplot as plt
def curve_fit_search_and_grad(gamma, Xi, Yi, Xmax):
    """Mean squared fit error of the power curve and its gradient.

    The curve is Y = g0 + g1 * ((Xmax + exp(g3) - X) / 100) ** g2 with
    parameters gamma = (g0, g1, g2, g3).

    :param gamma: parameter vector (g0, g1, g2, g3)
    :param Xi: sample points (promised values)
    :param Yi: target values at the sample points
    :param Xmax: anchor point of the curve
    :return: (mse, gradient) where gradient is a length-4 array of
        partial derivatives of the mse w.r.t. gamma
    """
    shifted = (Xmax + np.exp(gamma[3]) - Xi) / 100.0
    powered = np.power(shifted, gamma[2])
    resid = gamma[0] + gamma[1] * powered - Yi

    mse = np.mean(resid ** 2)
    d0 = 2.0 * resid.mean()
    d1 = 2.0 * (resid * powered).mean()
    d2 = 2.0 * gamma[1] * (resid * np.log(shifted) * powered).mean()
    d3 = 2.0 * gamma[1] * gamma[2] * np.exp(gamma[3]) * (resid * np.power(shifted, gamma[2] - 1)).mean()
    return mse, np.array([d0, d1, d2, d3])
def curve_fit_search_terms(gamma, Xi, Yi, Xmax):
    """Return the power regressor and the targets for a linear refit.

    Evaluates ((Xmax + exp(g3) - X) / 100) ** g2 at the sample points so
    that the intercept g0 and slope g1 can be refit linearly.

    :return: (regressor values, Yi) as a pair of arrays
    """
    base = (Xmax + np.exp(gamma[3]) - Xi) / 100.0
    return np.power(base, gamma[2]), Yi
def curve_eval(gamma, Xi, Xmax):
    """Evaluate the fitted power curve at the points Xi.

    :param gamma: parameter vector (g0, g1, g2, g3)
    :param Xi: evaluation points
    :param Xmax: anchor point of the curve
    :return: g0 + g1 * ((Xmax + exp(g3) - Xi) / 100) ** g2
    """
    base = (Xmax + np.exp(gamma[3]) - Xi) / 100.0
    return gamma[0] + gamma[1] * np.power(base, gamma[2])
class PowerFunctionGrid:
    """Power-function representation of the value function on a grid.

    For each (z, x) cell the firm value J is approximated as a function of
    promised utility W by

        Y = g0 + g1 * ((g4 + exp(g3) - X) / 100) ** g2

    with parameters stored in ``gamma_all[z, x, 0:4]`` and the anchor
    ``gamma_all[z, x, 4]`` set to the largest W observed in that cell.

    Note: instead of using the per-cell max of W1 here it might be more
    stable to use the actual max of promisable utility to the worker,
    i.e. u(infty)/r; we might then be better off linearly fitting
    functions of the form g1 * (Vmax - X) ** g2 for a list of g2 values.
    """

    def __init__(self, W1, J1, weight=0.01):
        """Fit the power curve cell by cell via smooth minimization.

        :param W1: grid of promised values, shape (num_z, num_v, num_x)
        :param J1: firm values to fit, same shape as W1
        :param weight: weighting parameter kept for the least-squares update
        """
        self.num_z, _, self.num_x = J1.shape
        self.gamma_all = np.zeros((self.num_z, self.num_x, 5))
        self.rsqr = np.zeros((self.num_z, self.num_x))
        self.weight = weight
        # we fit for each (z, x); each cell's solution warm-starts the next
        p0 = [0, -1, -1, np.log(0.1)]
        for ix in range(self.num_x):
            for iz in range(self.num_z):
                p0[0] = J1[iz, 0, ix]
                res2 = minimize(curve_fit_search_and_grad, p0, jac=True,
                                options={'gtol': 1e-8, 'disp': False, 'maxiter': 2000},
                                args=(W1[iz, :, ix], J1[iz, :, ix], W1[iz, :, ix].max()))
                p0 = res2.x
                self.gamma_all[iz, ix, 0:4] = res2.x
                self.gamma_all[iz, ix, 4] = W1[iz, :, ix].max()
                # residual objective relative to the mean squared target value
                self.rsqr[iz, ix] = res2.fun / np.power(J1[iz, :, ix], 2).mean()

    def eval_at_zxv(self, z, x, v):
        """Evaluate the fitted curve of cell (z, x) at promised value(s) v."""
        return curve_eval(self.gamma_all[z, x, 0:4], v, self.gamma_all[z, x, 4])

    def get_vmax(self, z, x):
        """Upper end of the representable promised-value range for (z, x)."""
        return self.gamma_all[z, x, 4] + np.exp(self.gamma_all[z, x, 3])

    def eval_at_W1(self, W1):
        """Evaluate the representation on a whole grid of promised values."""
        J1_hat = np.zeros(W1.shape)
        for ix in range(self.num_x):
            for iz in range(self.num_z):
                J1_hat[iz, :, ix] = self.eval_at_zxv(iz, ix, W1[iz, :, ix])
        return (J1_hat)

    def mse(self, W1, J1):
        """Sum over cells of the mean squared fit error at the stored anchors."""
        mse_val = 0
        for ix in range(self.num_x):
            for iz in range(self.num_z):
                val, _ = curve_fit_search_and_grad(self.gamma_all[iz, ix, 0:4], W1[iz, :, ix], J1[iz, :, ix], self.gamma_all[iz, ix, 4])
                mse_val = mse_val + val
        return (mse_val)

    def update(self, W1, J1, lr, nsteps):
        """
        Update all parameters with one gradient step of size lr.

        :param W1: W1 input values to fit
        :param J1: J1 input values to fit
        :param lr: learning rate (step size)
        :param nsteps: currently unused -- a single gradient step is taken
            per call; kept for interface compatibility
        :return: mean absolute parameter change per cell
        """
        mean_update = 0
        for ix in range(self.num_x):
            for iz in range(self.num_z):
                val, grad = curve_fit_search_and_grad(self.gamma_all[iz, ix, 0:4], W1[iz, :, ix], J1[iz, :, ix], W1[iz, :, ix].max())
                self.gamma_all[iz, ix, 0:4] = self.gamma_all[iz, ix, 0:4] - lr * grad
                self.gamma_all[iz, ix, 4] = W1[iz, :, ix].max()
                mean_update = mean_update + np.abs(lr * grad).mean()
        return (mean_update / (self.num_x * self.num_z))

    def update_cst(self, W1, J1, lr, nsteps):
        """
        Update only the intercept/slope (g0, g1) with one gradient step.

        :param W1: W1 input values to fit
        :param J1: J1 input values to fit
        :param lr: learning rate (step size)
        :param nsteps: currently unused (see :meth:`update`)
        :return: mean absolute parameter change per cell
        """
        tot_update_chg = 0
        for ix in range(self.num_x):
            for iz in range(self.num_z):
                val, grad = curve_fit_search_and_grad(self.gamma_all[iz, ix, 0:4], W1[iz, :, ix], J1[iz, :, ix], W1[iz, :, ix].max())
                self.gamma_all[iz, ix, 0:2] = self.gamma_all[iz, ix, 0:2] - lr * grad[0:2]
                self.gamma_all[iz, ix, 4] = W1[iz, :, ix].max()
                tot_update_chg += np.abs(lr * grad[0:2]).mean()
        return (tot_update_chg / (self.num_x * self.num_z))

    def update_cst_ls(self, W1, J1):
        """
        Refit the intercept (g0) and slope (g1) by weighted least squares,
        keeping the power parameters fixed; also refresh the anchor g4.

        :param W1: W1 input values to fit
        :param J1: J1 input values to fit
        :return: (relative parameter change, R-squared of the fit)
        """
        pj_last = np.copy(self.gamma_all)
        for ix in range(self.num_x):
            for iz in range(self.num_z):
                Xi, Yi = curve_fit_search_terms(self.gamma_all[iz, ix, 0:4], W1[iz, :, ix], J1[iz, :, ix], W1[iz, :, ix].max())
                # keep only observations with J above the cutoff, normalized
                W = 1.0 * (Yi >= -50)
                W = W / W.sum()
                xbar = (Xi * W).sum()
                ybar = (Yi * W).sum()
                # weighted least-squares slope: cov(X, Y) / var(X).
                # BUGFIX: the denominator previously used (Xi - ybar) in its
                # second factor, mixing the x and y means.
                self.gamma_all[iz, ix, 1] = ((Xi - xbar) * (Yi - ybar) * W).sum() / ((Xi - xbar) * (Xi - xbar) * W).sum()
                self.gamma_all[iz, ix, 0] = ((Yi - self.gamma_all[iz, ix, 1] * Xi) * W).sum()
                self.gamma_all[iz, ix, 4] = W1[iz, :, ix].max()
        rsq = 1 - self.mse(W1, J1) / np.power(J1, 2).sum()
        chg = (np.power(pj_last - self.gamma_all, 2).mean(axis=(0, 1)) / np.power(pj_last, 2).mean(axis=(0, 1))).mean()
        return (chg, rsq)
class PowerFunctionGrid2:
""" Class that represents the value function using a power function representation.
The different parameters are stored in gamma_all
Y = 1 - sum_k g0k*(gm - X)^(-g1k)
"""
    def __init__(self,W1,J1,vmax, gpow= np.arange(0.0,20.0,1) ,weight=0.01):
        """Fit the power-function representation to J1 as a function of W1.

        :param W1: values array of shape (num_z, num_v, num_x)
        :param J1: function values, same shape as W1
        :param vmax: upper bound used in the (vmax - W) power terms
        :param gpow: sequence of exponents for the power terms
            (NOTE(review): evaluated once at def time, and overwritten
            inside the loop below anyway)
        :param weight: weighting coefficient (stored but not used here)
        """
        self.num_z, _ , self.num_x = J1.shape
        self.num_g = len(gpow)
        self.gpow = np.array(gpow) # the sequence of power to use
        self.gamma_all = np.zeros( (self.num_z,self.num_x,self.num_g) )
        self.rsqr = np.zeros( (self.num_z,self.num_x))
        self.weight = weight
        self.vmax = vmax
        # we fit for each (z,x)
        for ix in range(self.num_x):
            for iz in range(self.num_z):
                # NOTE(review): this clobbers the gpow set above on every
                # iteration -- looks like leftover experimentation; confirm
                self.gpow = np.exp( np.arange(-4,4))
                Yi = J1[iz, :, ix]
                # compute the design matrix: power terms in (vmax - W)
                XX1 = - np.power(self.vmax - W1[iz, :, ix][:,np.newaxis] , - self.gpow[np.newaxis,:])
                # constant plus linear
                XX2 = - np.power(W1[iz, :, ix][:,np.newaxis] , np.arange(2)[np.newaxis,:])
                XX2[:,0] = - XX2[:,0]
                XX = np.concatenate([XX1,XX2],axis=1)
                # prepare weights (indicator for Yi >= -50, square-rooted
                # so nnls minimizes the W-weighted squared error)
                W = np.sqrt(1.0 * (Yi >= -50))
                # fit parameters imposing non-negativity
                par,norm = nnls(XX * W[:,np.newaxis], Yi * W)
                # NOTE(review): this is fit/data power ratio, not 1 - SSR/SST;
                # confirm the intended R-squared formula
                rsq = np.power( W * np.matmul(XX,par) , 2).mean() / np.power( W * Yi, 2).mean()
                I = W>0
                # NOTE(review): debug plotting inside the constructor; blocks
                # on plt.show() for every (z, x) cell -- consider removing
                plt.plot( W1[iz, I, ix], J1[iz, I, ix],'blue')
                #for k in range(1,len(self.gpow)):
                #    plt.plot( W1[iz, I, ix], XX[I,k] * par[k],"--")
                plt.plot( W1[iz, I, ix], np.matmul(XX[I,:],par),'red')
                plt.show()
                p0 = 0
                # NOTE(review): p0 is a scalar but res2.x is written into a
                # 4-slot slice, and the nnls fit above (par) is discarded;
                # confirm which fit is meant to win
                res2 = minimize(curve_fit_search_and_grad, p0, jac=True,
                                options={'gtol': 1e-8, 'disp': False, 'maxiter': 2000},
                                args=(W1[iz, :, ix], J1[iz, :, ix], W1[iz, :, ix].max()))
                self.gamma_all[iz, ix, 0:4] = res2.x
                # NOTE(review): rsq computed above is never stored; rsqr stays 0
                self.rsqr[iz, ix] = 0
def eval_at_zxv(self,z,x,v):
return curve_eval(self.gamma_all[z,x,0:4],v,self.gamma_all[z,x,4])
def eval_at_W1(self,W1):
J1_hat = np.zeros(W1.shape)
for ix in range(self.num_x):
for iz in range(self.num_z):
J1_hat[iz,:,ix] = self.eval_at_zxv(iz,ix,W1[iz,:,ix])
# make a for loop on x,z
return(J1_hat)
def mse(self,W1,J1):
mse_val = 0
for ix in range(self.num_x):
for iz in range(self.num_z):
val,grad = curve_fit_search_and_grad( self.gamma_all[iz,ix,0:4], W1[iz, :, ix], J1[iz, :, ix], self.gamma_all[iz, ix, 4] )
mse_val = mse_val + val
return(mse_val)
def update(self,W1,J1,lr,nsteps):
"""
Updates the parameters gamma using nsteps newton steps and lr as the learning rate
:param W1: W1 input values to fit
:param J1: J1 input values to fit
:param lr: learning rate
:param nsteps: number of steps
:return:
"""
mean_update = 0
for ix in range(self.num_x):
for iz in range(self.num_z):
val,grad = curve_fit_search_and_grad( self.gamma_all[iz,ix,0:4], W1[iz, :, ix], J1[iz, :, ix], W1[iz, :, ix].max() )
self.gamma_all[iz, ix, 0:4] = self.gamma_all[iz,ix,0:4] - lr * grad
self.gamma_all[iz, ix, 4] = W1[iz, :, ix].max()
mean_update = mean_update + np.abs(lr * grad).mean()
return(mean_update/(self.num_x*self.num_z))
    def update_cst(self,W1,J1,lr,nsteps):
        """
        Takes one gradient step on the intercept and slope parameters
        (gamma[0:2]) for every (z, x) cell, and refreshes the stored
        maximum W value (gamma[4]).

        :param W1: W1 input values to fit
        :param J1: J1 input values to fit
        :param lr: learning rate
        :param nsteps: number of steps (NOTE(review): currently unused --
            only a single step is taken per call)
        :return: average absolute parameter change per (z, x) cell
        """
        tot_update_chg = 0
        for ix in range(self.num_x):
            for iz in range(self.num_z):
                # the objective value is discarded; only the gradient is used
                val,grad = curve_fit_search_and_grad( self.gamma_all[iz,ix,0:4], W1[iz, :, ix], J1[iz, :, ix], W1[iz, :, ix].max() )
                self.gamma_all[iz, ix, 0:2] = self.gamma_all[iz,ix,0:2] - lr * grad[0:2]
                # keep the highest observed W for later curve evaluation
                self.gamma_all[iz, ix, 4] = W1[iz, :, ix].max()
                tot_update_chg += np.abs(lr * grad[0:2]).mean()
        return(tot_update_chg/(self.num_x*self.num_z))
def update_cst_ls(self,W1,J1):
"""
Updates the parameters intercept and slope parameters of the representative
function using lease square. Also stores the highest value to g4.
:param W1: W1 input values to fit
:param J1: J1 input values to fit
:param lr: learning rate
:param nsteps: number of steps
:return:
"""
tot_update_chg = 0
pj_last = np.copy(self.gamma_all)
for ix in range(self.num_x):
for iz in | |
for non-local libjars (e.g. on EMR).
"""
return self._opts['libjars']
def _interpolate_jar_step_args(self, args, step_num):
"""Like :py:meth:`_interpolate_step_args` except it
also replaces `~mrjob.step.GENERIC_ARGS` with
:py:meth:`_hadoop_generic_args_for_step`. This only
makes sense for jar steps; Spark should raise an error
if `~mrjob.step.GENERIC_ARGS` is encountered.
"""
result = []
for arg in args:
if arg == mrjob.step.GENERIC_ARGS:
result.extend(
self._hadoop_generic_args_for_step(step_num))
else:
result.append(arg)
return self._interpolate_step_args(result, step_num)
### setup scripts ###
def _py_files(self):
"""Everything in the *py_files* opt, plus a .zip of the mrjob
library if needed.
"""
py_files = list(self._opts['py_files'])
if self._bootstrap_mrjob() and self._BOOTSTRAP_MRJOB_IN_PY_FILES:
py_files.append(self._create_mrjob_zip())
return py_files
    def _create_setup_wrapper_scripts(self):
        """Create the setup wrapper script(s), and write them into our local
        temp directory (by default, to a file named setup-wrapper.sh).

        This will set ``self._setup_wrapper_script_path``, and add it to
        ``self._working_dir_mgr``.

        This will do nothing if ``self._setup`` is empty or
        this method has already been called.

        NOTE(review): the original docstring mentioned a *local* parameter
        (line endings, see #1071), but this method takes no such argument.
        """
        if self._has_hadoop_streaming_steps():
            # py_files are emulated on streaming via PYTHONPATH exports
            # (see _py_files_setup())
            streaming_setup = self._py_files_setup() + self._setup

            if streaming_setup and not self._setup_wrapper_script_path:
                self._setup_wrapper_script_path = self._write_setup_script(
                    streaming_setup, 'setup-wrapper.sh',
                    'streaming setup wrapper script')

            # input-manifest jobs get a second wrapper that also downloads
            # the input file before running the task
            if (self._uses_input_manifest() and not
                    self._manifest_setup_script_path):
                self._manifest_setup_script_path = self._write_setup_script(
                    streaming_setup, 'manifest-setup.sh',
                    'manifest setup wrapper script',
                    manifest=True)

        # pyspark gets a wrapper that poses as the Python binary, but only
        # when executors have their own working directories
        if (self._has_pyspark_steps() and
                self._spark_executors_have_own_wd() and
                not self._spark_python_wrapper_path):

            pyspark_setup = self._pyspark_setup()
            if pyspark_setup:
                self._spark_python_wrapper_path = self._write_setup_script(
                    pyspark_setup,
                    'python-wrapper.sh', 'Spark Python wrapper script',
                    wrap_python=True)
def _pyspark_setup(self):
"""Like ``self._setup``, but prepends commands for archive
emulation if needed."""
setup = []
if self._emulate_archives_on_spark():
for name, path in sorted(
self._working_dir_mgr.name_to_path('archive').items()):
archive_file_name = self._working_dir_mgr.name(
'archive_file', path)
setup.append(_unarchive_cmd(path) % dict(
file=pipes.quote(archive_file_name),
dir=pipes.quote(name)))
setup.extend(self._setup)
return setup
def _py_files_setup(self):
"""A list of additional setup commands to emulate Spark's
--py-files option on Hadoop Streaming."""
result = []
for py_file in self._py_files():
path_dict = {'type': 'file', 'name': None, 'path': py_file}
self._working_dir_mgr.add(**path_dict)
result.append(['export PYTHONPATH=', path_dict, ':$PYTHONPATH'])
return result
def _write_setup_script(self, setup, filename, desc,
manifest=False, wrap_python=False):
"""Write a setup script and return its path."""
contents = self._setup_wrapper_script_content(
setup, manifest=manifest, wrap_python=wrap_python)
path = os.path.join(self._get_local_tmp_dir(), filename)
self._write_script(contents, path, desc)
self._working_dir_mgr.add('file', path)
return path
    def _create_mrjob_zip(self):
        """Make a zip of the mrjob library, without .pyc or .pyo files.

        This will also set ``self._mrjob_zip_path`` and return it.

        Typically called from :py:meth:`_create_setup_wrapper_scripts`.

        It's safe to call this method multiple times (we'll only create
        the zip file once.)
        """
        if not self._mrjob_zip_path:
            # find mrjob library
            import mrjob

            if not os.path.basename(mrjob.__file__).startswith('__init__.'):
                # NOTE(review): the %s is never interpolated here; Exception
                # just receives two args -- probably meant % formatting
                raise Exception(
                    "Bad path for mrjob library: %s; can't bootstrap mrjob",
                    mrjob.__file__)

            mrjob_dir = os.path.dirname(mrjob.__file__) or '.'

            zip_path = os.path.join(self._get_local_tmp_dir(), 'mrjob.zip')

            # skip compiled files and editor/OS junk when archiving
            def filter_path(path):
                filename = os.path.basename(path)
                return not(filename.lower().endswith('.pyc') or
                           filename.lower().endswith('.pyo') or
                           # filter out emacs backup files
                           filename.endswith('~') or
                           # filter out emacs lock files
                           filename.startswith('.#') or
                           # filter out MacFuse resource forks
                           filename.startswith('._'))

            log.debug('archiving %s -> %s as %s' % (
                mrjob_dir, zip_path, os.path.join('mrjob', '')))
            zip_dir(mrjob_dir, zip_path, filter=filter_path, prefix='mrjob')

            self._mrjob_zip_path = zip_path

        return self._mrjob_zip_path
    def _setup_wrapper_script_content(
            self, setup, manifest=False, wrap_python=False):
        """Return a (Bourne) shell script that runs the setup commands and then
        executes whatever is passed to it (this will be our mapper/reducer),
        as a list of strings (one for each line, including newlines).

        We obtain a file lock so that two copies of the setup commands
        cannot run simultaneously on the same machine (this helps for running
        :command:`make` on a shared source code archive, for example).

        :param setup: list of setup commands (each a list of string tokens
                      and/or working-dir path dicts)
        :param manifest: if true, end with code that downloads the manifest
                         input file and passes it to the task
        :param wrap_python: if true, emit a shebang and end by invoking the
                            task Python binary (the script poses as python)
        """
        lines = []

        # TODO: this is very similar to _start_of_sh_script() in cloud.py

        if wrap_python:
            # start with shebang
            sh_bin = self._sh_bin()

            if os.path.isabs(sh_bin[0]):
                shebang_bin = sh_bin
            else:
                shebang_bin = ['/usr/bin/env'] + list(sh_bin)

            if len(shebang_bin) > 2:
                # Linux limits shebang to one binary and one arg
                shebang_bin = shebang_bin[:2]
                log.warning('Limiting shebang to two arguments:'
                            '#!%s' % cmd_line(shebang_bin))

            lines.append('#!%s' % cmd_line(shebang_bin))

        # hook for 'set -e', etc.
        pre_commands = self._sh_pre_commands()
        if pre_commands:
            for cmd in pre_commands:
                lines.append(cmd)
            lines.append('')

        if setup:
            lines.extend(self._setup_cmd_content(setup))

        # handle arguments to the script
        if wrap_python:
            # pretend to be python ($@ is arguments to the python binary)
            python_bin = self._task_python_bin()
            lines.append('%s "$@"' % cmd_line(python_bin))
        elif manifest:
            # arguments ($@) are a command
            # eventually runs: "$@" $INPUT_PATH $INPUT_URI
            lines.extend(self._manifest_download_content())
        else:
            # arguments ($@) are a command, just run it
            lines.append('"$@"')

        return lines
    def _setup_cmd_content(self, setup):
        """Write setup script content to obtain a file lock, run setup
        commands in a way that doesn't perturb the script, and then
        release the lock and return to the original working directory.

        :param setup: list of commands, each a list of string tokens
                      and/or working-dir path dicts
        :return: list of shell-script lines (no trailing newlines)
        """
        lines = []

        lines.append('# store $PWD')
        lines.append('__mrjob_PWD=$PWD')
        lines.append('')

        lines.append('# obtain exclusive file lock')
        # Basically, we're going to tie file descriptor 9 to our lockfile,
        # use a subprocess to obtain a lock (which we somehow inherit too),
        # and then release the lock by closing the file descriptor.
        # File descriptors 10 and higher are used internally by the shell,
        # so 9 is as out-of-the-way as we can get.
        lines.append('exec 9>/tmp/wrapper.lock.%s' % self._job_key)
        # would use flock(1), but it's not always available
        lines.append("%s -c 'import fcntl; fcntl.flock(9, fcntl.LOCK_EX)'" %
                     cmd_line(self._python_bin()))
        lines.append('')

        lines.append('# setup commands')
        # group setup commands so we can redirect their input/output (see
        # below). Don't use parens; this would invoke a subshell, which would
        # keep us from exporting environment variables to the task.
        lines.append('{')
        for cmd in setup:
            # reconstruct the command line, substituting $__mrjob_PWD/<name>
            # for path dicts
            line = ' ' # indent, since these commands are in a group
            for token in cmd:
                if isinstance(token, dict):
                    # it's a path dictionary
                    line += '$__mrjob_PWD/'
                    line += pipes.quote(self._working_dir_mgr.name(**token))
                else:
                    # it's raw script
                    line += token
            lines.append(line)
        # redirect setup commands' input/output so they don't interfere
        # with the task (see Issue #803).
        lines.append('} 0</dev/null 1>&2')
        lines.append('')

        lines.append('# release exclusive file lock')
        lines.append('exec 9>&-')
        lines.append('')

        lines.append('# run task from the original working directory')
        lines.append('cd $__mrjob_PWD')

        return lines
    def _manifest_download_content(self):
        """Write the part of the manifest setup script after setup, that
        downloads the input file, runs the script, and then deletes
        the file.

        :return: list of shell-script lines; assumes the task command is in
                 ``"$@"`` and eventually runs ``"$@" $INPUT_PATH $INPUT_URI``
        """
        lines = []

        lines.append('{')

        # read URI from stdin
        lines.append(' # read URI of input file from stdin')
        lines.append(' INPUT_URI=$(cut -f 2)')
        lines.append('')

        # pick file extension (e.g. ".warc.gz")
        lines.append(' # pick file extension')
        lines.append(" FILE_EXT=$(basename $INPUT_URI | sed -e 's/^[^.]*//')")
        lines.append('')

        # pick a unique name in the current directory to download the file to
        lines.append(' # pick filename to download to')
        lines.append(' INPUT_PATH=$(mktemp ./input-XXXXXXXXXX$FILE_EXT)')
        # mktemp creates the file; remove it so the download can create it
        lines.append(' rm $INPUT_PATH')
        lines.append('')

        # download the file (using different commands depending on the path)
        lines.append(' # download the input file')
        lines.append(' case $INPUT_URI in')
        # the trailing ('*', 'cp') is the local-filesystem fallback
        download_cmds = (
            list(self._manifest_download_commands()) + [('*', 'cp')])
        for glob, cmd in download_cmds:
            lines.append(' %s)' % glob)
            lines.append(' %s $INPUT_URI $INPUT_PATH' % cmd)
            lines.append(' ;;')
        lines.append(' esac')
        lines.append('')

        # unpack .bz2 and .gz files
        lines.append(' # if input file is compressed, unpack it')
        lines.append(' case $INPUT_PATH in')
        for ext, cmd in self._manifest_uncompress_commands():
            lines.append(' *.%s)' % ext)
            lines.append(' %s $INPUT_PATH' % cmd)
            # strip the extension off INPUT_PATH after uncompressing
            lines.append(" INPUT_PATH="
                         r"$(echo $INPUT_PATH | sed -e 's/\.%s$//')" % ext)
            lines.append(' ;;')
        lines.append(' esac')
        lines.append('} 1>&2')
        lines.append('')

        # don't exit if script fails
        lines.append('# run our mrjob script')
        lines.append('set +e')
        # pass input path and URI to script
        lines.append('"$@" $INPUT_PATH $INPUT_URI')
        lines.append('')

        # save return code, turn off echo
        lines.append('# if script fails, print input URI before exiting')
        lines.append('{ RETURNCODE=$?; set +x; } &> /dev/null')
        lines.append('')

        lines.append('{')
        # handle errors
        lines.append(' if [ $RETURNCODE -ne 0 ]')
        lines.append(' then')
        lines.append(' echo')
        lines.append(' echo "while reading input from $INPUT_URI"')
        lines.append(' fi')
        lines.append('')

        # clean up input
        lines.append(' rm $INPUT_PATH')
        lines.append('} 1>&2')
        lines.append('')

        # exit with correct status
        lines.append('exit $RETURNCODE')

        return lines
def _manifest_download_commands(self):
"""Return a list of ``(glob, cmd)``, where *glob*
matches a path or URI to download, and download command is a command
to download it (e.g. ```hadoop fs -copyToLocal``), as a
string.
Redefine this in your subclass. More specific blobs should come first.
"""
return []
def _manifest_uncompress_commands(self):
"""Return a list of ``(ext, cmd)`` where ``ext`` is a file extension
(e.g. ``gz``) and ``cmd`` is a command to uncompress it (e.g.
``gunzip``)."""
return [
('bz2', 'bunzip2'),
('gz', 'gunzip'),
| |
# Import any DAObject classes that you will need
from docassemble.base.util import Individual, Person, DAObject
# Import the SQLObject and some associated utility functions
from docassemble.base.sql import alchemy_url, upgrade_db, SQLObject, SQLObjectRelationship
# Import SQLAlchemy names
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine, or_, and_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
import sys
# Only allow these names (DAObject classes) to be imported with a modules block
#__all__ = ['Bank', 'Customer', 'BankCustomer']
__all__ = ['Processo', 'Requerido', 'ProcessoRequerido']
# Create the base class for SQLAlchemy table definitions
Base = declarative_base()
# SQLAlchemy table definition for a Processo
class ProcessoModel(Base):
    """SQLAlchemy model backing the ``processo`` table."""
    __tablename__ = 'processo'
    id = Column(Integer, primary_key=True)
    # human-readable unique identifier (used as the SQLObject _uid below)
    routing = Column(String(250), unique=True)
    name = Column(String(250))
# SQLAlchemy table definition for a Requerido
class RequeridoModel(Base):
    """SQLAlchemy model backing the ``requerido`` table.

    All values are stored as strings. The spelling ``nacionality`` is kept
    as-is because it is the actual database column name.
    """
    __tablename__ = 'requerido'
    id = Column(Integer, primary_key=True)
    # unique personal identifier (CPF number); used as the SQLObject _uid
    cpf = Column(String(250), unique=True)
    first_name = Column(String(250))
    last_name = Column(String(250))
    # address fields
    address = Column(String(250))
    unit = Column(String(250))
    city = Column(String(250))
    state = Column(String(250))
    zip = Column(String(250))
    # additional biographical fields (mapped to attributes of .address in
    # the Requerido class later in this module)
    nacionality = Column(String(250))
    capacity = Column(String(250))
    legal_personal_representative = Column(String(250))
    marital_status = Column(String(250))
    city_of_birth = Column(String(250))
    # NOTE(review): stored as a string, not a Date; confirm expected format
    birth_date = Column(String(250))
    profession = Column(String(250))
    identity_document = Column(String(250))
    fathers_name = Column(String(250))
    mothers_name = Column(String(250))
    phone_number = Column(String(250))
    # uses_whatsapp = Column(Boolean())
# SQLAlchemy table definition for keeping track of which Processos have which Requeridos
class ProcessoRequeridoModel(Base):
    """Association table linking a processo row to a requerido row."""
    __tablename__ = 'processo_requerido'
    id = Column(Integer, primary_key=True)
    # deleting either side cascades to remove the link row
    processo_id = Column(Integer, ForeignKey('processo.id', ondelete='CASCADE'), nullable=False)
    requerido_id = Column(Integer, ForeignKey('requerido.id', ondelete='CASCADE'), nullable=False)
# Form the URL for connecting to the database based on the "olhosdamata db"
# directive in the Configuration
url = alchemy_url('olhosdamata db')
# Build the "engine" for connecting to the SQL server, using the URL for the database.
engine = create_engine(url)
# Create the tables
Base.metadata.create_all(engine)
# Get SQLAlchemy ready
# NOTE(review): Base.metadata.bind is deprecated/removed in SQLAlchemy 1.4+;
# confirm the installed SQLAlchemy version still supports it
Base.metadata.bind = engine
# note: the trailing () creates a single shared Session instance at import time
DBSession = sessionmaker(bind=engine)()
# Perform any necessary database schema updates using alembic, if there is an alembic
# directory and alembic.ini file in the package.
upgrade_db(url, __file__, engine)
# Define Processo as both a DAObject (Person) and SQLObject
class Processo(Person, SQLObject):
    """A processo that is both a docassemble Person and a row in the
    ``processo`` SQL table."""
    # SQLAlchemy model backing this class
    _model = ProcessoModel
    # database session used for all queries
    _session = DBSession
    # a record is not ready to save until "name" is defined
    _required = ['name']
    # human-readable unique identifier column
    _uid = 'routing'

    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        # run the SQLObject initialization for this instance
        self.sql_init()

    def db_get(self, column):
        """Fetch the value of a SQL column from this object's attributes."""
        if column == 'routing':
            return self.routing
        if column == 'name':
            return self.name.text

    def db_set(self, column, value):
        """Set an object attribute from a non-null SQL column value."""
        if column == 'routing':
            self.routing = value
        elif column == 'name':
            self.name.text = value

    def db_null(self, column):
        """Delete the attribute corresponding to a now-null SQL column."""
        if column == 'routing':
            del self.routing
        elif column == 'name':
            del self.name.text

    def has_requerido(self, requerido):
        """Return True if this processo is linked to *requerido*."""
        if not (self.ready() and requerido.ready()):
            raise Exception("has_requerido: cannot retrieve data")
        link = self._session.query(ProcessoRequeridoModel).filter(
            ProcessoRequeridoModel.processo_id == self.id,
            ProcessoRequeridoModel.requerido_id == requerido.id).first()
        return link is not None

    def add_requerido(self, requerido):
        """Idempotently link *requerido* to this processo."""
        if self.has_requerido(requerido):
            return
        self._session.add(ProcessoRequeridoModel(
            processo_id=self.id, requerido_id=requerido.id))
        self._session.commit()

    def get_requerido(self):
        """Return the list of Requerido objects linked to this processo."""
        if not self.ready():
            raise Exception("get_requerido: cannot retrieve data")
        links = self._session.query(ProcessoRequeridoModel).filter(
            ProcessoRequeridoModel.processo_id == self.id).all()
        return [Requerido.by_id(link.requerido_id) for link in links]

    def del_requerido(self, requerido):
        """Remove the link between this processo and *requerido*."""
        if not (self.ready() and requerido.ready()):
            raise Exception("del_requerido: cannot retrieve data")
        self._session.query(ProcessoRequeridoModel).filter(
            ProcessoRequeridoModel.processo_id == self.id,
            ProcessoRequeridoModel.requerido_id == requerido.id).delete()
        self._session.commit()
class Requerido(Individual, SQLObject):
    """An individual respondent (requerido) stored in the ``requerido``
    SQL table."""
    _model = RequeridoModel
    _session = DBSession
    _required = ['first_name']
    _uid = 'cpf'

    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        self.sql_init()

    def _column_target(self, column):
        """Return an (object, attribute-name) pair for a SQL column, or
        None if the column is not recognized.

        Column-to-attribute mapping: 'cpf' lives directly on the object,
        the name parts live on .name, and every other column (including the
        extra biographical fields) lives on the .address object, mirroring
        the original per-column mapping.
        'uses_whatsapp' is deliberately unmapped (disabled in the model too).
        """
        if column == 'cpf':
            return (self, 'cpf')
        if column == 'first_name':
            return (self.name, 'first')
        if column == 'last_name':
            return (self.name, 'last')
        if column in ('address', 'unit', 'city', 'state', 'zip',
                      'nacionality', 'capacity',
                      'legal_personal_representative', 'marital_status',
                      'city_of_birth', 'birth_date', 'profession',
                      'identity_document', 'fathers_name', 'mothers_name',
                      'phone_number'):
            return (self.address, column)
        return None

    def db_get(self, column):
        """Fetch the value for a SQL column (None if column is unknown)."""
        target = self._column_target(column)
        if target is not None:
            obj, attr = target
            return getattr(obj, attr)

    def db_set(self, column, value):
        """Set the attribute corresponding to a non-null SQL column."""
        target = self._column_target(column)
        if target is not None:
            obj, attr = target
            setattr(obj, attr, value)

    def db_null(self, column):
        """Delete the attribute corresponding to a now-null SQL column."""
        target = self._column_target(column)
        if target is not None:
            obj, attr = target
            delattr(obj, attr)
class ProcessoRequerido(DAObject, SQLObjectRelationship):
_model = ProcessoRequeridoModel
_session = DBSession
_parent = [Processo, 'processo', 'processo_id']
_child = [Requerido, 'requerido', 'requerido_id']
    def init(self, *pargs, **kwargs):
        # Standard docassemble object initialization, then the
        # SQLObjectRelationship initialization for this link record.
        super().init(*pargs, **kwargs)
        self.rel_init(*pargs, **kwargs)
def db_get(self, column):
if column == 'processo_id':
return self.processo.id
elif column == 'requerido_id':
return self.requerido.id
def db_set(self, column, value):
if column == 'processo_id':
self.bank = Processo.by_id(value)
elif column == 'requerido_id':
self.customer = Requerido.by_id(value)
    # A db_find_existing method is defined here because the default db_find_existing() method for
    # the SQLObject class tries to find existing records based on a unique identifier column indicated
    # by the _uid attribute. Since the unique identifier for a processo-requerido relationship record is
    # not a single column, but rather the combination of processo ID and requerido ID, there is no _uid
    # column for the default db_find_existing() method to use. But we can write our own method for
    # how to locate an existing record based on Python object attributes (.processo.id and .requerido.id).
def db_find_existing(self):
try:
return | |
from app import db
from models import User, Message, WeekSchedule, UserSchedule, Food, DaysOfSchedule
import random
# Reset the schema so the seed data starts from a clean database
db.drop_all()
db.create_all()

# These are users.  NOTE: '<EMAIL>'/'<PASSWORD>' are placeholder values for
# seeding only.
admin = User(name='Admin', email='<EMAIL>', password='<PASSWORD>')
adonay = User(name='Adonay', email='<EMAIL>', password='<PASSWORD>')
mark = User(name='Mark', email='<EMAIL>', password='<PASSWORD>')
john = User(name='John', email='<EMAIL>', password='<PASSWORD>')
gary = User(name='Gary', email='<EMAIL>', password='<PASSWORD>')
bev = User(name='Bev', email='<EMAIL>', password='<PASSWORD>')
tracy = User(name='Tracy', email='<EMAIL>', password='<PASSWORD>')
admin.admin = True

# BUG FIX: gary, bev and tracy were created but never added to the session,
# so they were silently dropped from the seed data (messages attributed to
# Tracy are created further below).
db.session.add(adonay)
db.session.add(mark)
db.session.add(john)
db.session.add(gary)
db.session.add(bev)
db.session.add(tracy)
db.session.add(admin)
db.session.commit()
#####################
# These are messages
####################

# All seed messages share the same placeholder body text.
_LOREM = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.'

# (title, category, shift, owner) for each seed message, in insertion order.
# NOTE: some title/shift pairs do not match (e.g. "Mark to Evening" is
# posted to the Afternoon shift); values are preserved exactly as in the
# original seed data.
_SEED_MESSAGES = [
    ('Every body please read this', 'Urgent', 'All Shifts', adonay),
    ('Lost Credit Card', 'General', 'All Shifts', adonay),
    ('Completed', 'Urgent', 'Afternoon', adonay),
    ('To be Edited', 'General', 'Evening', adonay),
    ('To be Deleted', 'Urgent', 'Morning', adonay),
    ('Every body please read this', 'Urgent', 'All Shifts', adonay),
    ('Lost Wallet', 'General', 'All Shifts', adonay),
    ('Donut Order', 'Urgent', 'Afternoon', adonay),
    ('Lost ID', 'General', 'Evening', adonay),
    ('Price of Monster change', 'Urgent', 'Morning', adonay),
    ('By Mark', 'General', 'All Shifts', mark),
    ('<NAME>', 'Urgent', 'All Shifts', mark),
    ('<NAME>', 'General', 'All Shifts', mark),
    ('Mark to Morning', 'Urgent', 'Morning', mark),
    ('Mark to Evening', 'Urgent', 'Afternoon', mark),
    ('Mark to Afternoon', 'General', 'Evening', mark),
    ('By John', 'Urgent', 'All Shifts', john),
    ('John Urgent', 'Urgent', 'All Shifts', john),
    ('John General', 'General', 'All Shifts', john),
    ('John to Morning', 'Urgent', 'Morning', john),
    ('John to Evening', 'General', 'Afternoon', john),
    ('John to Afternoon', 'General', 'Evening', john),
]

for _title, _category, _shift, _owner in _SEED_MESSAGES:
    db.session.add(
        Message(
            title=_title,
            content=_LOREM,
            category=_category,
            shift=_shift,
            owner=_owner
        )
    )
db.session.add(
Message(
title='By Tracy',
content='Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.',
category='General',
shift='All Shifts',
owner=tracy
)
)
db.session.add(
Message(
title='Tracy Urgent',
content='Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.',
category='Urgent',
shift='All Shifts',
owner=tracy
)
)
db.session.add(
Message(
title='Tracy General',
content='Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.',
category='General',
shift='All Shifts',
owner=tracy
)
)
db.session.add(
Message(
title='Tracy to Morning',
content='Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.',
category='Urgent',
shift='Morning',
owner=tracy
)
)
db.session.add(
Message(
title='Tracy to Evening',
content='Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.',
category='Urgent',
shift='Afternoon',
owner=tracy
)
)
db.session.add(
Message(
title='Tracy to Afternoon',
content='Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.',
category='General',
| |
<filename>tensorbay/client/dataset.py
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
"""The remote dataset on TensorBay."""
import functools
import logging
import os
import shutil
import tempfile
from typing import TYPE_CHECKING, Any, Dict, Generator, Iterable, Iterator, Optional, Tuple, Union
from ulid import ULID, from_timestamp
from tensorbay.client.diff import DataDiff, DatasetDiff, SegmentDiff
from tensorbay.client.lazy import PagingList
from tensorbay.client.log import (
CACHE_SPACE_WARNING,
UPLOAD_SEGMENT_RESUME_TEMPLATE_CLI,
UPLOAD_SEGMENT_RESUME_TEMPLATE_SDK,
)
from tensorbay.client.requests import multithread_upload
from tensorbay.client.segment import _STRATEGIES, FusionSegmentClient, SegmentClient
from tensorbay.client.statistics import Statistics
from tensorbay.client.status import Status
from tensorbay.client.version import SquashAndMerge, VersionControlMixin
from tensorbay.dataset import AuthData, Data, Frame, FusionSegment, Notes, RemoteData, Segment
from tensorbay.exception import (
FrameError,
InvalidParamsError,
NameConflictError,
ResourceNotExistError,
StatusError,
)
from tensorbay.label import Catalog
from tensorbay.utility import Tqdm
if TYPE_CHECKING:
from tensorbay.client.gas import GAS
logger = logging.getLogger(__name__)
class DatasetClientBase(VersionControlMixin):
    """This class defines the basic concept of the dataset client.

    A :class:`DatasetClientBase` contains the information needed for
    determining a unique dataset on TensorBay, and provides a series of
    method within dataset scope, such as :meth:`DatasetClientBase.list_segment_names`
    and :meth:`DatasetClientBase.upload_catalog`.

    Arguments:
        name: Dataset name.
        dataset_id: Dataset ID.
        gas: The initial client to interact between local and TensorBay.
        status: The version control status of the dataset.
        alias: Dataset alias.
        is_public: Whether the dataset is public.

    Attributes:
        name: Dataset name.
        dataset_id: Dataset ID.
        status: The version control status of the dataset.

    """

    def __init__(
        self,
        name: str,
        dataset_id: str,
        gas: "GAS",
        *,
        status: Status,
        alias: str,
        is_public: bool,
    ) -> None:
        self._name = name
        self._dataset_id = dataset_id
        # Reuse the low-level HTTP client owned by the GAS entry point.
        self._client = gas._client  # pylint: disable=protected-access
        self._status = status
        self._alias = alias
        self._is_public = is_public
        # Absolute cache directory; the empty string means caching is disabled.
        self._cache_path: str = ""

    def _create_segment(self, name: str) -> None:
        # Create an empty segment with the given name in the current draft.
        post_data: Dict[str, Any] = {"name": name}
        post_data.update(self._status.get_status_info())
        self._client.open_api_do("POST", "segments", self._dataset_id, json=post_data)

    def _list_segments(self, offset: int = 0, limit: int = 128) -> Dict[str, Any]:
        # Fetch one page of segment descriptions from the open API.
        params: Dict[str, Any] = self._status.get_status_info()
        params["offset"] = offset
        params["limit"] = limit
        response = self._client.open_api_do("GET", "segments", self._dataset_id, params=params)
        return response.json()  # type: ignore[no-any-return]

    def _generate_segment_names(
        self, offset: int = 0, limit: int = 128
    ) -> Generator[str, None, int]:
        # Yield one page of segment names; the generator's return value
        # (the page's "totalCount") is consumed by :class:`PagingList`.
        response = self._list_segments(offset, limit)
        for item in response["segments"]:
            yield item["name"]
        return response["totalCount"]  # type: ignore[no-any-return]

    def _copy_segment(
        self,
        source_name: str,
        target_name: str,
        *,
        source_client: Union[None, "DatasetClient", "FusionDatasetClient"],
        strategy: str = "abort",
    ) -> None:
        """Copy a segment, optionally from another dataset or revision.

        Arguments:
            source_name: Name of the segment to copy from.
            target_name: Name of the copied segment.
            source_client: The client of the dataset the segment is copied from.
                ``None`` means copying within the current dataset.
            strategy: The conflict strategy, one of ``_STRATEGIES``.

        Raises:
            InvalidParamsError: When the strategy is unknown.
            ValueError: When copying a segment onto itself within one dataset.

        """
        if strategy not in _STRATEGIES:
            raise InvalidParamsError(param_name="strategy", param_value=strategy)

        source = {"segmentName": source_name}
        if not source_client:
            if source_name == target_name:
                raise ValueError("Copying the segment to the same location is not allowed")
        else:
            # Cross-dataset copy: record where (and at which revision) the
            # source segment lives.
            source["id"] = source_client.dataset_id
            source.update(source_client.status.get_status_info())

        self._status.check_authority_for_draft()
        post_data: Dict[str, Any] = {
            "strategy": strategy,
            "source": source,
            "segmentName": target_name,
        }
        post_data.update(self._status.get_status_info())
        self._client.open_api_do("POST", "segments?copy", self._dataset_id, json=post_data)

    def _move_segment(
        self,
        source_name: str,
        target_name: str,
        *,
        strategy: str = "abort",
    ) -> None:
        """Move (rename) a segment inside the current draft.

        Arguments:
            source_name: Name of the segment to move.
            target_name: New name of the segment.
            strategy: The conflict strategy, one of ``_STRATEGIES``.

        Raises:
            InvalidParamsError: When the strategy is unknown.

        """
        self._status.check_authority_for_draft()
        if strategy not in _STRATEGIES:
            raise InvalidParamsError(param_name="strategy", param_value=strategy)

        post_data: Dict[str, Any] = {
            "strategy": strategy,
            "source": {"segmentName": source_name},
            "segmentName": target_name,
        }
        post_data.update(self._status.get_status_info())
        self._client.open_api_do("POST", "segments?move", self._dataset_id, json=post_data)

    @property
    def dataset_id(self) -> str:
        """Return the TensorBay dataset ID.

        Returns:
            The TensorBay dataset ID.

        """
        return self._dataset_id

    @property
    def status(self) -> Status:
        """Return the status of the dataset client.

        Returns:
            The status of the dataset client.

        """
        return self._status

    @property
    def name(self) -> str:
        """Return the TensorBay dataset name.

        Returns:
            The TensorBay dataset name.

        """
        return self._name

    @property
    def alias(self) -> str:
        """Return the TensorBay dataset alias.

        Returns:
            The TensorBay dataset alias.

        """
        return self._alias

    @property
    def is_public(self) -> bool:
        """Return whether the dataset is public.

        Returns:
            Whether the dataset is public.

        """
        return self._is_public

    @property
    def cache_enabled(self) -> bool:
        """Whether the cache is enabled.

        Returns:
            Whether the cache is enabled.

        """
        # Cache only makes sense for immutable (committed) data.
        return bool(self._cache_path) and not self.status.is_draft

    @property
    def squash_and_merge(self) -> SquashAndMerge:
        """Get class :class:`~tensorbay.client.version.SquashAndMerge`.

        Returns:
            Required :class:`~tensorbay.client.version.SquashAndMerge`.

        """
        # BUG FIX: the previous ``@property`` + ``@functools.lru_cache()``
        # stack cached on ``self``, so the process-wide LRU cache held a
        # strong reference to every client instance and kept them alive
        # (flake8-bugbear B019).  Cache on the instance instead.
        if not hasattr(self, "_squash_and_merge"):
            self._squash_and_merge = SquashAndMerge(
                self._client, self._dataset_id, self._status, self.get_draft
            )
        return self._squash_and_merge

    def enable_cache(self, cache_path: str = "") -> None:
        """Enable cache when open the remote data of the dataset.

        Arguments:
            cache_path: The path to store the cache.

        Raises:
            StatusError: When enable cache under draft status.

        """
        try:
            self.status.check_authority_for_commit()
        except StatusError as error:
            raise StatusError("Cache is not available for datasets under draft status") from error

        if cache_path:
            self._cache_path = os.path.join(
                os.path.abspath(os.path.expanduser(cache_path)), self.dataset_id
            )
        else:
            # Fall back to a per-dataset directory under the system temp dir.
            self._cache_path = os.path.join(tempfile.gettempdir(), "tensorbay", self.dataset_id)

        total_size = self.get_total_size()
        print(
            "To cache the entire dataset, "
            f"please make sure there is free storage space larger than {total_size} bytes.\n"
            "Note that cache will not work for datasets under draft status.\n\n"
            f'The cache will be stored under "{self._cache_path}".\n'
            "You can remove all the files after using."
        )
        os.makedirs(self._cache_path, exist_ok=True)
        # Warn (but do not fail) when the target volume looks too small.
        _, _, free = shutil.disk_usage(self._cache_path)
        if free < total_size:
            logger.warning(CACHE_SPACE_WARNING, free, total_size)

    def update_notes(
        self,
        *,
        is_continuous: Optional[bool] = None,
        bin_point_cloud_fields: Union[Iterable[str], None] = ...,  # type: ignore[assignment]
    ) -> None:
        """Update the notes.

        Arguments:
            is_continuous: Whether the data is continuous.
            bin_point_cloud_fields: The field names of the bin point cloud files in the dataset.

        """
        self._status.check_authority_for_draft()
        patch_data: Dict[str, Any] = {}
        if is_continuous is not None:
            patch_data["isContinuous"] = is_continuous
        # ``...`` (Ellipsis) is the "not given" sentinel so that an explicit
        # ``None`` can mean "clear the field" while omitting it means "leave as is".
        if bin_point_cloud_fields is None:
            patch_data["binPointCloudFields"] = bin_point_cloud_fields
        elif bin_point_cloud_fields is not ...:  # type: ignore[comparison-overlap]
            patch_data["binPointCloudFields"] = list(bin_point_cloud_fields)

        patch_data.update(self._status.get_status_info())
        self._client.open_api_do("PATCH", "notes", self._dataset_id, json=patch_data)

    def get_notes(self) -> Notes:
        """Get the notes.

        Returns:
            The :class:`~tensorbay.dataset.dataset.Notes`.

        """
        params: Dict[str, Any] = self._status.get_status_info()
        return Notes.loads(
            self._client.open_api_do("GET", "notes", self._dataset_id, params=params).json()
        )

    def list_segment_names(self) -> PagingList[str]:
        """List all segment names in a certain commit.

        Returns:
            The PagingList of segment names.

        """
        return PagingList(self._generate_segment_names, 128)

    def get_catalog(self) -> Catalog:
        """Get the catalog of the certain commit.

        Returns:
            Required :class:`~tensorbay.label.catalog.Catalog`.

        """
        params: Dict[str, Any] = self._status.get_status_info()
        response = self._client.open_api_do(
            "GET", "labels/catalogs", self._dataset_id, params=params
        ).json()
        return Catalog.loads(response["catalog"])

    def upload_catalog(self, catalog: Catalog) -> None:
        """Upload a catalog to the draft.

        Arguments:
            catalog: :class:`~tensorbay.label.catalog.Catalog` to upload.

        """
        self._status.check_authority_for_draft()
        put_data: Dict[str, Any] = {"catalog": catalog.dumps()}
        put_data.update(self._status.get_status_info())
        self._client.open_api_do("PUT", "labels/catalogs", self._dataset_id, json=put_data)

    def delete_segment(self, name: str) -> None:
        """Delete a segment of the draft.

        Arguments:
            name: Segment name.

        """
        self._status.check_authority_for_draft()
        delete_data: Dict[str, Any] = {"segmentName": name}
        delete_data.update(self._status.get_status_info())
        self._client.open_api_do("DELETE", "segments", self._dataset_id, json=delete_data)

    def get_label_statistics(self) -> Statistics:
        """Get label statistics of the dataset.

        Returns:
            Required :class:`~tensorbay.client.dataset.Statistics`.

        """
        params: Dict[str, Any] = self._status.get_status_info()
        return Statistics(
            self._client.open_api_do(
                "GET", "labels/statistics", self._dataset_id, params=params
            ).json()["labelStatistics"]
        )

    def get_total_size(self) -> int:
        """Get total data size of the dataset and the unit is byte.

        Returns:
            The total data size of the dataset.

        """
        # Total size is only defined for committed data.
        self._status.check_authority_for_commit()
        params: Dict[str, Any] = {"commit": self._status.commit_id}
        return self._client.open_api_do(  # type: ignore[no-any-return]
            "GET", "total-size", self._dataset_id, params=params
        ).json()["totalSize"]
class DatasetClient(DatasetClientBase):
"""This class defines :class:`DatasetClient`.
:class:`DatasetClient` inherits from :class:`DataClientBase` and
provides more methods within a dataset scope, such as :meth:`DatasetClient.get_segment`,
:meth:`DatasetClient.commit <DatasetClientBase.commit>` and
:meth:`DatasetClient.upload_segment`.
In contrast to :class:`FusionDatasetClient`, a
:class:`DatasetClient` has only one sensor.
"""
def _generate_segments(
self, offset: int = 0, limit: int = 128
) -> Generator[Segment, None, int]:
response = self._list_segments(offset, limit)
for item in response["segments"]:
segment = Segment._from_client( # pylint: disable=protected-access
SegmentClient(item["name"], self)
)
segment.description = item["description"]
yield segment
return response["totalCount"] # type: ignore[no-any-return]
def _generate_segment_diffs(
self, basehead: str, offset: int = 0, limit: int = 128
) -> Generator[SegmentDiff, None, int]:
params: Dict[str, Any] = {"offset": offset, "limit": limit}
response = self._client.open_api_do(
"GET", f"diffs/{basehead}/segments", self._dataset_id, params=params
).json()
for segment_diff_response in response["segments"]:
segment_name = segment_diff_response["name"]
data_diffs = self._list_data_diffs(basehead, segment_name)
segment_diff = SegmentDiff(segment_name, segment_diff_response["action"], data_diffs)
yield segment_diff
return response["totalCount"] # type: ignore[no-any-return]
def _generate_data_diffs(
self, basehead: str, segment_name: str, offset: int = 0, limit: int = 128
) -> Generator[DataDiff, None, int]:
params: Dict[str, Any] = {"offset": offset, "limit": limit}
response = self._client.open_api_do(
"GET", f"diffs/{basehead}/segments/{segment_name}/data", self._dataset_id, params=params
).json()
for data in response["data"]:
yield DataDiff.loads(data)
return response["totalCount"] # type: ignore[no-any-return]
def _list_data_diffs(self, basehead: str, segment_name: str) -> PagingList[DataDiff]:
return PagingList(
lambda offset, limit: self._generate_data_diffs(basehead, segment_name, offset, limit),
128,
)
def _list_segment_instances(self) -> PagingList[Segment]:
return PagingList(self._generate_segments, 128)
    def _upload_segment(
        self,
        segment: Segment,
        *,
        jobs: int = 1,
        skip_uploaded_files: bool = False,
        pbar: Tqdm,
    ) -> SegmentClient:
        """Upload a local segment's data to TensorBay with a thread pool.

        Arguments:
            segment: The local segment whose data is uploaded.
            jobs: Number of concurrent upload threads.
            skip_uploaded_files: When True, data whose remote path already
                exists in the remote segment is skipped.
            pbar: Progress bar; skipped items are counted via
                ``pbar.update_for_skip``.

        Returns:
            The :class:`SegmentClient` of the uploaded segment.
        """
        segment_client = self.get_or_create_segment(segment.name)
        # Lazy filter: drop RemoteData (already on TensorBay); the predicate
        # also ticks the progress bar for each skipped item as a side effect.
        all_data: Iterator[Union[AuthData, Data]] = filter(
            lambda data: pbar.update_for_skip(not isinstance(data, RemoteData)),
            segment,  # type: ignore[arg-type]
        )
        if not skip_uploaded_files:
            segment_filter = all_data
        else:
            # Snapshot the remote paths once, then skip matches lazily.
            done_set = set(segment_client.list_data_paths())
            segment_filter = filter(
                lambda data: pbar.update_for_skip(data.target_remote_path not in done_set),
                all_data,
            )
        multithread_upload(
            # pylint: disable=protected-access
            segment_client._upload_or_import_data,
            segment_filter,
            callback=segment_client._synchronize_upload_info,
            jobs=jobs,
            pbar=pbar,
        )
        return segment_client
def get_or_create_segment(self, name: str = "default") -> SegmentClient:
"""Get or create a segment with the given name.
Arguments:
name: The name of the fusion segment.
Returns:
The created | |
0, np.pi) * (2 * l + 1) / (2 * l * (l + 1)) * a**2 / psi(l, kI * a) * kI**2 / (l * (l + 1))
mBL[ii] = np.sqrt(2) * quadl(lambda th: eHriL(a, th, np.pi / 4) * legendrePl(l, np.cos(
th)) * np.sin(th), 0, np.pi) * (2 * l + 1) / (2 * l * (l + 1)) * a**2 / psi(l, kI * a) * kI**2 / (l * (l + 1))
eBR[ii] = np.sqrt(2) * quadl(lambda th: eEriR(a, th, np.pi / 4) * legendrePl(l, np.cos(
th)) * np.sin(th), 0, np.pi) * (2 * l + 1) / (2 * l * (l + 1)) * a**2 / psi(l, kI * a) * kI**2 / (l * (l + 1))
mBR[ii] = np.sqrt(2) * quadl(lambda th: eHriR(a, th, np.pi / 4) * legendrePl(l, np.cos(
th)) * np.sin(th), 0, np.pi) * (2 * l + 1) / (2 * l * (l + 1)) * a**2 / psi(l, kI * a) * kI**2 / (l * (l + 1))
        # make symmetrical with the left expansion coefficients
eBR[ii] = eBR[ii] * (-1)**(l - 1)
        # make symmetrical with the left expansion coefficients
mBR[ii] = mBR[ii] * (-1)**l
# Zeroth-order coefficients for internal fields (eCl, mCl) and scattered
# fields (eDl, mDl), left (L) and right (R) incidence.
# NOTE(review): psi/xi look like Riccati-Bessel functions matched at the
# sphere boundary r = a (Mie-type expansion) -- confirm against their
# definitions earlier in the file.
eCL = np.zeros(lmax, dtype=complex)  # electric, internal, left
mCL = np.zeros(lmax, dtype=complex)  # magnetic, internal, left
eCR = np.zeros(lmax, dtype=complex)  # electric, internal, right
mCR = np.zeros(lmax, dtype=complex)  # magnetic, internal, right
eDL = np.zeros(lmax, dtype=complex)  # electric, scattered, left
mDL = np.zeros(lmax, dtype=complex)  # magnetic, scattered, left
eDR = np.zeros(lmax, dtype=complex)  # electric, scattered, right
mDR = np.zeros(lmax, dtype=complex)  # magnetic, scattered, right
for ii in range(lmax):
    # Multipole order l runs from 1 (arrays are 0-based).
    l = ii + 1
    # internal (left and right)
    eCL[ii] = k1I / kI * (kII)**2 * (xi(l, kI * a) * psi1(l, kI * a) - xi1(l, kI * a) * psi(l, kI * a)) / (
        k1I * kII * xi(l, kI * a) * psi1(l, kII * a) - k1II * kI * xi1(l, kI * a) * psi(l, kII * a)) * eBL[ii]
    mCL[ii] = k2I / kI * (kII)**2 * (xi(l, kI * a) * psi1(l, kI * a) - xi1(l, kI * a) * psi(l, kI * a)) / (
        k2I * kII * xi(l, kI * a) * psi1(l, kII * a) - k2II * kI * xi1(l, kI * a) * psi(l, kII * a)) * mBL[ii]
    eCR[ii] = k1I / kI * (kII)**2 * (xi(l, kI * a) * psi1(l, kI * a) - xi1(l, kI * a) * psi(l, kI * a)) / (
        k1I * kII * xi(l, kI * a) * psi1(l, kII * a) - k1II * kI * xi1(l, kI * a) * psi(l, kII * a)) * eBR[ii]
    mCR[ii] = k2I / kI * (kII)**2 * (xi(l, kI * a) * psi1(l, kI * a) - xi1(l, kI * a) * psi(l, kI * a)) / (
        k2I * kII * xi(l, kI * a) * psi1(l, kII * a) - k2II * kI * xi1(l, kI * a) * psi(l, kII * a)) * mBR[ii]
    # scattered (left and right)
    eDL[ii] = (k1I * kII * psi(l, kI * a) * psi1(l, kII * a) - k1II * kI * psi1(l, kI * a) * psi(l, kII * a)) / \
        (k1II * kI * xi1(l, kI * a) * psi(l, kII * a) - k1I *
         kII * xi(l, kI * a) * psi1(l, kII * a)) * eBL[ii]
    mDL[ii] = (k2I * kII * psi(l, kI * a) * psi1(l, kII * a) - k2II * kI * psi1(l, kI * a) * psi(l, kII * a)) / \
        (k2II * kI * xi1(l, kI * a) * psi(l, kII * a) - k2I *
         kII * xi(l, kI * a) * psi1(l, kII * a)) * mBL[ii]
    eDR[ii] = (k1I * kII * psi(l, kI * a) * psi1(l, kII * a) - k1II * kI * psi1(l, kI * a) * psi(l, kII * a)) / \
        (k1II * kI * xi1(l, kI * a) * psi(l, kII * a) - k1I *
         kII * xi(l, kI * a) * psi1(l, kII * a)) * eBR[ii]
    mDR[ii] = (k2I * kII * psi(l, kI * a) * psi1(l, kII * a) - k2II * kI * psi1(l, kI * a) * psi(l, kII * a)) / \
        (k2II * kI * xi1(l, kI * a) * psi(l, kII * a) - k2I *
         kII * xi(l, kI * a) * psi1(l, kII * a)) * mBR[ii]
# First Order Expansion Coefficients
# coefficients for internal fields (eCcl, mCcl) and scattered fields
# (eDdl, mDdl)
# Each dict maps multipole order l to a callable of the angular variable x.
eLambda1L = {}
mLambda1L = {}
eLambda2L = {}
mLambda2L = {}
eLambda3L = {}
mLambda3L = {}
eLambda1R = {}
mLambda1R = {}
eLambda2R = {}
mLambda2R = {}
eLambda3R = {}
mLambda3R = {}
for jj in range(lmax):
l = jj + 1
# left
eLambda1L[l] = lambda x, l=l, jj=jj: (eBL[jj] / kI * psi1ex(l, kI * a, x) - eCL[jj] / kII * psi1ex(
l, kII * a, x) + eDL[jj] / kI * xi1ex(l, kI * a, x)) # electric parameter1 left
mLambda1L[l] = lambda x, l=l, jj=jj: (mBL[jj] / kI * psi1ex(l, kI * a, x) - mCL[jj] / kII * psi1ex(
l, kII * a, x) + mDL[jj] / kI * xi1ex(l, kI * a, x)) # magnetic parameter1 left
eLambda2L[l] = lambda x, l=l, jj=jj: (k1I / kI**2 * eBL[jj] * psiex(l, kI * a, x) - k1II / kII**2 * eCL[jj] * psiex(
l, kII * a, x) + k1I / kI**2 * eDL[jj] * xiex(l, kI * a, x)) # electric parameter2 left
mLambda2L[l] = lambda x, l=l, jj=jj: (k2I / kI**2 * mBL[jj] * psiex(l, kI * a, x) - k2II / kII**2 * mCL[jj] * psiex(
l, kII * a, x) + k2I / kI**2 * mDL[jj] * xiex(l, kI * a, x)) # magnetic parameter2 left
eLambda3L[l] = lambda x, l=l, jj=jj: (eBL[jj] * (psiex(l, kI * a, x) + psi2ex(l, kI * a, x)) * k1I - eCL[jj] * (psiex(l, kII * a, x) + psi2ex(l, kII * a, x)) * k1II
+ eDL[jj] * (xiex(l, kI * a, x) + xi2ex(l, kI * a, x)) * k1I) # electric parameter3 left
mLambda3L[l] = lambda x, l=l, jj=jj: (mBL[jj] * (psiex(l, kI * a, x) + psi2ex(l, kI * a, x)) * MuI - mCL[jj] * (psiex(l, kII * a, x) + psi2ex(l, kII * a, x)) * MuII
+ mDL[jj] * (xiex(l, kI * a, x) + xi2ex(l, kI * a, x)) * MuI) # magnetic parameter3 left
# right
eLambda1R[l] = lambda x, l=l, jj=jj: (eBR[jj] / kI * psi1ex(l, kI * a, x) - eCR[jj] / kII * psi1ex(
l, kII * a, x) + eDR[jj] / kI * xi1ex(l, kI * a, x)) # electric parameter1 right
mLambda1R[l] = lambda x, l=l, jj=jj: (mBR[jj] / kI * psi1ex(l, kI * a, x) - mCR[jj] / kII * psi1ex(
l, kII * a, x) + mDR[jj] / kI * xi1ex(l, kI * a, x)) # magnetic parameter1 right
eLambda2R[l] = lambda x, l=l, jj=jj: (k1I / kI**2 * eBR[jj] * psiex(l, kI * a, x) - k1II / kII**2 * eCR[jj] * psiex(
l, kII * a, x) + k1I / kI**2 * eDR[jj] * xiex(l, kI * a, x)) # electric parameter2 right
mLambda2R[l] = lambda x, l=l, jj=jj: (k2I / kI**2 * mBR[jj] * psiex(l, kI * a, x) - k2II / kII**2 * mCR[jj] * psiex(
l, kII * a, x) + k2I / kI**2 * mDR[jj] * xiex(l, kI * a, x)) # magnetic parameter2 right
eLambda3R[l] = lambda x, l=l, jj=jj: (eBR[jj] * (psiex(l, kI * a, x) + psi2ex(l, kI * a, | |
import tensorflow as tf
import math
import numpy as np
import os
import json
from utils import create_cell_starts, normalize_image, colors_to_int, COLOR_DIMS
DIR = os.path.join('.', 'dataset', 'processed')
class Dataset:
    def __init__(self, image_size, grid_size, validation_split=0.15, max_crop=0.1,
                 saturation=0.5, exposure=0.2, noise_dev=0.1,
                 hue=0.1):
        """Load all labelled resistor images from the processed dataset dir.

        Arguments:
            image_size: Target square image size in pixels.
            grid_size: Output detection grid size (cells per side).
            validation_split: Fraction of base hashes held out for validation.
            max_crop: Maximum relative minor-crop offset per side.
            saturation / exposure / hue / noise_dev: Augmentation strengths.
        """
        self.image_size = image_size
        self.grid_size = grid_size
        self.validation_split = validation_split
        self.max_crop = max_crop
        self.saturation = saturation
        self.exposure = exposure
        self.noise_dev = noise_dev
        self.hue = hue

        # Every .jpg in DIR has a sibling .json label file.
        self.images = [
            os.path.join(DIR, f)
            for f in os.listdir(DIR)
            if f.endswith('.jpg')
        ]
        # Base hash = filename prefix before the first '_'; used for the
        # train/validation split so variants of one photo stay together.
        self.base_hashes = sorted(
            list(set([ f.split('_', 1)[0] for f in self.images ])))

        self.polygons = []
        self.colors = []
        max_polys = 0
        for image_file in self.images:
            json_file = image_file[:-4] + '.json'
            with open(json_file, 'r') as f:
                raw_labels = json.load(f)
            image_polys = []
            image_colors = []
            for resistor in raw_labels['resistors']:
                poly = resistor['polygon']
                colors = resistor['colors']
                # Only keep well-formed labels: a quad and six color bands.
                if len(poly) != 4 or len(colors) != 6:
                    continue
                poly_points = [ [ float(p['x']), float(p['y']) ] for p in poly ]
                image_polys.append(poly_points)
                image_colors.append(colors_to_int(colors))
            self.polygons.append(image_polys)
            self.colors.append(image_colors)
            max_polys = max(max_polys, len(image_polys))

        # Pad polygons so every image has the same number of (dummy) entries.
        # Dummy points sit far outside any image and are filtered out later.
        # NOTE(review): `4 * [ [...] ]` aliases one inner list four times;
        # harmless here because np.array() below copies the values.
        for image_polys in self.polygons:
            while (len(image_polys) < max_polys):
                image_polys.append(4 * [ [ -1000.0, -1000.0 ] ])
        for image_colors in self.colors:
            while (len(image_colors) < max_polys):
                image_colors.append(6 * [ 0 ])

        # Just to have stable shape for empty validation data
        self.polygons = np.array(self.polygons)
        self.colors = np.array(self.colors)
def load(self):
validation_count = int(len(self.base_hashes) * self.validation_split)
validation_hashes = set(self.base_hashes[:validation_count])
validation_images = []
validation_indices = []
training_images = []
training_indices = []
for i, image in enumerate(self.images):
if image.split('_', 1)[0] in validation_hashes:
validation_images.append(image)
validation_indices.append(i)
else:
training_images.append(image)
training_indices.append(i)
print('Training dataset has {} images'.format(len(training_images)))
print('Validation dataset has {} images'.format(len(validation_images)))
# Do this trick to preserve shape
training_polygons = self.polygons[training_indices]
training_colors = self.colors[training_indices]
validation_polygons = self.polygons[validation_indices]
validation_colors = self.colors[validation_indices]
validation = self.load_single(validation_images, validation_polygons, \
validation_colors)
training = self.load_single(training_images, training_polygons, \
training_colors)
training = training.map(lambda img, polys, colors: \
self.process_image(img, polys, colors, True))
validation = validation.map(lambda img, polys, colors: \
self.process_image(img, polys, colors, False))
return training, validation
def load_single(self, images, polygons, colors):
dataset = tf.data.Dataset.from_tensor_slices( \
(tf.constant(images, dtype=tf.string), \
tf.constant(polygons, dtype=tf.float32), \
tf.constant(colors, dtype=tf.int32),))
dataset = dataset.map(lambda img, polys, colors: \
(self.load_image(img), polys, colors,))
dataset.cache()
return dataset.shuffle(buffer_size=10000)
def load_image(self, image):
image = tf.read_file(image)
image = tf.image.decode_jpeg(image, channels=3)
return image
    def process_image(self, image, polygons, colors, training):
        """Crop, resize, and (for training) augment one image with its labels.

        Arguments:
            image: Decoded uint8 image tensor.
            polygons: Padded polygon coordinates in original-image pixels.
            colors: Padded per-polygon color labels.
            training: Python bool; enables random augmentation.

        Returns:
            (normalized image, detection grid) -- see `create_grid`.
        """
        #
        # Do a major crop to fit image into a square
        #
        image, polygons = self.major_crop(image, polygons, training)
        if training:
            #
            # Do a rotation on full-size image
            #
            image, polygons = self.random_rotate(image, polygons)
        #
        # Do a minor crop
        #
        image, polygons = self.minor_crop(image, polygons)
        #
        # Resize all images to target size
        #
        crop_size = tf.shape(image)[0]
        # Guard against upscaling: the crop must be at least image_size.
        check_size = tf.assert_greater_equal(crop_size, self.image_size)
        with tf.control_dependencies([ check_size ]):
            image = tf.image.resize_images(image,
                [ self.image_size, self.image_size ],
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        # Rescale polygon coordinates to the resized image frame.
        polygons = polygons * float(self.image_size) / \
            tf.cast(crop_size, dtype=tf.float32)
        #
        # Change image's type and value range
        #
        image = tf.cast(image, dtype=tf.float32)
        image /= 255.0
        #
        # Color/exposure manipulation, rotation
        #
        if training:
            image = tf.image.rgb_to_hsv(image)
            # Color
            h, s, v = tf.split(image, [ 1, 1, 1 ], axis=-1)
            # Saturation
            saturation_coeff = tf.random_uniform([], 1.0 - self.saturation,
                1.0 + self.saturation)
            s *= saturation_coeff
            s = tf.clip_by_value(s, 0.0, 1.0)
            # Exposure
            exposure_coeff = tf.random_uniform([], 1.0 - self.exposure,
                1.0 + self.exposure)
            v *= exposure_coeff
            v = tf.clip_by_value(v, 0.0, 1.0)
            image = tf.concat([ h, s, v ], axis=-1)
            image = tf.image.hsv_to_rgb(image)
            # TODO(indutny): change hue above too
            image = tf.image.random_hue(image, self.hue)
            # Rotation: 0-3 quarter turns, applied to polygons as well.
            rot_count = tf.random_uniform([], 0, 4, dtype=tf.int32)
            image = tf.image.rot90(image, rot_count)
            polygons = self.rot90_polygons(image, polygons, rot_count)
        #
        # Add gaussian noise
        #
        if training:
            image += tf.random_normal(shape=tf.shape(image), stddev=self.noise_dev)
            image = tf.clip_by_value(image, 0.0, 1.0)
        image = normalize_image(image)
        # Drop polygons pushed outside the frame by cropping/rotation.
        polygons, colors = self.filter_polygons(polygons, colors, self.image_size)
        return image, self.create_grid(polygons, colors)
def random_rotate(self, image, polygons):
angle = tf.random_uniform([], 0.0, math.pi / 2.0)
image = tf.contrib.image.rotate(image, angle)
polygons = self.rotate_polygons(image, polygons, angle)
return image, polygons
def major_crop(self, image, polygons, training):
size = tf.shape(image)[:2]
width = size[1]
height = size[0]
crop_size = tf.reduce_min(size, axis=-1, name='crop_size')
size_delta = size - crop_size
if training:
# Random crop for training
crop_off = tf.cast(size_delta, dtype=tf.float32) * \
tf.random_uniform([ 2 ])
crop_off = tf.cast(crop_off, dtype=tf.int32)
else:
# Central crop for validation
crop_off = size_delta // 2
image = tf.image.crop_to_bounding_box(image, crop_off[0], crop_off[1], \
crop_size, crop_size)
polygons = self.crop_polygons(polygons, crop_off, crop_size)
return image, polygons
    def minor_crop(self, image, polygons):
        """Apply a small random symmetric crop, bounded so that the result
        never shrinks below `image_size` (which would force upscaling)."""
        width = tf.shape(image)[0]
        float_width = tf.cast(width, dtype=tf.float32)
        # Tiny epsilon avoids division by zero for degenerate images.
        max_allowed_crop = 1 - self.image_size / (float_width + 1e-23)
        # Halved because the crop is taken from both sides.
        max_allowed_crop /= 2.0
        max_allowed_crop = tf.minimum(self.max_crop, max_allowed_crop)
        crop_off = tf.random_uniform([], 0.0, max_allowed_crop) * float_width
        crop_off = tf.cast(crop_off, dtype=tf.int32)
        crop_size = width - 2 * crop_off
        image = tf.image.crop_to_bounding_box(image, crop_off, crop_off,
            crop_size, crop_size)
        # `crop_polygons` expects 2-dim offset and size
        crop_off = tf.tile(tf.expand_dims(crop_off, axis=0), [ 2 ])
        crop_size = tf.tile(tf.expand_dims(crop_size, axis=0), [ 2 ])
        polygons = self.crop_polygons(polygons, crop_off, crop_size)
        return image, polygons
def crop_polygons(self, polygons, crop_off, crop_size):
# NOTE: `crop_off = [ height, width ]`
polygons -= tf.cast(tf.gather(crop_off, [ 1, 0 ]), dtype=tf.float32)
return polygons
def filter_polygons(self, polygons, colors, image_size):
polygon_centers = tf.reduce_mean(polygons, axis=1)
# Coordinate-wise mask
polygon_mask = tf.logical_and(polygon_centers >= 0.0, \
polygon_centers <= tf.cast(image_size, dtype=tf.float32))
# Polygon-wise mask
polygon_mask = tf.logical_and(polygon_mask[:, 0], polygon_mask[:, 1])
return tf.where(polygon_mask, polygons, -tf.ones_like(polygons)), \
tf.where(polygon_mask, colors, tf.zeros_like(colors))
def rot90_polygons(self, image, polygons, rot_count):
angle = (math.pi / 2.0) * tf.cast(rot_count, dtype=tf.float32)
return self.rotate_polygons(image, polygons, angle)
def rotate_polygons(self, image, polygons, angle):
cos = tf.cos(angle)
sin = tf.sin(angle)
matrix = tf.reshape(tf.stack([ cos, -sin, sin, cos ]), shape=[ 2, 2 ])
# Flatten
old_shape = polygons.shape
polygons = tf.reshape(polygons, [ old_shape[0] * old_shape[1], 2 ])
# Rotate
center = tf.cast(tf.gather(tf.shape(image)[:2], [ 1, 0 ]),
dtype=tf.float32) / 2.0
polygons = tf.matmul(polygons - center, matrix) + center
# Restore shape
polygons = tf.reshape(polygons, old_shape)
return polygons
    def create_grid(self, polygons, colors):
        """Rasterize rectangles onto a [grid, grid, 1, features] target grid.

        Each grid cell receives the first rectangle whose center lies in the
        cell (YOLO-style assignment); empty cells stay all-zero.
        """
        rects = self.polygons_to_rects(polygons, colors)
        cell_starts = create_cell_starts(self.grid_size)
        # Broadcast
        center = tf.expand_dims(rects['center'], axis=0)
        center = tf.expand_dims(center, axis=0, name='broadcast_center')
        # Center coordinates relative to each cell's origin.
        center -= cell_starts
        rect_count = rects['center'].shape[0]
        # Test
        is_in_cell = tf.logical_and(center >= 0.0, center < 1 / self.grid_size)
        is_in_cell = tf.reduce_min(tf.cast(is_in_cell, dtype=tf.float32), axis=-1,
            name='is_in_cell')
        is_non_empty_cell = tf.reduce_max(is_in_cell, axis=-1, keepdims=True,
            name='is_non_empty_cell')
        # One-hot selection of the first matching rect; zeroed for empty cells.
        first_in_cell = tf.one_hot(tf.argmax(is_in_cell, axis=-1), depth=rect_count,
            axis=-1, name='first_in_cell') * is_non_empty_cell
        # Tile sizes, angles, and confidence
        rest = rects['rest']
        rest = tf.reshape(rest, [ 1, 1, rest.shape[0], rest.shape[-1] ])
        rest = tf.tile(rest, [ self.grid_size, self.grid_size, 1, 1 ],
            name='broadcast_rest')
        # Rescale center so that it would be in [ 0, 1) range
        center *= float(self.grid_size)
        rect = tf.concat([ center, rest ], axis=-1)
        grid = tf.expand_dims(first_in_cell, axis=-1) * rect
        grid = tf.reduce_sum(grid, axis=2, name='shallow_grid')
        # Add extra dimension for grid depth
        grid = tf.expand_dims(grid, axis=2, name='grid')
        return grid
  def polygons_to_rects(self, polygons, colors):
    """Convert quadrilateral polygons to rotated-rectangle parameters.

    Returns a dict with:
      'center': centroids rescaled to fractions of `image_size`,
      'rest':   per-polygon concatenation of size, angle encoded as
                (cos, sin), confidence, and one-hot colors.
    """
    center = tf.reduce_mean(polygons, axis=1)
    p0, p1, p2, p3 = \
        polygons[:, 0], polygons[:, 1], polygons[:, 2], polygons[:, 3]
    diag02 = p0 - p2
    diag13 = p1 - p3
    # Mean diagonal length (both diagonals are equal for a true rectangle).
    diag = (tf.norm(diag02, axis=-1) + tf.norm(diag13, axis=-1)) / 2.0
    v01 = p0 - p1
    v03 = p0 - p3
    v21 = p2 - p1
    v23 = p2 - p3
    # Quad area as the sum of the two triangles sharing the 1-3 diagonal.
    area = self.triangle_area(v01, v03) + self.triangle_area(v21, v23)
    # Compute box width/height using quadratic equation:
    # w * h = area and w^2 + h^2 = diag^2, solved via the discriminant.
    disc = tf.sqrt(diag ** 4 - 4 * area ** 2)
    # NOTE: `abs` is added just in case, to prevent nan on disc close to 0
    width = tf.sqrt(tf.abs(diag ** 2 + disc) / 2.0)
    height = tf.sqrt(tf.abs(diag ** 2 - disc) / 2.0)
    size = tf.stack([ width, height ], axis=1)
    # Find longest side
    sides = tf.stack([ v01, v03, v21, v23 ], axis=1)
    side_lens = tf.norm(sides, axis=-1)
    max_side_i = tf.argmax(side_lens, axis=1)
    max_side_hot = tf.expand_dims(tf.one_hot(max_side_i, 4), axis=-1)
    max_side = tf.reduce_sum(max_side_hot * sides, axis=1)
    # Orientation of the longest side, normalized into [0, pi) and encoded
    # as (cos, sin) to avoid the wrap-around discontinuity at 0/pi.
    angle = tf.atan2(max_side[:, 1], max_side[:, 0])
    angle = tf.where(angle < 0.0, angle + math.pi, angle)
    angle = tf.stack([ tf.cos(angle), tf.sin(angle) ], axis=-1, name='angle')
    # Rescale offsets, sizes to be a percent of image size
    center /= float(self.image_size)
    size /= float(self.image_size)
    rect_count = center.shape[0]
    # Ground-truth rectangles always carry confidence 1.
    confidence = tf.ones([ rect_count, 1 ], dtype=tf.float32)
    rest = [ size, angle, confidence ]
    # One-hot encode each color channel; channel widths come from COLOR_DIMS.
    for i, max_val in enumerate(COLOR_DIMS):
      color = tf.one_hot(colors[:, i], max_val, dtype=tf.float32)
      rest.append(color)
    rest = tf.concat(rest, axis=-1)
    return { 'center': center, 'rest': rest }
def triangle_area(self, side1, side2):
return tf.abs(side1[:, | |
/ client["num_ingress" + word + "_packets"], 2),
round(cfg["dut2_num_egress" + word + "_bytes"] / 1000000000, 2), cfg["dut2_num_egress" + word + "_packets"],
round(cfg["dut2_num_egress" + word + "_bytes"] / cfg["dut2_num_egress" + word + "_packets"], 2),
cfg["dut2_real"]])
for server in cfg["loadgen_servers"]:
table.append([cfg["dut1_real"], round(cfg["dut1_num_ingress" + word + "_bytes"] / 1000000000, 2),
cfg["dut1_num_ingress" + word + "_packets"],
round(cfg["dut1_num_ingress" + word + "_bytes"] / cfg["dut1_num_ingress" + word + "_packets"], 2),
round(server["num_egress" + word + "_bytes"] / 1000000000, 2), server["num_egress" + word + "_packets"],
round(server["num_egress" + word + "_bytes"] / server["num_egress" + word + "_packets"], 2),
server["real_port"]])
except Exception as e:
table.append(["An error occurred while creating the table."])
f.write(tabulate(table, tablefmt="fancy_grid")) # creates table with the help of tabulate module
def p4_dev_results(self, file_id):
#TODO: stability: what if exception?
time_created = "not available"
try:
time_created = time.strftime('%H:%M:%S %d.%m.%Y', time.localtime(int(file_id)))
except:
pass
try:
with open(P4STA_utils.get_results_path(file_id) + "/p4_dev_" + str(file_id) + ".json", "r") as file:
sw = json.load(file)
except Exception as e:
print(self.red("CORE Exception: " + traceback.format_exc()))
if sw["delta_counter"] != 0:
average = sw["total_deltas"]/sw["delta_counter"]
else:
average = 0
range_delta = sw["max_delta"] - sw["min_delta"]
sw["average"] = calculate.find_unit([average])
sw["min_delta"] = calculate.find_unit(sw["min_delta"])
sw["max_delta"] = calculate.find_unit(sw["max_delta"])
sw["range"] = calculate.find_unit(range_delta)
sw["pkt"] = sw["delta_counter"]
sw["time"] = time_created
sw["filename"] = file_id
###################################################
########## compute avg packet sizes ###############
########## compute total throughput ###############
###################################################
# all packets
sw["dut1_avg_packet_size_ingress"] = sw["dut1_avg_packet_size_egress"] = sw["dut2_avg_packet_size_ingress"] = sw["dut2_avg_packet_size_egress"] = 0
if sw["dut1_num_ingress_packets"] > 0: sw["dut1_avg_packet_size_ingress"] = round(sw["dut1_num_ingress_bytes"]/sw["dut1_num_ingress_packets"], 1)
if sw["dut1_num_egress_packets"] > 0: sw["dut1_avg_packet_size_egress"] = round(sw["dut1_num_egress_bytes"]/sw["dut1_num_egress_packets"], 1)
if sw["dut2_num_ingress_packets"] > 0: sw["dut2_avg_packet_size_ingress"] = round(sw["dut2_num_ingress_bytes"]/sw["dut2_num_ingress_packets"], 1)
if sw["dut2_num_egress_packets"] > 0: sw["dut2_avg_packet_size_egress"] = round(sw["dut2_num_egress_bytes"]/sw["dut2_num_egress_packets"], 1)
sw["dut1_throughput_gbyte_ingress"] = round(sw["dut1_num_ingress_bytes"]/1000000000, 2)
sw["dut1_throughput_gbyte_egress"] = round(sw["dut1_num_egress_bytes"]/1000000000, 2)
sw["dut2_throughput_gbyte_ingress"] = round(sw["dut2_num_ingress_bytes"]/1000000000, 2)
sw["dut2_throughput_gbyte_egress"] = round(sw["dut2_num_egress_bytes"]/1000000000, 2)
for port in sw["loadgen_servers"] + sw["loadgen_clients"]:
port["avg_packet_size_ingress"] = port["avg_packet_size_egress"] = 0
if port["num_ingress_packets"] > 0:
port["avg_packet_size_ingress"] = round(port["num_ingress_bytes"]/port["num_ingress_packets"], 1)
if port["num_egress_packets"] > 0:
port["avg_packet_size_egress"] = round(port["num_egress_bytes"]/port["num_egress_packets"], 1)
port["throughput_gbyte_ingress"] = round(port["num_ingress_bytes"]/1000000000, 2)
port["throughput_gbyte_egress"] = round(port["num_egress_bytes"]/1000000000, 2)
# stamped packets
try:
sw["dut1_throughput_gbyte_ingress_stamped"] = round(sw["dut1_num_ingress_stamped_bytes"]/1000000000, 2)
sw["dut1_throughput_gbyte_egress_stamped"] = round(sw["dut1_num_egress_stamped_bytes"]/1000000000, 2)
sw["dut2_throughput_gbyte_ingress_stamped"] = round(sw["dut2_num_ingress_stamped_bytes"]/1000000000, 2)
sw["dut2_throughput_gbyte_egress_stamped"] = round(sw["dut2_num_egress_stamped_bytes"]/1000000000, 2)
sw["dut1_avg_packet_size_ingress_stamped"] = sw["dut1_avg_packet_size_egress_stamped"] = sw["dut2_avg_packet_size_ingress_stamped"] = sw["dut2_avg_packet_size_egress_stamped"] = 0
if sw["dut1_num_ingress_stamped_packets"] > 0: sw["dut1_avg_packet_size_ingress_stamped"] = round(sw["dut1_num_ingress_stamped_bytes"] / sw["dut1_num_ingress_stamped_packets"], 1)
if sw["dut1_num_egress_stamped_packets"] > 0: sw["dut1_avg_packet_size_egress_stamped"] = round(sw["dut1_num_egress_stamped_bytes"] / sw["dut1_num_egress_stamped_packets"], 1)
if sw["dut2_num_ingress_stamped_packets"] > 0: sw["dut2_avg_packet_size_ingress_stamped"] = round(sw["dut2_num_ingress_stamped_bytes"] / sw["dut2_num_ingress_stamped_packets"], 1)
if sw["dut2_num_egress_stamped_packets"] > 0: sw["dut2_avg_packet_size_egress_stamped"] = round(sw["dut2_num_egress_stamped_bytes"] / sw["dut2_num_egress_stamped_packets"], 1)
for port in sw["loadgen_servers"] + sw["loadgen_clients"]:
port["avg_packet_size_ingress_stamped"] = port["avg_packet_size_egress_stamped"] = 0
if port["num_ingress_stamped_packets"] > 0:
port["avg_packet_size_ingress_stamped"] = round(port["num_ingress_stamped_bytes"] / port["num_ingress_stamped_packets"], 1)
if port["num_egress_stamped_packets"] > 0:
port["avg_packet_size_egress_stamped"] = round(port["num_egress_stamped_bytes"] / port["num_egress_stamped_packets"], 1)
port["throughput_gbyte_ingress_stamped"] = round(port["num_ingress_stamped_bytes"] / 1000000000, 2)
port["throughput_gbyte_egress_stamped"] = round(port["num_egress_stamped_bytes"] / 1000000000, 2)
except:
pass # if target has stamped counter not implemented yet (html will be automatically empty)
###################################################
########## compute packet losses ##################
###################################################
# for all packets
if sw["dut1"] != sw["dut2"]: # if same only dut1 is used
sw["packet_loss_1"] = sw["dut1_num_egress_packets"] - sw["dut2_num_ingress_packets"]
if sw["packet_loss_1"] > 0:
sw["packet_loss_1_percent"] = round((sw["packet_loss_1"] / sw["dut1_num_egress_packets"]) * 100, 2)
else:
sw["packet_loss_1_percent"] = 0
sw["packet_loss_2"] = sw["dut2_num_egress_packets"] - sw["dut1_num_ingress_packets"]
if sw["packet_loss_2"] > 0:
sw["packet_loss_2_percent"] = round((sw["packet_loss_2"] / sw["dut2_num_egress_packets"]) * 100, 2)
else:
sw["packet_loss_2_percent"] = 0
else:
sw["packet_loss_1"] = abs(sw["dut1_num_egress_packets"] - sw["dut1_num_ingress_packets"])
divider = max(sw["dut1_num_egress_packets"], sw["dut1_num_ingress_packets"])
if sw["packet_loss_1"] > 0:
sw["packet_loss_1_percent"] = round((sw["packet_loss_1"] / divider) * 100, 2)
else:
sw["packet_loss_1_percent"] = 0
sw["packet_loss_2"] = "n/a"
# for stamped packets only
if sw["dut1"] != sw["dut2"]:
sw["packet_loss_stamped_1"] = sw["dut1_num_egress_stamped_packets"] - sw["dut2_num_ingress_stamped_packets"]
if sw["packet_loss_stamped_1"] > 0:
sw["packet_loss_stamped_1_percent"] = round((sw["packet_loss_stamped_1"]/sw["dut1_num_egress_stamped_packets"])*100, 2)
else:
sw["packet_loss_stamped_1_percent"] = 0
sw["packet_loss_stamped_2"] = sw["dut2_num_egress_stamped_packets"] - sw["dut1_num_ingress_stamped_packets"]
if sw["packet_loss_stamped_2"] > 0:
sw["packet_loss_stamped_2_percent"] = round((sw["packet_loss_stamped_2"]/sw["dut2_num_egress_stamped_packets"])*100, 2)
else:
sw["packet_loss_stamped_2_percent"] = 0
else:
sw["packet_loss_stamped_1"] = abs(sw["dut1_num_egress_stamped_packets"] - sw["dut1_num_ingress_stamped_packets"])
divider = max(sw["dut1_num_egress_stamped_packets"], sw["dut1_num_ingress_stamped_packets"])
if sw["packet_loss_stamped_1"] > 0:
sw["packet_loss_1_stamped_percent"] = round((sw["packet_loss_stamped_1"] / divider) * 100, 2)
else:
sw["packet_loss_1_stamped_percent"] = 0
sw["packet_loss_stamped_2"] = "n/a"
return sw
# resets registers in p4 device by overwriting them with 0
def reset(self):
target = self.target_obj(P4STA_utils.read_current_cfg()["selected_target"])
target.reset_p4_registers(P4STA_utils.read_current_cfg())
def p4_dev_status(self):
cfg = P4STA_utils.read_current_cfg()
target = self.target_obj(cfg["selected_target"])
lines_pm, running, dev_status = target.p4_dev_status(cfg)
for host in (cfg["loadgen_servers"] + cfg["loadgen_clients"]):
pingresp = (os.system("timeout 1 ping " + host["ssh_ip"] + " -c 1") == 0) # if ping works it should be true
host["reachable"] = pingresp
if pingresp:
output_host = subprocess.run(
[project_path + "/scripts/ethtool.sh", host["ssh_ip"], host["ssh_user"], host["loadgen_iface"]],
stdout=subprocess.PIPE)
pos = output_host.stdout.decode("utf-8").find("Link detected")
try:
if str(output_host.stdout.decode("utf-8")[pos + 15:pos + 18]) == "yes":
host["link"] = "up"
else:
host["link"] = "down"
except:
host["link"] = "error"
else:
host["link"] = "down"
return cfg, lines_pm, running, dev_status
def start_p4_dev_software(self):
target = self.target_obj(P4STA_utils.read_current_cfg()["selected_target"])
target.start_p4_dev_software(P4STA_utils.read_current_cfg())
def get_p4_dev_startup_log(self):
target = self.target_obj(P4STA_utils.read_current_cfg()["selected_target"])
return target.get_p4_dev_startup_log(P4STA_utils.read_current_cfg())
def stop_p4_dev_software(self):
target = self.target_obj(P4STA_utils.read_current_cfg()["selected_target"])
target.stop_p4_dev_software(P4STA_utils.read_current_cfg())
def reboot(self):
cfg = P4STA_utils.read_current_cfg()
for host in (cfg["loadgen_servers"]+cfg["loadgen_clients"]):
subprocess.run([project_path + "/scripts/reboot.sh", host["ssh_user"], host["ssh_ip"]], stdout=subprocess.PIPE)
def refresh_links(self):
cfg = P4STA_utils.read_current_cfg()
for host in (cfg["loadgen_servers"]+cfg["loadgen_clients"]):
subprocess.run([project_path + "/scripts/refresh_links.sh", host["ssh_user"], host["ssh_ip"], host["loadgen_iface"]])
def visualization(self):
target = self.target_obj(P4STA_utils.read_current_cfg()["selected_target"])
return target.visualization(P4STA_utils.read_current_cfg())
def set_new_measurement_id(self):
file_id = str(int(round(time.time()))) # generates name (time in sec since 1.1.1970)4
P4staCore.measurement_id = file_id
return file_id
def get_current_results_path(self):
return P4STA_utils.get_results_path(P4staCore.measurement_id)
def start_external(self):
file_id = str(P4staCore.measurement_id)
cfg = P4STA_utils.read_current_cfg()
target = self.target_obj(cfg["selected_target"])
lines_pm, running, dev_status = target.p4_dev_status(P4STA_utils.read_current_cfg())
# backup current config (e.g. ports, speed) to results directory
if not os.path.exists(self.get_current_results_path()):
os.makedirs(self.get_current_results_path())
shutil.copy(project_path + "/data/config.json", os.path.join(self.get_current_results_path(), "config_"+str(P4staCore.measurement_id)+".json") )
if cfg["selected_target"] != "bmv2":
multi = 1 # 1 = nanoseconds
else:
multi = 1000 # 1000 = microseconds
ext_py_dir = project_path + "/extHost/pythonExtHost"
if running:
output= subprocess.run([project_path + "/scripts/start_external.sh", file_id, cfg["ext_host_if"], cfg["ext_host_ssh"], cfg["ext_host_user"], ext_py_dir, str(multi)])
#print(output)
return running
    def stop_external(self):
        """Stop the external-host receiver and collect its results.

        Polls the external host until its receiver reports that all CSV
        files have been written, then retrieves them into the results
        directory.

        :return: True if the receiver was stopped and results retrieved,
                 False on any failure (including: no measurement started).
        """
        cfg = P4STA_utils.read_current_cfg()
        try:
            # measurement_id == -1 means no measurement was ever started
            if int(P4staCore.measurement_id) == -1:
                raise Exception
            out = subprocess.run([project_path + "/scripts/stop_external.sh", str(P4staCore.measurement_id), cfg["ext_host_ssh"], cfg["ext_host_user"], project_path])
            # NOTE(review): `input` shadows the builtin; it is the ssh command
            # used below to poll the receiver status on the external host.
            input = ["ssh", "-o", "StrictHostKeyChecking=no", cfg["ext_host_user"] + "@" + cfg["ext_host_ssh"], "cd p4sta/receiver; ./check_extH_status.sh; exit"]
            time.sleep(5)
            while True: #wait until exthost stopped
                time.sleep(1)
                res = subprocess.Popen(input, stdout=subprocess.PIPE).stdout
                result = res.read().decode()
                if result.find("1") > -1:
                    # if 1 is found by check_extH_status.sh at external host, receiver has finished saving csv files
                    break
            out = subprocess.run([project_path + "/scripts/retrieve_external_results.sh", str(P4staCore.measurement_id), cfg["ext_host_ssh"], cfg["ext_host_user"], self.get_current_results_path()])
            stoppable = True
        except:
            # any failure above (including the deliberate raise) lands here
            stoppable = False
        #subprocess.run([project_path + "/scripts/stop_all_py.sh", cfg["ext_host_ssh"], cfg["ext_host_user"]])
        # kills mininet and the CLI too .. not good
        self.read_p4_device()
        self.external_results(str(P4staCore.measurement_id))
        return stoppable
# displays results from external host python receiver from return of calculate module
def external_results(self, measurement_id):
cfg = self.read_result_cfg(str(measurement_id))
extH_results = calculate.main(str(measurement_id), cfg["multicast"], P4STA_utils.get_results_path(measurement_id))
ipdv_range = extH_results["max_ipdv"] - extH_results["min_ipdv"]
pdv_range = extH_results["max_pdv"] - extH_results["min_pdv"]
rate_jitter_range = extH_results["max_packets_per_second"] - extH_results["min_packets_per_second"]
latency_range = extH_results["max_latency"] - extH_results["min_latency"]
f = open(P4STA_utils.get_results_path(measurement_id) + "/output_external_host_" + str(measurement_id) + ".txt", "w+")
f.write("Results from externel Host for every " + str(cfg["multicast"] + ". packet") + "\n")
f.write("Raw packets: " + str(extH_results["num_raw_packets"]) + " Processed packets: " + str(
extH_results["num_processed_packets"]) + " Total throughput: " + str(
extH_results["total_throughput"]) + " Megabytes \n")
f.write("Min latency: " + str(calculate.find_unit(extH_results["min_latency"])[0][0]) + " " + str(
calculate.find_unit(extH_results["min_latency"])[1]))
f.write(" Max latency: " + str(calculate.find_unit(extH_results["max_latency"])[0][0]) + " " + str(
calculate.find_unit(extH_results["max_latency"])[1]))
f.write(" Average latency: " + str(calculate.find_unit(extH_results["avg_latency"])[0][0]) + " " + str(
calculate.find_unit(extH_results["avg_latency"])[1]) + "\n")
f.write("Min IPDV: " + str(calculate.find_unit(extH_results["min_ipdv"])[0][0]) + " " + str(
calculate.find_unit(extH_results["min_ipdv"])[1]) + "\n")
f.write("Max IPDV: " + str(calculate.find_unit(extH_results["max_ipdv"])[0][0]) + " " + str(
calculate.find_unit(extH_results["max_ipdv"])[1]) + "\n")
f.write("Average IPDV: " + str(calculate.find_unit(extH_results["avg_ipdv"])[0][0]) + " " + str(
calculate.find_unit(extH_results["avg_ipdv"])[1])
+ " and abs(): " + str(calculate.find_unit(extH_results["avg_abs_ipdv"])[0][0]) + " " + str(
calculate.find_unit(extH_results["avg_abs_ipdv"])[1]) + "\n")
f.write("Min PDV: " + str(calculate.find_unit(extH_results["min_pdv"])[0][0]) + " " + str(
calculate.find_unit(extH_results["min_pdv"])[1]) + "\n")
f.write("Max PDV: " + str(calculate.find_unit(extH_results["max_pdv"])[0][0]) + " " + str(
calculate.find_unit(extH_results["max_pdv"])[1]) + "\n")
f.write("Average PDV: " + str(calculate.find_unit(extH_results["avg_pdv"])[0][0]) + " " + str(
calculate.find_unit(extH_results["avg_pdv"])[1]) + "\n")
f.write("Min packet/s: " + str(extH_results["min_packets_per_second"]) + " Max packet/s: " + str(
extH_results["max_packets_per_second"]) + " Average packet/s: " + str(
extH_results["avg_packets_per_second"]) + "\n")
f.close()
def fetch_interface(self, ssh_user, ssh_ip, iface):
try:
lines = subprocess.run([project_path + "/core/scripts/fetch.sh", ssh_user, ssh_ip, iface], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")
mac_line = ""
ipv4_line = ""
for l in range(0, len(lines)):
if lines[l].find(iface) > -1:
try:
for i in range(0, 10):
if lines[l + i].find("ether") > -1 or lines[l + i].find("HWaddr") > -1:
mac_line = lines[l + i]
break
except:
mac_line = ""
try:
for i in range(0, 10):
found = lines[l + i].find("inet ")
if found > -1: | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Assignment(pulumi.CustomResource):
    """A Policy Assignment resource: applies a Policy (Set) Definition at a
    given Scope.  Auto-generated by the Pulumi Terraform Bridge; the
    attributes below are the resource's output properties.
    """
    description: pulumi.Output[str]
    """
    A description to use for this Policy Assignment. Changing this forces a new resource to be created.
    """
    display_name: pulumi.Output[str]
    """
    A friendly display name to use for this Policy Assignment. Changing this forces a new resource to be created.
    """
    identity: pulumi.Output[dict]
    """
    An `identity` block.
      * `principal_id` (`str`) - The Principal ID of this Policy Assignment if `type` is `SystemAssigned`.
      * `tenant_id` (`str`) - The Tenant ID of this Policy Assignment if `type` is `SystemAssigned`.
      * `type` (`str`) - The Managed Service Identity Type of this Policy Assignment. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you), or `None` (no use of a Managed Service Identity).
    """
    location: pulumi.Output[str]
    """
    The Azure location where this policy assignment should exist. This is required when an Identity is assigned. Changing this forces a new resource to be created.
    """
    name: pulumi.Output[str]
    """
    The name of the Policy Assignment. Changing this forces a new resource to be created.
    """
    not_scopes: pulumi.Output[list]
    """
    A list of the Policy Assignment's excluded scopes. The list must contain Resource IDs (such as Subscriptions e.g. `/subscriptions/00000000-0000-0000-000000000000` or Resource Groups e.g.`/subscriptions/00000000-0000-0000-000000000000/resourceGroups/myResourceGroup`).
    """
    parameters: pulumi.Output[str]
    """
    Parameters for the policy definition. This field is a JSON object that maps to the Parameters field from the Policy Definition. Changing this forces a new resource to be created.
    """
    policy_definition_id: pulumi.Output[str]
    """
    The ID of the Policy Definition to be applied at the specified Scope.
    """
    scope: pulumi.Output[str]
    """
    The Scope at which the Policy Assignment should be applied, which must be a Resource ID (such as Subscription e.g. `/subscriptions/00000000-0000-0000-000000000000` or a Resource Group e.g.`/subscriptions/00000000-0000-0000-000000000000/resourceGroups/myResourceGroup`). Changing this forces a new resource to be created.
    """
    def __init__(__self__, resource_name, opts=None, description=None, display_name=None, identity=None, location=None, name=None, not_scopes=None, parameters=None, policy_definition_id=None, scope=None, __props__=None, __name__=None, __opts__=None):
        """
        Configures the specified Policy Definition at the specified Scope. Also, Policy Set Definitions are supported.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        example_definition = azure.policy.Definition("exampleDefinition",
            policy_type="Custom",
            mode="All",
            display_name="my-policy-definition",
            policy_rule=\"\"\" {
            "if": {
              "not": {
                "field": "location",
                "in": "[parameters('allowedLocations')]"
              }
            },
            "then": {
              "effect": "audit"
            }
          }
        \"\"\",
            parameters=\"\"\" {
            "allowedLocations": {
              "type": "Array",
              "metadata": {
                "description": "The list of allowed locations for resources.",
                "displayName": "Allowed locations",
                "strongType": "location"
              }
            }
          }
        \"\"\")
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_assignment = azure.policy.Assignment("exampleAssignment",
            scope=example_resource_group.id,
            policy_definition_id=example_definition.id,
            description="Policy Assignment created via an Acceptance Test",
            display_name="My Example Policy Assignment",
            parameters=\"\"\"{
          "allowedLocations": {
            "value": [ "West Europe" ]
          }
        }
        \"\"\")
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A description to use for this Policy Assignment. Changing this forces a new resource to be created.
        :param pulumi.Input[str] display_name: A friendly display name to use for this Policy Assignment. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] identity: An `identity` block.
        :param pulumi.Input[str] location: The Azure location where this policy assignment should exist. This is required when an Identity is assigned. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: The name of the Policy Assignment. Changing this forces a new resource to be created.
        :param pulumi.Input[list] not_scopes: A list of the Policy Assignment's excluded scopes. The list must contain Resource IDs (such as Subscriptions e.g. `/subscriptions/00000000-0000-0000-000000000000` or Resource Groups e.g.`/subscriptions/00000000-0000-0000-000000000000/resourceGroups/myResourceGroup`).
        :param pulumi.Input[str] parameters: Parameters for the policy definition. This field is a JSON object that maps to the Parameters field from the Policy Definition. Changing this forces a new resource to be created.
        :param pulumi.Input[str] policy_definition_id: The ID of the Policy Definition to be applied at the specified Scope.
        :param pulumi.Input[str] scope: The Scope at which the Policy Assignment should be applied, which must be a Resource ID (such as Subscription e.g. `/subscriptions/00000000-0000-0000-000000000000` or a Resource Group e.g.`/subscriptions/00000000-0000-0000-000000000000/resourceGroups/myResourceGroup`). Changing this forces a new resource to be created.
        The **identity** object supports the following:
          * `principal_id` (`pulumi.Input[str]`) - The Principal ID of this Policy Assignment if `type` is `SystemAssigned`.
          * `tenant_id` (`pulumi.Input[str]`) - The Tenant ID of this Policy Assignment if `type` is `SystemAssigned`.
          * `type` (`pulumi.Input[str]`) - The Managed Service Identity Type of this Policy Assignment. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you), or `None` (no use of a Managed Service Identity).
        """
        # Backwards compatibility: map deprecated __name__/__opts__ keyword
        # arguments onto resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # When opts.id is set this is a lookup of an existing resource, in
        # which case __props__ must come from Assignment.get(), not a caller.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['description'] = description
            __props__['display_name'] = display_name
            __props__['identity'] = identity
            __props__['location'] = location
            __props__['name'] = name
            __props__['not_scopes'] = not_scopes
            __props__['parameters'] = parameters
            # policy_definition_id and scope are the only required inputs
            if policy_definition_id is None:
                raise TypeError("Missing required property 'policy_definition_id'")
            __props__['policy_definition_id'] = policy_definition_id
            if scope is None:
                raise TypeError("Missing required property 'scope'")
            __props__['scope'] = scope
        super(Assignment, __self__).__init__(
            'azure:policy/assignment:Assignment',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name, id, opts=None, description=None, display_name=None, identity=None, location=None, name=None, not_scopes=None, parameters=None, policy_definition_id=None, scope=None):
"""
Get an existing Assignment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description to use for this Policy Assignment. Changing this forces a new resource to be created.
:param pulumi.Input[str] display_name: A friendly display name to use for this Policy Assignment. Changing this forces a new resource to be created.
:param pulumi.Input[dict] identity: An `identity` block.
:param pulumi.Input[str] location: The Azure location where this policy assignment should exist. This is required when an Identity is assigned. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Policy Assignment. Changing this forces a new resource to be created.
:param pulumi.Input[list] not_scopes: A list of the Policy Assignment's excluded scopes. The list must contain Resource IDs (such as Subscriptions e.g. `/subscriptions/00000000-0000-0000-000000000000` or Resource Groups e.g.`/subscriptions/00000000-0000-0000-000000000000/resourceGroups/myResourceGroup`).
:param pulumi.Input[str] parameters: Parameters for the policy definition. This field is a JSON object that maps to the Parameters field from the Policy Definition. Changing this forces a new resource to be created.
:param pulumi.Input[str] policy_definition_id: The ID of the Policy Definition to be applied at the specified Scope.
:param pulumi.Input[str] scope: The Scope at which the Policy Assignment should be applied, which must be a Resource ID (such as Subscription e.g. `/subscriptions/00000000-0000-0000-000000000000` or a Resource Group e.g.`/subscriptions/00000000-0000-0000-000000000000/resourceGroups/myResourceGroup`). Changing this forces a new resource to be created.
The **identity** object supports the following:
* `principal_id` (`pulumi.Input[str]`) - The Principal ID of this Policy Assignment if `type` is `SystemAssigned`.
* `tenant_id` (`pulumi.Input[str]`) - The Tenant ID of this Policy Assignment if `type` is `SystemAssigned`.
* `type` (`pulumi.Input[str]`) - The Managed Service Identity Type of this Policy Assignment. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you), or `None` (no use of a Managed Service Identity).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["description"] = description
__props__["display_name"] = display_name
__props__["identity"] = identity
__props__["location"] = location
__props__["name"] = name
__props__["not_scopes"] = not_scopes
__props__["parameters"] = parameters
__props__["policy_definition_id"] = policy_definition_id
__props__["scope"] = scope
return Assignment(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or | |
URL to which the above requests will be
rewritten by.
:type destination: str
:param preserve_unmatched_path: Whether to preserve unmatched path. Default value is true.
:type preserve_unmatched_path: bool
"""
    # msrest validation rules: required fields and the constant discriminator.
    _validation = {
        'odata_type': {'required': True, 'constant': True},
        'source_pattern': {'required': True},
        'destination': {'required': True},
    }
    # Maps Python attribute names to wire (JSON) keys and types.
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'source_pattern': {'key': 'sourcePattern', 'type': 'str'},
        'destination': {'key': 'destination', 'type': 'str'},
        'preserve_unmatched_path': {'key': 'preserveUnmatchedPath', 'type': 'bool'},
    }
    # Constant value filled by the service; never set by callers.
    odata_type = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlRewriteActionParameters"
    def __init__(
        self,
        *,
        source_pattern: str,
        destination: str,
        preserve_unmatched_path: Optional[bool] = None,
        **kwargs
    ):
        """Initialize the URL rewrite parameters; see the class docstring
        for field semantics."""
        super(UrlRewriteActionParameters, self).__init__(**kwargs)
        self.source_pattern = source_pattern
        self.destination = destination
        self.preserve_unmatched_path = preserve_unmatched_path
class UrlSigningAction(DeliveryRuleAction):
    """Delivery-rule action that applies URL signing.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the action for the delivery rule.Constant filled by server.
     Possible values include: "CacheExpiration", "CacheKeyQueryString", "ModifyRequestHeader",
     "ModifyResponseHeader", "UrlRedirect", "UrlRewrite", "UrlSigning", "OriginGroupOverride".
    :type name: str or ~azure.mgmt.cdn.models.DeliveryRuleActionEnum
    :param parameters: Required. Defines the parameters for the action.
    :type parameters: ~azure.mgmt.cdn.models.UrlSigningActionParameters
    """

    # msrest validation/serialization metadata
    _validation = {'name': {'required': True}, 'parameters': {'required': True}}
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlSigningActionParameters'},
    }

    def __init__(self, *, parameters: "UrlSigningActionParameters", **kwargs):
        """Create the action with its required ``parameters``."""
        super(UrlSigningAction, self).__init__(**kwargs)
        # Discriminator value fixed by the service for this action type.
        self.name = 'UrlSigning'  # type: str
        self.parameters = parameters
class UrlSigningActionParameters(msrest.serialization.Model):
    """Parameters of the URL Signing delivery-rule action.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odata_type: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlSigningActionParameters".
    :vartype odata_type: str
    :param algorithm: Algorithm to use for URL signing. Possible values include: "SHA256".
    :type algorithm: str or ~azure.mgmt.cdn.models.Algorithm
    :param parameter_name_override: Defines which query string parameters in the url to be
     considered for expires, key id etc.
    :type parameter_name_override: list[~azure.mgmt.cdn.models.UrlSigningParamIdentifier]
    """

    # msrest validation/serialization metadata
    _validation = {'odata_type': {'required': True, 'constant': True}}
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'algorithm': {'key': 'algorithm', 'type': 'str'},
        'parameter_name_override': {'key': 'parameterNameOverride', 'type': '[UrlSigningParamIdentifier]'},
    }

    # Constant discriminator filled by the service.
    odata_type = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlSigningActionParameters"

    def __init__(
        self,
        *,
        algorithm: Optional[Union[str, "Algorithm"]] = None,
        parameter_name_override: Optional[List["UrlSigningParamIdentifier"]] = None,
        **kwargs
    ):
        """Create the parameters; both fields are optional."""
        super(UrlSigningActionParameters, self).__init__(**kwargs)
        self.algorithm = algorithm
        self.parameter_name_override = parameter_name_override
class UrlSigningKey(msrest.serialization.Model):
    """Url signing key.

    All required parameters must be populated in order to send to Azure.

    :param key_id: Required. Defines the customer defined key Id. This id will exist in the
     incoming request to indicate the key used to form the hash.
    :type key_id: str
    :param key_source_parameters: Required. Defines the parameters for using customer key vault for
     Url Signing Key.
    :type key_source_parameters: ~azure.mgmt.cdn.models.KeyVaultSigningKeyParameters
    """

    # Both fields must be present before the model can be serialized.
    _validation = {
        'key_id': {'required': True},
        'key_source_parameters': {'required': True},
    }

    # Attribute name -> wire-format key and msrest (de)serialization type.
    _attribute_map = {
        'key_id': {'key': 'keyId', 'type': 'str'},
        'key_source_parameters': {'key': 'keySourceParameters', 'type': 'KeyVaultSigningKeyParameters'},
    }

    def __init__(self, *, key_id: str,
                 key_source_parameters: "KeyVaultSigningKeyParameters",
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.key_id = key_id
        self.key_source_parameters = key_source_parameters
class UrlSigningKeyParameters(SecretParameters):
    """Url signing key parameters.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of the Secret to create. Constant filled by server. Possible
     values include: "UrlSigningKey", "CustomerCertificate", "ManagedCertificate".
    :type type: str or ~azure.mgmt.cdn.models.SecretType
    :param key_id: Required. Defines the customer defined key Id. This id will exist in the
     incoming request to indicate the key used to form the hash.
    :type key_id: str
    :param secret_source: Required. Resource reference to the KV secret.
    :type secret_source: ~azure.mgmt.cdn.models.ResourceReference
    :param secret_version: Version of the secret to be used.
    :type secret_version: str
    """

    _validation = {
        'type': {'required': True},
        'key_id': {'required': True},
        'secret_source': {'required': True},
    }

    # Attribute name -> wire-format key and msrest (de)serialization type.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'key_id': {'key': 'keyId', 'type': 'str'},
        'secret_source': {'key': 'secretSource', 'type': 'ResourceReference'},
        'secret_version': {'key': 'secretVersion', 'type': 'str'},
    }

    def __init__(self, *, key_id: str,
                 secret_source: "ResourceReference",
                 secret_version: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        # Discriminator value identifying this SecretParameters subtype.
        self.type = 'UrlSigningKey'  # type: str
        self.key_id = key_id
        self.secret_source = secret_source
        self.secret_version = secret_version
class UrlSigningParamIdentifier(msrest.serialization.Model):
    """Defines how to identify a parameter for a specific purpose e.g. expires.

    All required parameters must be populated in order to send to Azure.

    :param param_indicator: Required. Indicates the purpose of the parameter. Possible values
     include: "Expires", "KeyId", "Signature".
    :type param_indicator: str or ~azure.mgmt.cdn.models.ParamIndicator
    :param param_name: Required. Parameter name.
    :type param_name: str
    """

    _validation = {
        'param_indicator': {'required': True},
        'param_name': {'required': True},
    }

    # Attribute name -> wire-format key and msrest (de)serialization type.
    _attribute_map = {
        'param_indicator': {'key': 'paramIndicator', 'type': 'str'},
        'param_name': {'key': 'paramName', 'type': 'str'},
    }

    def __init__(self, *,
                 param_indicator: Union[str, "ParamIndicator"],
                 param_name: str,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.param_indicator = param_indicator
        self.param_name = param_name
class Usage(msrest.serialization.Model):
    """Describes resource usage.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource identifier.
    :vartype id: str
    :param unit: Required. An enum describing the unit of measurement. Possible values include:
     "Count".
    :type unit: str or ~azure.mgmt.cdn.models.UsageUnit
    :param current_value: Required. The current value of the usage.
    :type current_value: long
    :param limit: Required. The limit of usage.
    :type limit: long
    :param name: Required. The name of the type of usage.
    :type name: ~azure.mgmt.cdn.models.UsageName
    """

    # 'id' is supplied by the service; everything else is mandatory on send.
    _validation = {
        'id': {'readonly': True},
        'unit': {'required': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }

    # Attribute name -> wire-format key and msrest (de)serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'long'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    def __init__(self, *,
                 unit: Union[str, "UsageUnit"],
                 current_value: int,
                 limit: int,
                 name: "UsageName",
                 **kwargs) -> None:
        super().__init__(**kwargs)
        # Read-only on the wire; populated by the service during deserialization.
        self.id = None
        self.unit = unit
        self.current_value = current_value
        self.limit = limit
        self.name = name
class UsageName(msrest.serialization.Model):
    """The usage names.

    :param value: A string describing the resource name.
    :type value: str
    :param localized_value: A localized string describing the resource name.
    :type localized_value: str
    """

    # Attribute name -> wire-format key and msrest (de)serialization type.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, *,
                 value: Optional[str] = None,
                 localized_value: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.value = value
        self.localized_value = localized_value
class UsagesListResult(msrest.serialization.Model):
    """The list usages operation response.

    :param value: The list of resource usages.
    :type value: list[~azure.mgmt.cdn.models.Usage]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    # Attribute name -> wire-format key and msrest (de)serialization type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Usage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *,
                 value: Optional[List["Usage"]] = None,
                 next_link: Optional[str] = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.value = value
        # None when there are no further result pages to fetch.
        self.next_link = next_link
class UserManagedHttpsParameters(CustomDomainHttpsParameters):
    """Defines the certificate source parameters using user's keyvault certificate for enabling SSL.

    All required parameters must be populated in order to send to Azure.

    :param certificate_source: Required. Defines the source of the SSL certificate. Constant filled
     by server. Possible values include: "AzureKeyVault", "Cdn".
    :type certificate_source: str or ~azure.mgmt.cdn.models.CertificateSource
    :param protocol_type: Required. Defines the TLS extension protocol that is used for secure
     delivery. Possible values include: "ServerNameIndication", "IPBased".
    :type protocol_type: str or ~azure.mgmt.cdn.models.ProtocolType
    :param minimum_tls_version: TLS protocol version that will be used for Https. Possible values
     include: "None", "TLS10", "TLS12".
    :type minimum_tls_version: str or ~azure.mgmt.cdn.models.MinimumTlsVersion
    :param certificate_source_parameters: Required. Defines the certificate source parameters using
     user's keyvault certificate for enabling SSL.
    :type certificate_source_parameters: ~azure.mgmt.cdn.models.KeyVaultCertificateSourceParameters
    """

    _validation = {
        'certificate_source': {'required': True},
        'protocol_type': {'required': True},
        'certificate_source_parameters': {'required': True},
    }

    # Attribute name -> wire-format key and msrest (de)serialization type.
    _attribute_map = {
        'certificate_source': {'key': 'certificateSource', 'type': 'str'},
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'str'},
        'certificate_source_parameters': {'key': 'certificateSourceParameters', 'type': 'KeyVaultCertificateSourceParameters'},
    }

    def __init__(self, *,
                 protocol_type: Union[str, "ProtocolType"],
                 certificate_source_parameters: "KeyVaultCertificateSourceParameters",
                 minimum_tls_version: Optional[Union[str, "MinimumTlsVersion"]] = None,
                 **kwargs) -> None:
        super().__init__(protocol_type=protocol_type, minimum_tls_version=minimum_tls_version, **kwargs)
        # Discriminator value identifying this CustomDomainHttpsParameters subtype.
        self.certificate_source = 'AzureKeyVault'  # type: str
        self.certificate_source_parameters = certificate_source_parameters
class ValidateCustomDomainInput(msrest.serialization.Model):
    """Input of the custom domain to be validated for DNS mapping.

    All required parameters must be populated in order to send to Azure.

    :param host_name: Required. The host name of the custom domain. Must be a domain name.
    :type host_name: str
    """

    _validation = {
        'host_name': {'required': True},
    }

    # Attribute name -> wire-format key and msrest (de)serialization type.
    _attribute_map = {
        'host_name': {'key': 'hostName', 'type': 'str'},
    }

    def __init__(self, *, host_name: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.host_name = host_name
class ValidateCustomDomainOutput(msrest.serialization.Model):
"""Output of custom domain validation.
Variables are only populated by the server, and will be ignored when | |
<filename>dpu_utils/utils/richpath.py
import binascii
import gzip
import json
import os
import glob
import fnmatch
import shutil
import time
import zlib
import io
import pickle
import logging
import tempfile
import re
from abc import ABC, abstractmethod
from collections import OrderedDict
from functools import total_ordering
from typing import Any, List, Optional, Iterable, Callable
from azure.storage.blob import BlockBlobService
from azure.common import AzureHttpError
from dpu_utils.utils.dataloading import save_json_gz, save_jsonl_gz
# Scheme prefix that marks a path as referring to Azure blob storage.
AZURE_PATH_PREFIX = "azure://"
# Public API of this module.
__all__ = ['RichPath', 'LocalPath', 'AzurePath']
@total_ordering
class RichPath(ABC):
    """
    RichPath is an abstraction layer over local and remote paths allowing unified access
    to both local and remote files. Currently, only local and Azure blob paths are supported.

    To use Azure paths, a .json configuration file needs to be passed to the
    `RichPath.create()` function. The file has the format:
    ```
    {
        "storage_account_name": {
            "sas_token": "the SAS token for the account",
            "cache_location": "optional location to cache blobs locally"
        }
        ...
    }
    ```
    Where `storage_account_name` is the name of the storage account in the Azure portal.
    Multiple storage accounts can be placed in a single file. This allows to address blobs
    and "directories" as `azure://storage_account_name/container_name/path/to/blob`. The
    Azure SAS token can be retrieved from the Azure portal or from the Azure Storage Explorer.

    If an external library requires a local path, you can ensure that a `RichPath`
    object represents a local (possibly cached) object by returning
    ```
    original_object.to_local_path().path
    ```
    which will download the remote object(s), if needed, and provide a local path.
    """

    def __init__(self, path: str):
        self.__path = path

    @property
    def path(self) -> str:
        """The wrapped path string (without any scheme prefix)."""
        return self.__path

    @staticmethod
    def create(path: str, azure_info_path: Optional[str]=None):
        """Create a RichPath object based on the input path.

        Paths starting with `azure://` yield an `AzurePath` and require
        `azure_info_path`, a .json configuration as described in the class docstring;
        any other path yields a `LocalPath`.
        """
        if path.startswith(AZURE_PATH_PREFIX):
            assert azure_info_path is not None, "An AzurePath cannot be created when azure_info_path is None."
            # Strip off the AZURE_PATH_PREFIX and split into account/container/blob path:
            path = path[len(AZURE_PATH_PREFIX):]
            account_name, container_name, path = path.split('/', 2)

            with open(azure_info_path, 'r') as azure_info_file:
                azure_info = json.load(azure_info_file)
            account_info = azure_info.get(account_name)
            if account_info is None:
                raise Exception("Could not find access information for account '%s'!" % (account_name,))

            # Either a SAS token or an account key can be used for authentication.
            sas_token = account_info.get('sas_token')
            account_key = account_info.get('account_key')
            if sas_token is not None:
                assert not sas_token.startswith('?'), 'SAS tokens should not start with "?". Just delete it.'  # https://github.com/Azure/azure-storage-python/issues/301
                blob_service = BlockBlobService(account_name=account_name,
                                                sas_token=sas_token)
            elif account_key is not None:
                blob_service = BlockBlobService(account_name=account_name,
                                                account_key=account_key)
            else:
                raise Exception("Access to Azure storage account '%s' requires either account_key or sas_token!" % (
                    account_name,
                ))

            # ERROR is too verbose; in particular when downloading based on etags an error is
            # emitted when a blob download is aborted.
            logging.getLogger('azure.storage').setLevel(logging.CRITICAL)

            # Replace ${ENV_VAR}-style environment variables in the cache location.
            cache_location = account_info.get('cache_location')
            if cache_location is not None:
                def replace_by_env_var(m) -> str:
                    env_var_name = m.group(1)
                    env_var_value = os.environ.get(env_var_name)
                    if env_var_value is not None:
                        return env_var_value
                    else:
                        # Unknown variables are left as their bare name.
                        return env_var_name
                # Raw string: '\$' in a plain literal is an invalid escape sequence and
                # triggers a Deprecation-/SyntaxWarning on modern Python versions.
                cache_location = re.sub(r'\${([^}]+)}', replace_by_env_var, cache_location)
            return AzurePath(path,
                             azure_container_name=container_name,
                             azure_blob_service=blob_service,
                             cache_location=cache_location)
        else:
            return LocalPath(path)

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        # total_ordering fills in the remaining comparisons from this and __eq__.
        return self.path < other.path

    @abstractmethod
    def is_dir(self) -> bool:
        """Return True if this path refers to a directory (or blob prefix)."""
        pass

    @abstractmethod
    def is_file(self) -> bool:
        """Return True if this path refers to an existing file/blob."""
        pass

    @abstractmethod
    def make_as_dir(self) -> None:
        """Create this path as a directory, where that concept exists."""
        pass

    @abstractmethod
    def read_as_binary(self) -> bytes:
        """Read possibly compressed binary file."""
        pass

    @abstractmethod
    def save_as_compressed_file(self, data: Any) -> None:
        """Serialize `data` to this path; the format is chosen by the path's suffix."""
        pass

    def read_as_text(self) -> str:
        """Read the file and decode it as UTF-8 text."""
        return self.read_as_binary().decode('utf-8')

    def read_as_json(self) -> Any:
        """Parse the file as JSON, preserving key order."""
        return json.loads(self.read_as_text(), object_pairs_hook=OrderedDict)

    def read_as_jsonl(self, error_handling: Optional[Callable[[str, Exception], None]]=None) -> Iterable[Any]:
        """
        Parse JSONL files. See http://jsonlines.org/ for more.

        :param error_handling: a callable that receives the original line and the exception object and takes
            over how parse error handling should happen.
        :return: an iterator of the parsed objects of each line.
        """
        for line in self.read_as_text().splitlines():
            try:
                yield json.loads(line, object_pairs_hook=OrderedDict)
            except Exception as e:
                if error_handling is None:
                    raise
                else:
                    error_handling(line, e)

    @abstractmethod
    def read_as_pickle(self) -> Any:
        """Unpickle the (possibly compressed) file contents."""
        pass

    def read_by_file_suffix(self) -> Any:
        """Dispatch to the appropriate reader based on the path's suffix."""
        if self.path.endswith('.json.gz') or self.path.endswith('.json'):
            return self.read_as_json()
        elif self.path.endswith('.jsonl.gz') or self.path.endswith('.jsonl'):
            return self.read_as_jsonl()
        elif self.path.endswith('.pkl.gz') or self.path.endswith('.pkl'):
            return self.read_as_pickle()
        raise ValueError('File suffix must be .json, .json.gz, .pkl or .pkl.gz: %s' % self.path)

    def get_filtered_files_in_dir(self, file_pattern: str) -> List['RichPath']:
        """Eager version of `iterate_filtered_files_in_dir`."""
        return list(self.iterate_filtered_files_in_dir(file_pattern))

    @abstractmethod
    def iterate_filtered_files_in_dir(self, file_pattern: str) -> Iterable['RichPath']:
        """Yield the files below this directory matching the glob `file_pattern`."""
        pass

    @abstractmethod
    def join(self, filename: str) -> 'RichPath':
        """Return a new path with `filename` appended below this one."""
        pass

    @abstractmethod
    def basename(self) -> str:
        """Return the final component of the path."""
        pass

    @abstractmethod
    def get_size(self) -> int:
        """Return the size of the file in bytes."""
        pass

    @abstractmethod
    def exists(self) -> bool:
        """Return True if this path exists."""
        pass

    @abstractmethod
    def to_local_path(self) -> 'LocalPath':
        """Return a local (possibly cached) version of this path, downloading if needed."""
        pass

    @abstractmethod
    def relpath(self, base: 'RichPath') -> str:
        """Return this path expressed relative to `base`."""
        pass

    def copy_from(self, source_path: 'RichPath', overwrite_ok: bool=True) -> None:
        """Copy `source_path` (file or directory tree) into this path."""
        if source_path.is_dir():
            assert self.is_dir() or not self.exists(), 'Source path is a directory, but the target is a file.'
            for file in source_path.iterate_filtered_files_in_dir('*'):
                target_file_path = self.join(file.relpath(source_path))
                target_file_path.copy_from(file, overwrite_ok=overwrite_ok)
        else:
            if not overwrite_ok and self.exists():
                raise Exception('Overwriting file when copying.')
            self._copy_from_file(source_path)

    def _copy_from_file(self, from_file: 'RichPath') -> None:
        """Default implementation for copying a file into another. This converts the from_file to a local path
        and copies from there."""
        assert from_file.exists()
        self._copy_from_local_file(from_file.to_local_path())

    @abstractmethod
    def _copy_from_local_file(self, local_file: 'LocalPath') -> None:
        """Copy the given local file into this path."""
        pass
class LocalPath(RichPath):
    """RichPath implementation backed by the local filesystem."""

    def __init__(self, path: str):
        super().__init__(path)

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.path == other.path

    def __hash__(self):
        return hash(self.path)

    def __repr__(self):
        return self.path

    def is_dir(self) -> bool:
        return os.path.isdir(self.path)

    def is_file(self) -> bool:
        return os.path.isfile(self.path)

    def make_as_dir(self):
        os.makedirs(self.path, exist_ok=True)

    def relpath(self, base: 'LocalPath') -> str:
        assert isinstance(base, LocalPath)
        return os.path.relpath(self.path, base.path)

    def read_as_binary(self) -> bytes:
        """Read the raw file, transparently decompressing gzip."""
        if self.__is_gzipped(self.path):
            with gzip.open(self.path) as f:
                return f.read()
        else:
            with open(self.path, 'rb') as f:
                return f.read()

    def read_as_pickle(self) -> Any:
        """Unpickle the file, transparently decompressing gzip."""
        if self.__is_gzipped(self.path):
            with gzip.open(self.path) as f:
                return pickle.load(f)
        else:
            with open(self.path, 'rb') as f:
                return pickle.load(f)

    @staticmethod
    def __is_gzipped(filename: str) -> bool:
        # Detect gzip by its two-byte magic number 0x1f 0x8b.
        with open(filename, 'rb') as f:
            return binascii.hexlify(f.read(2)) == b'1f8b'

    def save_as_compressed_file(self, data: Any) -> None:
        """Write `data` in the compressed format implied by the path's suffix."""
        if self.path.endswith('.json.gz'):
            save_json_gz(data, self.path)
        elif self.path.endswith('.jsonl.gz'):
            save_jsonl_gz(data, self.path)
        elif self.path.endswith('.pkl.gz'):
            with gzip.GzipFile(self.path, 'wb') as outfile:
                pickle.dump(data, outfile)
        else:
            # BUGFIX: the message previously omitted the supported .jsonl.gz suffix.
            raise ValueError('File suffix must be .json.gz, .jsonl.gz or .pkl.gz: %s' % self.path)

    def iterate_filtered_files_in_dir(self, file_pattern: str) -> Iterable['LocalPath']:
        yield from (LocalPath(path)
                    for path in glob.iglob(os.path.join(self.path, file_pattern), recursive=True))

    def join(self, filename: str) -> 'LocalPath':
        return LocalPath(os.path.join(self.path, filename))

    def basename(self) -> str:
        return os.path.basename(self.path)

    def get_size(self) -> int:
        return os.stat(self.path).st_size

    def exists(self) -> bool:
        return os.path.exists(self.path)

    def to_local_path(self) -> 'LocalPath':
        return self

    def _copy_from_local_file(self, local_file: 'LocalPath') -> None:
        # BUGFIX: os.path.dirname() returns '' for a bare filename, and
        # os.makedirs('') raises FileNotFoundError — only create a directory
        # when the target path actually has a directory component.
        target_dir = os.path.dirname(self.path)
        if target_dir:
            os.makedirs(target_dir, exist_ok=True)
        shutil.copy2(src=local_file.path, dst=self.path)
class AzurePath(RichPath):
def __init__(self, path: str, azure_container_name: str, azure_blob_service: BlockBlobService,
cache_location: Optional[str]):
super().__init__(path)
self.__container_name = azure_container_name
self.__blob_service = azure_blob_service
self.__cache_location = cache_location
def __eq__(self, other):
return (self.__class__ == other.__class__
and self.path == other.path
and self.__container_name == other.__container_name
and self.__blob_service == other.__blob_service)
    def __hash__(self):
        # Hash only the blob path; cross-account collisions are resolved by __eq__.
        return hash(self.path)
def __repr__(self):
return "%s%s/%s/%s" % (AZURE_PATH_PREFIX, self.__blob_service.account_name, self.__container_name, self.path)
def is_dir(self) -> bool:
blob_list = self.__blob_service.list_blobs(self.__container_name, self.path, num_results=1)
try:
blob = next(iter(blob_list))
if blob.name == self.path:
# Listing this, yields the path itself, thus it's a file.
return False
return True
except StopIteration:
return False # This path does not exist, return False by convention, similar to os.path.isdir()
def is_file(self) -> bool:
return not self.is_dir() and self.exists()
    def relpath(self, base: 'AzurePath') -> str:
        """Return this blob path expressed relative to `base` (also an AzurePath)."""
        assert isinstance(base, AzurePath)
        return os.path.relpath(self.path, base.path)
    def make_as_dir(self) -> None:
        # Note: Directories don't really exist in blob storage.
        # Instead filenames may contain / -- thus, we have nothing to do here.
        pass
def read_as_binary(self) -> bytes:
if self.__cache_location is None:
return self.__read_as_binary()
cached_file_path = self.__cache_file_locally()
return cached_file_path.read_as_binary()
    @property
    def __cached_file_path(self) -> str:
        # Local mirror location: <cache_location>/<container>/<blob path>.
        return os.path.join(self.__cache_location, self.__container_name, self.path)
    def __cache_file_locally(self, num_retries: int=1) -> LocalPath:
        """Download the blob into the local cache, reusing the cached copy when its
        stored etag still matches, and return a LocalPath to the cached file."""
        cached_file_path = self.__cached_file_path
        cached_file_path_etag = cached_file_path+'.etag' # Create an .etag file containing the object etag
        old_etag = None
        if os.path.exists(cached_file_path_etag):
            with open(cached_file_path_etag) as f:
                old_etag = f.read()
        try:
            os.makedirs(os.path.dirname(cached_file_path), exist_ok=True)
            # The next invocation to the blob service may fail and delete the current file. Store it elsewhere
            new_filepath = cached_file_path+'.new'
            # if_none_match makes the service answer HTTP 304 when the blob is unchanged.
            blob = self.__blob_service.get_blob_to_path(self.__container_name, self.path, new_filepath,
                                                        if_none_match=old_etag)
            os.rename(new_filepath, cached_file_path)
            with open(cached_file_path_etag, 'w') as f:
                f.write(blob.properties.etag)
        except AzureHttpError as aze:
            # NOTE(review): on HTTP 304 the download is aborted and `new_filepath` may never
            # have been created, in which case this os.remove raises FileNotFoundError — confirm.
            os.remove(new_filepath)
            if aze.status_code != 304: # HTTP 304: Not Modified
                raise
        except Exception as e:
            if os.path.exists(cached_file_path):
                os.remove(cached_file_path) # On failure, remove the cached file, if it exits.
            # NOTE(review): the .etag file is removed without an existence check and may be missing here.
            os.remove(cached_file_path_etag)
            if num_retries == 0:
                raise
            else:
                # NOTE(review): the retry's return value is discarded; the final return below
                # resolves to the same cached path, so this only works because the path is fixed.
                self.__cache_file_locally(num_retries-1)
        return LocalPath(cached_file_path)
def __read_as_binary(self) -> bytes:
with io.BytesIO() as stream:
self.__blob_service.get_blob_to_stream(self.__container_name, self.path, stream)
stream.seek(0)
if binascii.hexlify(stream.read(2)) != b'1f8b':
stream.seek(0)
return stream.read()
stream.seek(0)
decompressor = zlib.decompressobj(32 + zlib.MAX_WBITS)
decompressed_data = decompressor.decompress(stream.read())
return decompressed_data
def read_as_pickle(self) -> Any:
if | |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
from datetime import datetime
import warnings
import time
import numpy as np
import pandas as pd
from scipy import stats
from numpy import dot, exp
from numpy.linalg import solve, norm, inv
from scipy.linalg import solve as spsolve
from lifelines.fitters import BaseFitter
from lifelines.fitters.coxph_fitter import CoxPHFitter
from lifelines.statistics import chisq_test
from lifelines.utils import (inv_normal_cdf,
significance_code, normalize, significance_codes_as_text,
pass_for_numeric_dtypes_or_raise, check_low_var,
check_for_overlapping_intervals, check_complete_separation_low_variance,
ConvergenceWarning, StepSizer, _get_index, check_for_immediate_deaths,
check_for_instantaneous_events, ConvergenceError, check_nans_or_infs, string_justify,
)
class CoxTimeVaryingFitter(BaseFitter):
"""
This class implements fitting Cox's time-varying proportional hazard model:
h(t|x(t)) = h_0(t)*exp(x(t)'*beta)
Parameters:
alpha: the level in the confidence intervals.
penalizer: the coefficient of an l2 penalizer in the regression
"""
def __init__(self, alpha=0.95, penalizer=0.0):
if not (0 < alpha <= 1.):
raise ValueError('alpha parameter must be between 0 and 1.')
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
self.alpha = alpha
self.penalizer = penalizer
    def fit(self, df, id_col, event_col, start_col='start', stop_col='stop', weights_col=None, show_progress=False, step_size=None, robust=False):
        """
        Fit the Cox Proportional Hazard model to a time varying dataset. Tied survival times
        are handled using Efron's tie-method.

        Parameters:
          df: a Pandas dataframe with columns `id_col`, `event_col`, `start_col`, `stop_col`,
              plus other covariates.
          id_col: A subject could have multiple rows in the dataframe. This column contains
             the unique identifier per subject.
          event_col: the column in dataframe that contains the subjects' death
             observation: 1 if observed, 0 else (censored).
          start_col: the column that contains the start of a subject's time period.
          stop_col: the column that contains the end of a subject's time period.
          weights_col: the column that contains (possibly time-varying) weight of each subject-period row.
          show_progress: since the fitter is iterative, show convergence diagnostics.
          step_size: set an initial step size for the fitting algorithm.
          robust: Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate.
             Currently unimplemented and raises NotImplementedError.

        Returns:
          self, with additional properties: hazards_
        """
        self.robust = robust
        if self.robust:
            raise NotImplementedError("Not available yet.")
        self.event_col = event_col
        self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        df = df.copy()
        if not (id_col in df and event_col in df and start_col in df and stop_col in df):
            raise KeyError("A column specified in the call to `fit` does not exist in the dataframe provided.")
        if weights_col is None:
            # Unweighted fit: give every subject-period row weight 1.
            assert '__weights' not in df.columns, '__weights is an internal lifelines column, please rename your column first.'
            df['__weights'] = 1.0
        else:
            if (df[weights_col] <= 0).any():
                raise ValueError("values in weights_col must be positive.")
        # Normalize column names so the rest of the fitter can use fixed names.
        df = df.rename(columns={id_col: 'id', event_col: 'event', start_col: 'start', stop_col: 'stop', weights_col: '__weights'})
        df = df.set_index('id')
        # Split meta columns (event/start/stop and weights) away from the covariates.
        stop_times_events = df[["event", "stop", "start"]].copy()
        weights = df[['__weights']].copy().astype(float)
        df = df.drop(["event", "stop", "start", "__weights"], axis=1)
        stop_times_events['event'] = stop_times_events['event'].astype(bool)
        self._check_values(df, stop_times_events)
        df = df.astype(float)
        # Standardize covariates for numerical stability; coefficients are rescaled below.
        self._norm_mean = df.mean(0)
        self._norm_std = df.std(0)
        hazards_ = self._newton_rhaphson(normalize(df, self._norm_mean, self._norm_std), stop_times_events, weights, show_progress=show_progress,
                                         step_size=step_size)
        # Un-normalize the coefficients back to the original covariate scale.
        self.hazards_ = pd.DataFrame(hazards_.T, columns=df.columns, index=['coef']) / self._norm_std
        self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
        self.standard_errors_ = self._compute_standard_errors(normalize(df, self._norm_mean, self._norm_std), stop_times_events, weights)
        self.confidence_intervals_ = self._compute_confidence_intervals()
        self.baseline_cumulative_hazard_ = self._compute_cumulative_baseline_hazard(df, stop_times_events, weights)
        self.baseline_survival_ = self._compute_baseline_survival()
        self.event_observed = stop_times_events['event']
        self.start_stop_and_events = stop_times_events
        self._n_examples = df.shape[0]
        self._n_unique = df.index.unique().shape[0]
        return self
    @staticmethod
    def _check_values(df, stop_times_events):
        """Run pre-fit sanity checks on covariates and interval/event data, raising on problems."""
        # check_for_overlapping_intervals(df) # this is currently too slow for production.
        check_nans_or_infs(df)
        check_low_var(df)
        check_complete_separation_low_variance(df, stop_times_events['event'])
        pass_for_numeric_dtypes_or_raise(df)
        check_for_immediate_deaths(stop_times_events)
        check_for_instantaneous_events(stop_times_events)
    def _compute_sandwich_estimator(self, df, stop_times_events, weights):
        """Huber sandwich (robust) variance estimator for the coefficients.

        NOTE(review): this method is currently unreachable (`fit` raises
        NotImplementedError when robust=True) and appears broken: it references
        `E` and `X`, which are never defined in this scope — presumably meant to
        be the event vector and the covariate matrix derived from the inputs.
        It also indexes `weights` (a DataFrame in the caller) with integers and
        `weights[:, None]`, which would fail. Confirm before enabling.
        """
        n, d = df.shape
        # Init risk and tie sums to zero
        risk_phi = 0
        risk_phi_x = np.zeros((1, d))
        # need to store these histories, as we access them often
        risk_phi_history = pd.DataFrame(np.zeros((n,)), index=df.index)
        risk_phi_x_history = pd.DataFrame(np.zeros((n, d)), index=df.index)
        # NOTE(review): `E` is used before assignment here.
        E = E.astype(int)
        score_residuals = np.zeros((n, d))
        # we already unnormalized the betas in `fit`, so we need normalize them again since X is
        # normalized.
        beta = self.hazards_.values[0] * self._norm_std
        # Iterate backwards to utilize recursive relationship
        for i in range(n - 1, -1, -1):
            # Doing it like this to preserve shape
            ei = E[i]
            # NOTE(review): `X` is undefined in this scope.
            xi = X[i:i + 1]
            phi_i = exp(dot(xi, beta))
            phi_x_i = phi_i * xi
            risk_phi += phi_i
            risk_phi_x += phi_x_i
            risk_phi_history[i] = risk_phi
            risk_phi_x_history[i] = risk_phi_x
        # Iterate forwards
        for i in range(0, n):
            # Doing it like this to preserve shape
            xi = X[i:i + 1]
            phi_i = exp(dot(xi, beta))
            score = -sum(E[j] * weights[j] * phi_i / risk_phi_history[j] * (xi - risk_phi_x_history[j] / risk_phi_history[j]) for j in range(0, i+1))
            score = score + E[i] * (xi - risk_phi_x_history[i] / risk_phi_history[i])
            score *= weights[i]
            score_residuals[i, :] = score
        naive_var = inv(self._hessian_)
        delta_betas = score_residuals.dot(naive_var) * weights[:, None]
        sandwich_estimator = delta_betas.T.dot(delta_betas) / np.outer(self._norm_std, self._norm_std)
        return sandwich_estimator
def _compute_standard_errors(self, df, stop_times_events, weights):
if self.robust:
se = np.sqrt(self._compute_sandwich_estimator(df, stop_times_events, weights).diagonal())
else:
se = np.sqrt(self.variance_matrix_.diagonal())
return pd.DataFrame(se[None, :],
index=['se'], columns=self.hazards_.columns)
def _compute_z_values(self):
return (self.hazards_.loc['coef'] /
self.standard_errors_.loc['se'])
def _compute_p_values(self):
U = self._compute_z_values() ** 2
return stats.chi2.sf(U, 1)
def _compute_confidence_intervals(self):
alpha2 = inv_normal_cdf((1. + self.alpha) / 2.)
se = self.standard_errors_
hazards = self.hazards_.values
return pd.DataFrame(np.r_[hazards - alpha2 * se,
hazards + alpha2 * se],
index=['lower-bound', 'upper-bound'],
columns=self.hazards_.columns)
@property
def summary(self):
"""
Summary statistics describing the fit.
Set alpha property in the object before calling.
Returns:
df: DataFrame, contains columns coef, exp(coef), se(coef), z, p, lower, upper
"""
df = pd.DataFrame(index=self.hazards_.columns)
df['coef'] = self.hazards_.loc['coef'].values
df['exp(coef)'] = exp(self.hazards_.loc['coef'].values)
df['se(coef)'] = self.standard_errors_.loc['se'].values
df['z'] = self._compute_z_values()
df['p'] = self._compute_p_values()
df['lower %.2f' % self.alpha] = self.confidence_intervals_.loc['lower-bound'].values
df['upper %.2f' % self.alpha] = self.confidence_intervals_.loc['upper-bound'].values
return df
def _newton_rhaphson(self, df, stop_times_events, weights, show_progress=False, step_size=None, precision=10e-6,
max_steps=50):
"""
Newton Rhaphson algorithm for fitting CPH model.
Note that data is assumed to be sorted on T!
Parameters:
df: (n, d) Pandas DataFrame of observations
stop_times_events: (n, d) Pandas DataFrame of meta information about the subjects history
show_progress: True to show verbous output of convergence
step_size: float > 0 to determine a starting step size in NR algorithm.
precision: the convergence halts if the norm of delta between
successive positions is less than epsilon.
Returns:
beta: (1,d) numpy array.
"""
assert precision <= 1., "precision must be less than or equal to 1."
n, d = df.shape
# make sure betas are correct size.
beta = np.zeros((d, 1))
i = 0
converging = True
ll, previous_ll = 0, 0
start = time.time()
step_sizer = StepSizer(step_size)
step_size = step_sizer.next()
while converging:
i += 1
h, g, ll = self._get_gradients(df, stop_times_events, weights, beta)
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta.T
h.flat[::d + 1] -= self.penalizer
try:
# reusing a piece to make g * inv(h) * g.T faster later
inv_h_dot_g_T = spsolve(-h, g.T, sym_pos=True)
except ValueError as e:
if 'infs or NaNs' in str(e):
raise ConvergenceError("""hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")
else:
# something else?
raise e
delta = step_size * inv_h_dot_g_T
if np.any(np.isnan(delta)):
raise ConvergenceError("""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")
# Save these as pending result
hessian, gradient = h, g
norm_delta = norm(delta)
newton_decrement = g.dot(inv_h_dot_g_T)/2
if show_progress:
print("Iteration %d: norm_delta = %.5f, step_size = %.5f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f" % (i, norm_delta, step_size, ll, newton_decrement, time.time() - start))
# convergence criteria
if norm_delta < precision:
converging, completed = False, True
elif previous_ll > 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
# this is what R uses by default
converging, completed = False, True
elif newton_decrement < 10e-8:
converging, completed = False, True
elif i >= max_steps:
# 50 iterations steps with N-R is a lot.
# Expected convergence is less than 10 steps
converging, completed = False, False
elif step_size <= 0.0001:
converging, completed = False, False
elif | |
<filename>dataloader/dataset.py<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SemKITTI dataloader
"""
import os
import numpy as np
import torch
import random
import time
import numba as nb
import yaml
import pickle
from torch.utils import data
from tqdm import tqdm
from scipy import stats as s
# load Semantic KITTI class info
# Build a mapping from learned class id -> human-readable label name.
# Keys are iterated in descending order, so when several raw ids map to the
# same learned class, the smallest raw id's name ends up in the dict.
with open("semantic-kitti.yaml", 'r') as stream:
    semkittiyaml = yaml.safe_load(stream)
SemKITTI_label_name = dict()
for i in sorted(list(semkittiyaml['learning_map'].keys()))[::-1]:
    SemKITTI_label_name[semkittiyaml['learning_map'][i]] = semkittiyaml['labels'][i]
# things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person', 'bicyclist', 'motorcyclist']
# stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
# things_ids = []
# for i in sorted(list(semkittiyaml['labels'].keys())):
#     if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in things:
#         things_ids.append(i)
# print(things_ids)
class SemKITTI(data.Dataset):
    """SemanticKITTI dataset over raw velodyne scans.

    Each item is a tuple built from one scan:
    (xyz points, remapped semantic labels[, reflectivity]
     [, raw instance labels, thing-mask], scan file path),
    where the optional parts are controlled by return_ref / return_ins.
    """

    def __init__(self, data_path, imageset = 'train', return_ref = False, return_ins = False):
        # return_ref: additionally return per-point reflectivity (4th channel)
        # return_ins: additionally return raw instance labels and a boolean
        #             mask of points belonging to "thing" classes
        self.return_ref = return_ref
        self.return_ins = return_ins
        with open("semantic-kitti.yaml", 'r') as stream:
            semkittiyaml = yaml.safe_load(stream)
        self.learning_map = semkittiyaml['learning_map']
        self.imageset = imageset
        if imageset == 'train':
            split = semkittiyaml['split']['train']
        elif imageset == 'val':
            split = semkittiyaml['split']['valid']
        elif imageset == 'test':
            split = semkittiyaml['split']['test']
        else:
            raise Exception('Split must be train/val/test')
        self.im_idx = []
        # collect every velodyne scan file for the sequences of this split
        for i_folder in split:
            self.im_idx += absoluteFilePaths('/'.join([data_path,str(i_folder).zfill(2),'velodyne']))
        self.im_idx.sort()
        self.things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person', 'bicyclist', 'motorcyclist']
        self.stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
        self.things_ids = []
        # raw label ids whose mapped class name is a "thing" (instance-bearing)
        for i in sorted(list(semkittiyaml['labels'].keys())):
            if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in self.things:
                self.things_ids.append(i)

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def __getitem__(self, index):
        # One scan: (N, 4) float32 = x, y, z, reflectivity
        raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
        # print("loading {}, shape {}".format(self.im_idx[index], raw_data.shape))
        if self.imageset == 'test':
            # no ground truth at test time: all-zero placeholders
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:,0],dtype=int),axis=1)
            sem_labels = annotated_data
            ins_labels = annotated_data
            valid = annotated_data
        else:
            # label file layout: low 16 bits = semantic id, high 16 bits = instance id
            annotated_data = np.fromfile(self.im_idx[index].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
            sem_labels = annotated_data & 0xFFFF #delete high 16 digits binary
            # ins_labels = (annotated_data & 0xFFFF0000) >> 16 # different classes could use same ins ids
            # the full 32-bit label is kept as the instance id, which is unique
            # across classes because it embeds the semantic id
            ins_labels = annotated_data
            # valid = (((ins_labels & 0xFFFF0000) >> 16) != 0).reshape(-1) # TODO: maybe this is not ok
            valid = np.isin(sem_labels, self.things_ids).reshape(-1) # use 0 to filter out valid indexes is enough
            # print(np.sum(valid) - np.sum((((ins_labels & 0xFFFF0000) >> 16) != 0)))
        # remap raw semantic ids to the training class ids
        sem_labels = np.vectorize(self.learning_map.__getitem__)(sem_labels)
        data_tuple = (raw_data[:,:3], sem_labels.astype(np.uint8))
        if self.return_ref:
            data_tuple += (raw_data[:,3],)
        if self.return_ins:
            data_tuple += (ins_labels, valid)
        data_tuple += (self.im_idx[index],)
        return data_tuple

    def count_ins(self):
        """Statistics helper: count instance ids per thing class over the split,
        derive relative class weights, and track the min/max number of
        thing-class points per scan. Prints results; returns nothing."""
        pbar = tqdm(total=len(self.im_idx), dynamic_ncols=True)
        counter = np.zeros([9], dtype=np.int32)
        min_valid_pn = 10000086
        max_valid_pn = -1
        for i in range(len(self.im_idx)):
            # raw_data = np.fromfile(self.im_idx[i], dtype=np.float32).reshape((-1, 4))
            annotated_data = np.fromfile(self.im_idx[i].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
            _sem_labels = annotated_data & 0xFFFF #delete high 16 digits binary
            ins_labels = annotated_data
            sem_labels = np.vectorize(self.learning_map.__getitem__)(_sem_labels)
            # classes 1..8 are the thing classes after remapping
            for j in range(1,9):
                j_ind = (sem_labels == j)
                j_ins_labels = ins_labels[j_ind]
                counter[j] += np.unique(j_ins_labels).reshape(-1).shape[0]
            pbar.update(1)
            valid_pn = np.sum(np.isin(_sem_labels, self.things_ids).reshape(-1))
            if valid_pn > max_valid_pn:
                max_valid_pn = valid_pn
            if valid_pn < min_valid_pn:
                min_valid_pn = valid_pn
            print(valid_pn, sem_labels.shape[0])
        pbar.close()
        counter = counter[1:]
        print("Counting results: ")
        print(counter)
        counter = counter.astype(np.float32)
        # normalize by the smallest count to get relative weights
        counter /= (np.min(counter) if np.min(counter) != 0 else 1.0)
        print("Weights: ")
        print(counter)
        print("max_valid_pn: {}".format(max_valid_pn))
        print("min_valid_pn: {}".format(min_valid_pn))

    def count_box_size(self):
        """Statistics helper: accumulate mean/max/min horizontal bounding-box
        extents (longer edge first) per class over all instances with at
        least 50 points. Prints results; returns nothing."""
        pbar = tqdm(total=len(self.im_idx), dynamic_ncols=True)
        counter = np.zeros([9], dtype=np.float32)
        mean_size = np.zeros([9, 2], dtype=np.float32)
        max_size = np.zeros([9, 2], dtype=np.float32)
        min_size = np.zeros([9, 2], dtype=np.float32) + 10086
        for i in range(len(self.im_idx)):
            #if i % 10 != 0:
            #    pbar.update(1)
            #    continue
            raw_data = np.fromfile(self.im_idx[i], dtype=np.float32).reshape((-1, 4))
            annotated_data = np.fromfile(self.im_idx[i].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
            _sem_labels = annotated_data & 0xFFFF #delete high 16 digits binary
            ins_labels = annotated_data
            sem_labels = np.vectorize(self.learning_map.__getitem__)(_sem_labels)
            pbar.update(1)
            for j in range(1, 9):
                j_ind = (sem_labels == j)
                j_ins_labels = ins_labels[j_ind]
                for j_ins_lab in np.unique(j_ins_labels):
                    j_pcd = raw_data[(ins_labels == j_ins_lab).reshape(-1)]
                    # skip instances with too few points for a stable box
                    if j_pcd.shape[0] < 50:
                        continue
                    x = j_pcd[:, 0].max() - j_pcd[:, 0].min()
                    y = j_pcd[:, 1].max() - j_pcd[:, 1].min()
                    # normalize so x is always the longer edge
                    if x < y:
                        tmp = x
                        x = y
                        y = tmp
                    mean_size[j, 0] += x
                    mean_size[j, 1] += y
                    counter[j] += 1
                    if x > max_size[j, 0]:
                        max_size[j, 0] = x
                    if y > max_size[j, 1]:
                        max_size[j, 1] = y
                    if x < min_size[j, 0]:
                        min_size[j, 0] = x
                    if y < min_size[j, 1]:
                        min_size[j, 1] = y
        pbar.close()
        # avoid division by zero for the unused class 0 slot
        counter[0] = 1
        print("Mean Size: {}".format(mean_size / counter.reshape(-1, 1)))
        print("Max Size: {}".format(max_size))
        print("Min Size: {}".format(min_size))
def absoluteFilePaths(directory):
    """Yield the absolute path of every file below *directory*, recursively."""
    for dir_path, _sub_dirs, file_names in os.walk(directory):
        for file_name in file_names:
            yield os.path.abspath(os.path.join(dir_path, file_name))
class voxel_dataset(data.Dataset):
    """Voxelize samples of a wrapped point-cloud dataset on a Cartesian grid.

    Each item is (voxel_position, processed_label, grid_ind, labels,
    per-point features[, index]); per-point features are the offsets to the
    containing voxel's center concatenated with the raw coordinates (and the
    optional reflectivity channel).
    """

    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
                 fixed_volume_space=False, max_volume_space=(50, 50, 1.5), min_volume_space=(-50, -50, -3)):
        """Store voxelization / augmentation settings.

        Args:
            in_dataset: dataset yielding (xyz, labels[, sig]) tuples.
            grid_size: number of voxels per axis.
            rotate_aug: enable random rotation about the z axis.
            flip_aug: enable random flips over x, y, or both axes.
            ignore_label: label written to voxels containing no point.
            return_test: also return the sample index from __getitem__.
            fixed_volume_space: clip coordinates to [min_volume_space,
                max_volume_space] instead of the per-sample extent.
            max_volume_space / min_volume_space: fixed bounds per axis
                (immutable defaults; lists are accepted as well).
        """
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.flip_aug = flip_aug
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space

    def __len__(self):
        """Number of samples in the wrapped dataset."""
        return len(self.point_cloud_dataset)

    def __getitem__(self, index):
        """Voxelize one sample of the wrapped dataset."""
        data = self.point_cloud_dataset[index]
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2:
                sig = np.squeeze(sig)
        elif len(data) == 4:
            raise Exception('Not implement instance label for voxel_dataset')
        else:
            raise Exception('Return invalid data tuple')
        # random data augmentation by rotation about the z axis
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)
        # random data augmentation by flip over x, y, or x+y
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        max_bound = np.percentile(xyz, 100, axis=0)
        min_bound = np.percentile(xyz, 0, axis=0)
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # per-point voxel index on the grid
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)
        if (intervals == 0).any():
            print("Zero interval!")
        # np.int was removed from NumPy (1.24); the builtin int dtype is the
        # documented replacement and behaves identically here
        grid_ind = (np.floor((np.clip(xyz, min_bound, max_bound) - min_bound) / intervals)).astype(int)
        # voxel center positions in the original coordinate frame
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        # per-voxel label: sort (voxel, label) pairs so nb_process_label
        # (numba-compiled, defined elsewhere) can scan runs per voxel
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # center data on each voxel for PTnet: offset to voxel center + raw xyz
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz), axis=1)
        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) == 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)
        return data_tuple
# transformation between Cartesian coordinates and polar coordinates
def cart2polar(input_xyz):
    """Convert (N, 3) Cartesian points to cylindrical (rho, phi, z)."""
    x = input_xyz[:, 0]
    y = input_xyz[:, 1]
    rho = np.sqrt(x ** 2 + y ** 2)
    phi = np.arctan2(y, x)
    return np.stack((rho, phi, input_xyz[:, 2]), axis=1)
def polar2cat(input_xyz_polar):
    """Convert cylindrical coordinates shaped (3, ...) back to Cartesian (x, y, z)."""
    rho = input_xyz_polar[0]
    phi = input_xyz_polar[1]
    return np.stack((rho * np.cos(phi), rho * np.sin(phi), input_xyz_polar[2]), axis=0)
class spherical_dataset(data.Dataset):
def __init__(self, in_dataset, grid_size, rotate_aug = False, flip_aug = False,
scale_aug =False, transform_aug=False, trans_std=[0.1, 0.1, 0.1],
min_rad=-np.pi/4, max_rad=np.pi/4, ignore_label = 255,
return_test = False, fixed_volume_space= False,
max_volume_space = [50,np.pi,1.5], min_volume_space = [3,-np.pi,-3],
center_type='Axis_center'):
'Initialization'
self.point_cloud_dataset = in_dataset
self.grid_size = np.asarray(grid_size)
self.rotate_aug = rotate_aug
self.flip_aug = flip_aug
self.ignore_label = ignore_label
self.return_test = return_test
self.fixed_volume_space = fixed_volume_space
self.max_volume_space = max_volume_space
self.min_volume_space = min_volume_space
self.scale_aug = scale_aug
self.transform = transform_aug
self.trans_std = trans_std
self.noise_rotation = np.random.uniform(min_rad, max_rad)
assert center_type in ['Axis_center', 'Mass_center']
self.center_type = center_type
def __len__(self):
'Denotes the total number of samples'
return len(self.point_cloud_dataset)
def __getitem__(self, index):
'Generates one sample of data'
data = self.point_cloud_dataset[index]
if len(data) == 2:
xyz,labels = data
elif len(data) == 3:
xyz,labels,sig = data
if len(sig.shape) == 2: sig = np.squeeze(sig)
elif len(data) == 6:
xyz,labels,sig,ins_labels,valid,pcd_fname = data
if len(sig.shape) == 2: sig = np.squeeze(sig)
elif len(data) == 7:
xyz,labels,sig,ins_labels,valid,pcd_fname,minicluster = data
if len(sig.shape) == 2: sig = np.squeeze(sig)
else: raise Exception('Return invalid data tuple')
# random data augmentation by rotation
if self.rotate_aug:
rotate_rad = np.deg2rad(np.random.random()*360)
c, s = np.cos(rotate_rad), np.sin(rotate_rad)
j = np.matrix([[c, s], [-s, c]])
xyz[:,:2] = np.dot( xyz[:,:2],j)
# random data augmentation by flip x , y or x+y
if self.flip_aug:
flip_type = np.random.choice(4,1)
if flip_type==1:
xyz[:,0] = -xyz[:,0]
elif flip_type==2:
xyz[:,1] = -xyz[:,1]
elif flip_type==3:
xyz[:,:2] = -xyz[:,:2]
if self.scale_aug:
noise_scale = np.random.uniform(0.95, 1.05)
xyz[:,0] = noise_scale * xyz[:,0]
xyz[:,1] = noise_scale * xyz[:,1]
if self.transform:
noise_translate = np.array([np.random.normal(0, self.trans_std[0], 1),
np.random.normal(0, self.trans_std[1], 1),
np.random.normal(0, self.trans_std[2], 1)]).T
xyz[:, 0:3] += noise_translate
# convert coordinate into polar coordinates
xyz_pol = cart2polar(xyz)
max_bound_r = np.percentile(xyz_pol[:,0],100,axis = 0)
min_bound_r = np.percentile(xyz_pol[:,0],0,axis = 0)
max_bound = np.max(xyz_pol[:,1:],axis = 0)
min_bound = np.min(xyz_pol[:,1:],axis = 0)
max_bound = np.concatenate(([max_bound_r],max_bound))
min_bound = np.concatenate(([min_bound_r],min_bound))
if self.fixed_volume_space:
max_bound = np.asarray(self.max_volume_space)
min_bound = np.asarray(self.min_volume_space)
# get grid index
crop_range = max_bound - min_bound
cur_grid_size = self.grid_size
intervals = crop_range/(cur_grid_size-1) # (size-1) could directly get index starting from | |
# ragerdl/dtrace-stap-book
import os
import sys
import datetime
from tsdoc.blocks import *
import zipfile
from zipfile import ZipFile
from tempfile import NamedTemporaryFile
from xml.etree import ElementTree as etree
class _ZipStream(object):
    # File-like proxy that spools writes into a temporary file and copies the
    # result into the zip archive under `fname` when the object is destroyed.
    # NOTE(review): relying on __del__ means the archive entry is only written
    # when the object is garbage-collected -- callers must drop all references
    # before closing the ZipFile.
    def __init__(self, zipf, fname):
        self.zipf = zipf
        self.fname = fname
        self.tmpf = NamedTemporaryFile()
    def __del__(self):
        # Flush pending writes, then archive the temp file under fname.
        self.tmpf.flush()
        self.zipf.write(self.tmpf.name, self.fname)
    def __getattr__(self, attrib):
        # Delegate everything else (write, flush, name, ...) to the temp file.
        return getattr(self.tmpf, attrib)
class EpubPrinter(Printer):
    """Printer backend that renders the whole book into a single EPUB archive."""
    single_doc = True
    stream_mode = 'wb'
    xref_pages = False
    # XML namespaces used in the generated OPF / ncx / xhtml documents
    NAMESPACES = {'XML': 'http://www.w3.org/XML/1998/namespace',
                  'EPUB': 'http://www.idpf.org/2007/ops',
                  'DAISY': 'http://www.daisy.org/z3986/2005/ncx/',
                  'OPF': 'http://www.idpf.org/2007/opf',
                  'CONTAINERNS': 'urn:oasis:names:tc:opendocument:xmlns:container',
                  'DC': "http://purl.org/dc/elements/1.1/",
                  'XHTML': 'http://www.w3.org/1999/xhtml'}
    # META-INF/container.xml template pointing EPUB readers at the OPF file
    CONTAINER_PATH = 'META-INF/container.xml'
    CONTAINER_XML = '''<?xml version='1.0' encoding='utf-8'?>
<container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0">
<rootfiles>
<rootfile media-type="application/oebps-package+xml" full-path="{folder_name}/content.opf"/>
</rootfiles>
</container>
'''
    XML_HEADER = '<?xml version="1.0" encoding="utf-8"?>'
    HTML_HEADER = '<!DOCTYPE html>'
    FOLDER_NAME = 'EPUB'
    IDENTIFIER_ID = 'pub-identifier'
    SOURCE_URL = 'https://raw.githubusercontent.com/myaut/dtrace-stap-book/master/'
    IMAGE_PATH = os.environ.get('TSDOC_IMGDIR')
    CSS_PATH = 'book/epub.css'
    # Human-readable titles for incut (sidebar) block classes
    INCUT_CLASSES = { 'DEF' : 'Definition',
                      'WARN' : 'Warning',
                      'INFO': 'Information',
                      'NOTE': 'Note',
                      'DANGER': 'DANGER!' }
    def __init__(self):
        """Initialize rendering state; configuration is read from TSDOC_* env vars."""
        self._epub = None               # ZipFile of the EPUB being written
        self._page_name = None          # file name of the chapter html in progress
        self._anchor_prefix = ''
        self._page_stream = None
        self._page_root = None          # etree root of the current chapter html
        self._page_body = None          # its <body> element
        self._page_div = None           # <div> receiving the current page's blocks
        self._docspaces = []
        self._ncx_stream = None         # toc.ncx output stream and root tag
        self._ncx_root = None
        self._toc_stream = None         # toc01.html output stream and root tag
        self._toc_root = None
        self._toc_state = None          # (playOrder, ncx stack, html toc stack)
        self._page_info = {}            # page path -> (toc level, title)
        self.print_sources = not os.environ.get('TSDOC_NO_SOURCES')
        self.uid = os.environ.get('TSDOC_UID')
        self.title = os.environ.get('TSDOC_HEADER')
        # Dublin Core metadata later written into the OPF file
        self._metadata = [('DC', 'identifier', self.uid),
                          ('DC', 'title', self.title),
                          ('DC', 'creator', os.environ.get('TSDOC_AUTHOR')),
                          ('DC', 'language', 'en')]
        self._items = []                # manifest entries (id, fname, mime, spine, xattr)
        self._images = set()
    def do_print_pages(self, stream, header, pages):
        ''' Generates the EPUB. An EPUB is a zip file with a bunch of XMLs/XHTMLs.
        The list of files within the EPUB is tracked via self._items. The order of
        creating items is crucial for "spine" items as they appear in the order
        they were created in the book.
        HTMLs are added in this order:
        - Cover cover.html
        - TOC toc01.html
        For each DocSpaceIndex page:
        - Chapter chXX.html is created
        Each XML/HTML is wrapped as a _ZipStream object -- a temporary file which
        is zipped and deleted on its destruction. We also keep the root tag to
        render XML into it.
        When rendering is complete, the OPF file is generated.'''
        self._epub = ZipFile(stream, 'w', zipfile.ZIP_DEFLATED)
        # "mimetype" must be stored uncompressed per the EPUB container spec
        self._epub.writestr('mimetype', 'application/epub+zip',
                            compress_type=zipfile.ZIP_STORED)
        self._ncx_stream = self._create_item('toc.ncx', 'toc.ncx',
                                             "application/x-dtbncx+xml", unique=True)
        self._add_css()
        pages = iter(pages)
        # first page is the book index: it becomes the cover
        index = next(pages)
        self._write_cover(index, header)
        self._toc_stream = self._create_item('toc', 'toc01.html', "application/xhtml+xml",
                                             spine=True, xattr={'properties': 'nav'})
        for page in pages:
            if hasattr(page, 'is_external'):
                # This is the index page of a docspace -- start a new chapter
                header = page.header
                self._create_chapter(page.docspace, header)
                self._add_toc_entry(page)
                hdr = etree.SubElement(self._page_div, 'h2')
                hdr.text = header
                continue
            if self._page_div is None:
                # no chapter started yet -- ignore stray pages
                continue
            self._add_toc_entry(page)
            for block in page.blocks:
                el = self._render_block(block)
                if el is not None:
                    self._page_div.append(el)
        self._finish_chapter()
        self._write_toc()
        self._write_container()
        self._write_opf_file()
    def _write_cover(self, index, header):
        ''' Process the index page:
        - Generates cover.html from index blocks and the title block
        - Finds docspace names in the index page to get docspace order
        - Walks index references recursively to get metadata for the TOC'''
        stream = self._create_item('cover', 'cover.html', "application/xhtml+xml",
                                   spine=True)
        root, body = self._create_html(self.title)
        title = etree.SubElement(body, 'h1')
        title.text = self.title
        blocks = iter(index)
        # copy index blocks up to the __endbackpage__ marker onto the cover
        for block in blocks:
            if any(isinstance(part, Reference) and part.text == '__endbackpage__'
                   for part in block):
                break
            el = self._render_block(block)
            if el is not None:
                body.append(el)
        self._write_etree_stream(root, stream)
        self._docspaces = [docspace.docspace
                           for docspace in index.docspaces]
        # remaining blocks hold the hierarchical page list used by the TOC
        self._find_page_info(blocks)
def _find_page_info(self, parts, level=1):
for part in parts:
if isinstance(part, ListEntry):
level = part.level
if isinstance(part, Block):
self._find_page_info(part.parts, level)
if isinstance(part, Link) and part.type == Link.INTERNAL:
self._page_info[part.where] = (level, part.text)
    def _add_toc_entry(self, page):
        ''' Adds a TOC entry to toc01.html and toc.ncx. Both files have a
        hierarchical organization, so it takes the level from the _page_info cache
        (generated by _write_cover from the index page), advances the tree if
        there are not enough levels and generates the TOC element as a leaf of
        that tree. The tree is maintained as a pair of stacks -- for toc (HTML)
        and ncx files, so the new element will be added to the last leaf element.
        Also generates a section for rendering the page and saves it to _page_div '''
        if hasattr(page, 'is_external'):
            # docspace index pages sit at the top level of the TOC
            level = 0
            header = page.header.strip()
        else:
            level, header = self._page_info[page.page_path]
        name = page.page_path.replace('/', '_')
        # Generate section and div for this section
        section = etree.SubElement(self._page_body, 'section', {'data-type': "chapter"})
        self._page_div = etree.SubElement(section, 'div', {'id': name})
        self._anchor_prefix = name + '_'
        if self._toc_state is None:
            # initialize ncx state (first TOC entry only)
            self._ncx_root = etree.Element('ncx', {'xmlns' : self.NAMESPACES['DAISY'],
                                                   'version' : '2005-1'})
            head = etree.SubElement(self._ncx_root, 'head')
            el = etree.SubElement(head, 'meta', {'name': 'cover', 'content': 'cover'})
            etree.SubElement(head, 'meta', {'name': 'dtb:uid', 'content': self.uid})
            doc_title = etree.SubElement(self._ncx_root, 'docTitle')
            doc_title_text = etree.SubElement(doc_title, 'text')
            doc_title_text.text = self.title
            nav_map = etree.SubElement(self._ncx_root, 'navMap')
            # initialize toc01.html
            self._toc_root, toc_body = self._create_html('Table of contents')
            toc_nav = etree.SubElement(toc_body, 'nav', {'data-type': 'toc',
                                                         'id': 'id-{0}'.format(id(self._toc_root))})
            toc_ol = etree.SubElement(toc_nav, 'ol')
            self._toc_state = (1, [nav_map], [toc_ol])
        order, stack, toc_stack = self._toc_state
        # Create nav point
        nav_point = etree.Element('navPoint', {'id': 'np-{0}'.format(id(page)),
                                               'playOrder': str(order)})
        nav_label = etree.SubElement(nav_point, 'navLabel')
        nav_label_text = etree.SubElement(nav_label, 'text')
        nav_label_text.text = header
        # Generate path to anchor
        src = '{0}#{1}'.format(self._page_name, name)
        etree.SubElement(nav_point, 'content', {'src': src})
        # Create toc hierarchy based on index.md list hierarchy -- unwind stack
        # if we had go to upper level, and append new child to corresponding level
        while len(stack) > (level + 1):
            stack.pop()
        stack[-1].append(nav_point)
        stack.append(nav_point)
        # Now create an entry in html version of toc
        toc_li = etree.Element('li')
        toc_link = etree.SubElement(toc_li, 'a', {'href': src})
        toc_link.text = header
        while len(toc_stack) > (level + 1):
            toc_stack.pop()
        if level >= len(toc_stack):
            # NOTE(review): Element._children is a private ElementTree attribute
            # (gone in modern Python 3) -- this relies on the py2 implementation
            toc_stack.append(etree.SubElement(toc_stack[-1]._children[-1], 'ol'))
        toc_stack[-1].append(toc_li)
        self._toc_state = (order + 1, stack, toc_stack)
def _write_toc(self):
''' Completes and writes toc and ncx files '''
self._write_etree_stream(self._ncx_root, self._ncx_stream)
self._ncx_stream = None
self._write_etree_stream(self._toc_root, self._toc_stream)
self._toc_stream = None
def _write_container(self):
''' Generates container.xml '''
container_xml = self.CONTAINER_XML.format(folder_name=self.FOLDER_NAME)
self._epub.writestr(self.CONTAINER_PATH, container_xml)
def _write_etree_file(self, root, fname):
''' Helper for writing xml directly to EPUB zip. File will be saved as fname '''
tree_str = self.XML_HEADER
tree_str += etree.tostring(root, encoding='utf-8')
self._epub.writestr(os.path.join(self.FOLDER_NAME, fname), tree_str)
def _write_etree_stream(self, root, stream, header='xml'):
''' Writes xml tag root to a stream of instance _ZipStream '''
stream.write(self.XML_HEADER)
if header == 'html':
stream.write(self.HTML_HEADER)
stream.write(etree.tostring(root, encoding='utf-8'))
    def _write_opf_file(self):
        ''' Generates the OPF file -- literally the EPUB's index. It has several sections:
        - metadata contains title, author, etc. as set by the constructor
        - manifest contains a list of all files created by _create_item
        - spine defines the order of htmls in the book; elements are added to
          the spine if the spine flag was set in _create_item
        - guide contains other references -- currently the TOC and Cover references '''
        root = etree.Element('package',
                             {'xmlns' : self.NAMESPACES['OPF'],
                              'xmlns:dc' : self.NAMESPACES['DC'],
                              'unique-identifier' : self.IDENTIFIER_ID,
                              'version' : '3.0'})
        root.attrib['prefix'] = 'rendition: http://www.ipdf.org/vocab/rendition/#'
        ## METADATA
        metadata = etree.SubElement(root, 'metadata')
        # last-modified timestamp required by EPUB 3
        el = etree.SubElement(metadata, 'meta', {'property':'dcterms:modified'})
        el.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        # each DC entry is written both as a dc: element and a meta property
        for ns_name, aname, text in self._metadata:
            if ns_name == 'DC':
                el = etree.SubElement(metadata, 'dc:' + aname, {'id': 'pub-' + aname})
                el.text = text
                el = etree.SubElement(metadata, 'meta', {'property': 'dcterms:' + aname,
                                                         'id': 'meta-' + aname})
                el.text = text
        el = etree.SubElement(metadata, 'meta', {'name': 'cover',
                                                 'content': 'cover'})
        # MANIFEST
        manifest = etree.SubElement(root, 'manifest')
        for id, fname, mimetype, _, xattr in self._items:
            attr = xattr.copy()
            attr.update({'id': id, 'href': fname, 'media-type': mimetype})
            el = etree.SubElement(manifest, 'item', attr)
        # SPINE
        spine = etree.SubElement(root, 'spine', {'toc': 'toc.ncx'})
        for id, _, _, is_spine, _ in self._items:
            if is_spine:
                el = etree.SubElement(spine, 'itemref', {'idref': id})
        # GUIDE
        guide = etree.SubElement(root, 'guide')
        etree.SubElement(guide, 'reference', {'href': 'cover.html',
                                              'type': 'cover',
                                              'title': 'Cover'})
        etree.SubElement(guide, 'reference', {'href': 'toc01.html',
                                              'type': 'toc',
                                              'title': 'Table of contents'})
        self._write_etree_file(root, 'content.opf')
def _create_item(self, id, fname, mimetype, spine=False, unique=False, xattr={}):
''' Create new file inside epub:
- id, unique -- id prefix (unique number is added if unique=True)
- fname -- path with EPUB/ dir
- mimetype - mime type
- spine -- should be set | |
user pool.
- **UserPoolClientId** *(string) --* **[REQUIRED]**
The ID of the Amazon Cognito user pool client.
- **UserPoolDomain** *(string) --* **[REQUIRED]**
The domain prefix or fully-qualified domain name of the Amazon Cognito user pool.
- **SessionCookieName** *(string) --*
The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.
- **Scope** *(string) --*
The set of user claims to be requested from the IdP. The default is ``openid`` .
To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.
- **SessionTimeout** *(integer) --*
The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).
- **AuthenticationRequestExtraParams** *(dict) --*
The query parameters (up to 10) to include in the redirect request to the authorization endpoint.
- *(string) --*
- *(string) --*
- **OnUnauthenticatedRequest** *(string) --*
The behavior if the user is not authenticated. The following are possible values:
* deny- Return an HTTP 401 Unauthorized error.
* allow- Allow the request to be forwarded to the target.
* authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.
- **Order** *(integer) --*
The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a ``forward`` or a ``fixed-response`` action.
- **RedirectConfig** *(dict) --*
[Application Load Balancer] Information for creating a redirect action. Specify only when ``Type`` is ``redirect`` .
- **Protocol** *(string) --*
The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.
- **Port** *(string) --*
The port. You can specify a value from 1 to 65535 or #{port}.
- **Host** *(string) --*
The hostname. This component is not percent-encoded. The hostname can contain #{host}.
- **Path** *(string) --*
The absolute path, starting with the leading \"/\". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.
- **Query** *(string) --*
The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading \"?\", as it is automatically added. You can specify any of the reserved keywords.
- **StatusCode** *(string) --* **[REQUIRED]**
The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302).
- **FixedResponseConfig** *(dict) --*
[Application Load Balancer] Information for creating an action that returns a custom HTTP response. Specify only when ``Type`` is ``fixed-response`` .
- **MessageBody** *(string) --*
The message.
- **StatusCode** *(string) --* **[REQUIRED]**
The HTTP response code (2XX, 4XX, or 5XX).
- **ContentType** *(string) --*
The content type.
Valid Values: text/plain | text/css | text/html | application/javascript | application/json
:rtype: dict
:returns:
"""
pass
def modify_target_group(self, TargetGroupArn: str, HealthCheckProtocol: str = None, HealthCheckPort: str = None, HealthCheckPath: str = None, HealthCheckEnabled: bool = None, HealthCheckIntervalSeconds: int = None, HealthCheckTimeoutSeconds: int = None, HealthyThresholdCount: int = None, UnhealthyThresholdCount: int = None, Matcher: Dict = None) -> Dict:
"""
Modifies the health checks used when evaluating the health state of the targets in the specified target group.
To monitor the health of the targets, use DescribeTargetHealth .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroup>`_
**Request Syntax**
::
response = client.modify_target_group(
TargetGroupArn='string',
HealthCheckProtocol='HTTP'|'HTTPS'|'TCP'|'TLS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckEnabled=True|False,
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
**Response Syntax**
::
{
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS'|'TCP'|'TLS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS'|'TCP'|'TLS',
'HealthCheckPort': 'string',
'HealthCheckEnabled': True|False,
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
],
'TargetType': 'instance'|'ip'|'lambda'
},
]
}
**Response Structure**
- *(dict) --*
- **TargetGroups** *(list) --*
Information about the modified target group.
- *(dict) --*
Information about a target group.
- **TargetGroupArn** *(string) --*
The Amazon Resource Name (ARN) of the target group.
- **TargetGroupName** *(string) --*
The name of the target group.
- **Protocol** *(string) --*
The protocol to use for routing traffic to the targets.
- **Port** *(integer) --*
The port on which the targets are listening.
- **VpcId** *(string) --*
The ID of the VPC for the targets.
- **HealthCheckProtocol** *(string) --*
The protocol to use to connect with the target.
- **HealthCheckPort** *(string) --*
The port to use to connect with the target.
- **HealthCheckEnabled** *(boolean) --*
Indicates whether health checks are enabled.
- **HealthCheckIntervalSeconds** *(integer) --*
The approximate amount of time, in seconds, between health checks of an individual target.
- **HealthCheckTimeoutSeconds** *(integer) --*
The amount of time, in seconds, during which no response means a failed health check.
- **HealthyThresholdCount** *(integer) --*
The number of consecutive health checks successes required before considering an unhealthy target healthy.
- **UnhealthyThresholdCount** *(integer) --*
The number of consecutive health check failures required before considering the target unhealthy.
- **HealthCheckPath** *(string) --*
The destination for the health check request.
- **Matcher** *(dict) --*
The HTTP codes to use when checking for a successful response from a target.
- **HttpCode** *(string) --*
The HTTP codes.
For Application Load Balancers, you can specify values between 200 and 499, and the default value is 200. You can specify multiple values (for example, "200,202") or a range of values (for example, "200-299").
For Network Load Balancers, this is 200–399.
- **LoadBalancerArns** *(list) --*
The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
- *(string) --*
- **TargetType** *(string) --*
The type of target that you must specify when registering targets with this target group. The possible values are ``instance`` (targets are specified by instance ID) or ``ip`` (targets are specified by IP address).
:type TargetGroupArn: string
:param TargetGroupArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the target group.
:type HealthCheckProtocol: string
:param HealthCheckProtocol:
The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported for health checks only if the protocol of the target group is TCP or TLS. The TLS protocol is not supported for health checks.
If the protocol of the target group is TCP, you can\'t modify this setting.
:type HealthCheckPort: string
:param HealthCheckPort:
The port the load balancer uses when performing health checks on targets.
:type HealthCheckPath: string
:param HealthCheckPath:
[HTTP/HTTPS health checks] The ping path that is the destination for the health check request.
:type HealthCheckEnabled: boolean
:param HealthCheckEnabled:
Indicates whether health checks are enabled.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds:
The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5–300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds.
If the protocol of the target group is TCP, you can\'t modify this setting.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds:
[HTTP/HTTPS health checks] The amount of time, in seconds, during which no response means a failed health check.
If the protocol of the target group is TCP, you can\'t modify this setting.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount:
The number of consecutive health checks successes required before considering an unhealthy target healthy.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount:
The number of consecutive health check failures required before considering the target unhealthy. For Network Load Balancers, this value must be the same as the healthy threshold count.
:type Matcher: dict
:param Matcher:
[HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful response from a target.
If the protocol of the target group is TCP, you can\'t modify this setting.
- **HttpCode** *(string) --* **[REQUIRED]**
The HTTP codes.
For Application Load Balancers, you can specify values between 200 and | |
    # Emitted whenever the stored date/time changes.
    contentChanged = pyqtSignal(QDateTime)

    @property
    def content(self):
        """The currently stored QDateTime value."""
        return self._content

    @content.setter
    def content(self, date_time:QDateTime):
        # Store the new value and notify listeners (e.g. bound editor widgets).
        self._content = date_time
        self.contentChanged.emit(self._content)
class DateTimePickerSeries(QWidget):
    """Widget wrapping a QDateTimeEdit kept in sync with a shared model.

    Several DateTimePickerSeries instances built on the same model mirror
    each other: editing one pushes the value into the model, and the model
    pushes it back out to every other editor.
    """

    def __init__(self, model, display: str):
        super(DateTimePickerSeries, self).__init__()
        self.model = model
        editor = QDateTimeEdit(self.model.content)
        editor.setDisplayFormat(display)
        # two-way binding: editor -> model, model -> editor
        editor.dateTimeChanged.connect(self.set_model_date_time)
        self.model.contentChanged.connect(editor.setDateTime)
        box = QHBoxLayout()
        box.setContentsMargins(0, 0, 0, 0)
        box.addWidget(editor)
        self.setLayout(box)

    def set_model_date_time(self, date_time: QDateTime):
        """Push the editor's current value into the shared model."""
        self.model.content = date_time
class Popup(QDialog):
def __init__(self, schedule:Schedule, parent=None, edit_data=None):
    """Dialog for creating or editing a scheduled event.

    Args:
        schedule (Schedule): schedule the event belongs to
        parent: optional parent widget
        edit_data: None to create a new event, or an (event_name, data)
            tuple to pre-populate the dialog for editing an existing one
    """
    super(Popup, self).__init__(parent)
    self.schedule = schedule
    # edit mode: unpack the existing event; create mode: both stay None
    self.event_name, self.data = edit_data if edit_data is not None else (None, None)
    self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
    self.setWindowTitle("Add New Scheduled Notes" if edit_data is None else "Edit {} Details".format(self.event_name))
    self.layout = QVBoxLayout()
    self.layout.setSpacing(0)
    # shared form (type/name/color) shown above the per-type stacked pages
    self.form_layout = QFormLayout()
    self.form_layout.setContentsMargins(0, 0, 0, self.form_layout.verticalSpacing())
    self.form_layout_widget = QWidget()
    self.form_layout_widget.setLayout(self.form_layout)
    # The amount of fields in the form that come before the block section (name, #blocks, start, end date, color)
    self.rows_before_blocks = 3
    # Event-type drop-down button; picking an entry switches the stacked page
    self.event_type = QPushButton()
    event_type_menu = QMenu()
    event_type_menu.addAction("Class")
    event_type_menu.addSection("Event")
    event_type_menu.addAction("One Time Event")
    event_type_menu.addAction("Recurring Event")
    event_type_menu.addAction("One Time Class Event")
    for action in event_type_menu.actions():
        if not action.isSeparator():
            # bind the action's text as a default arg to avoid late-binding of `action`
            action.triggered.connect(lambda state, x=action.text(): self.set_type(x))
    self.event_type.setMenu(event_type_menu)
    self.form_layout.addRow("Type:", self.event_type)
    # Class Title
    self.name_edit = QLineEdit()
    self.form_layout.addRow("Name:", self.name_edit)
    # Color
    self.color_picker = QColorDialog()
    self.color_button = QPushButton("Pick Color")
    self.color_button.clicked.connect(self.color_picker.open)
    self.color_picker.currentColorChanged.connect(self.update_color)
    self.form_layout.addRow("Color Code:", self.color_button)
    # Initialize widgets to be added later.
    # Class and event pages share the same date models, so switching type
    # preserves whatever dates the user already picked.
    self.start_date_model = DateTimePickerSeriesModel(self)
    self.class_start_date = DateTimePickerSeries(self.start_date_model, "MMM d yyyy")
    self.event_start_date = DateTimePickerSeries(self.start_date_model, "MMM d yyyy")
    self.end_date_model = DateTimePickerSeriesModel(self)
    self.class_end_date = DateTimePickerSeries(self.end_date_model, "MMM d yyyy")
    self.event_end_date = DateTimePickerSeries(self.end_date_model, "MMM d yyyy")
    self.event_date_model = DateTimePickerSeriesModel(self)
    self.class_event_date = DateTimePickerSeries(self.event_date_model, "MMM d yyyy hh:mm:AP")
    self.event_date = DateTimePickerSeries(self.event_date_model, "MMM d yyyy hh:mm:AP")
    # Blocks: number of weekly time blocks for a class (1..7)
    self.blocks = 1
    self.spin_box = QSpinBox()
    self.spin_box.setValue(1)
    self.spin_box.setMinimum(1)
    self.spin_box.setMaximum(7)
    self.spin_box.valueChanged.connect(self.update_blocks)
    # Drop-down of existing classes, used by "One Time Class Event"
    self.class_picker = QPushButton()
    class_picker_menu = QMenu()
    for class_name in self.schedule.schedule.keys():
        if self.schedule.schedule[class_name]["type"] != "class":
            continue
        class_action = QAction(class_name, parent=class_picker_menu)
        class_action.triggered.connect(lambda state, x=class_action.text(): self.class_picker.setText(x))
        class_picker_menu.addAction(class_action)
    # keep the popup menu at least as wide as the button
    class_picker_menu.aboutToShow.connect(
        lambda: class_picker_menu.setMinimumWidth(self.class_picker.width()))
    self.class_picker.setMenu(class_picker_menu)
    # One stacked page of options per event type
    self.stack = QStackedWidget()
    self.stack.setContentsMargins(0, 0, 0, 0)
    class_layout = QFormLayout()
    class_layout.setContentsMargins(0, 0, 0, class_layout.verticalSpacing())
    class_layout.addRow("Start Date:", self.class_start_date)
    class_layout.addRow("End Date:", self.class_end_date)
    class_layout.addRow("Weekly Blocks:", self.spin_box)
    class_layout.addRow("Block Time:", ClassTimePicker())
    self.class_options = QWidget()
    self.class_options.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
    self.class_options.setLayout(class_layout)
    recurring_event_layout = QFormLayout()
    recurring_event_layout.setContentsMargins(0, 0, 0, recurring_event_layout.verticalSpacing())
    recurring_event_layout.addRow("Start Date:", self.event_start_date)
    recurring_event_layout.addRow("End Date:", self.event_end_date)
    self.recurring_event_time_picker = ClassTimePicker()
    recurring_event_layout.addRow("Event Time:", self.recurring_event_time_picker)
    self.recurring_event_options = QWidget()
    self.recurring_event_options.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
    self.recurring_event_options.setLayout(recurring_event_layout)
    one_time_event_layout = QFormLayout()
    one_time_event_layout.setContentsMargins(0, 0, 0, one_time_event_layout.verticalSpacing())
    one_time_event_layout.addRow("Event Date:", self.event_date)
    self.one_time_event_options = QWidget()
    self.one_time_event_options.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
    self.one_time_event_options.setLayout(one_time_event_layout)
    class_event_layout = QFormLayout()
    class_event_layout.setContentsMargins(0, 0, 0, class_event_layout.verticalSpacing())
    class_event_layout.addRow("Class:", self.class_picker)
    class_event_layout.addRow("Event Date:", self.class_event_date)
    self.class_event_options = QWidget()
    self.class_event_options.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
    self.class_event_options.setLayout(class_event_layout)
    self.stack.addWidget(self.class_event_options)
    self.stack.addWidget(self.one_time_event_options)
    self.stack.addWidget(self.recurring_event_options)
    self.stack.addWidget(self.class_options)
    if self.data is None:
        self.set_type("Class")
    self.layout.addWidget(self.form_layout_widget)
    self.layout.addWidget(self.stack)
    self.setLayout(self.layout)
    self.show_buttons()
    # Update Values if self.data is defined (edit mode)
    if self.data is not None:
        event_type = self.data["type"]
        self.set_type(camel_case(event_type))
        # noinspection PyTypeChecker
        class_layout: QFormLayout = self.stack.currentWidget().layout()
        # the name identifies the event, so it cannot be changed while editing
        self.name_edit.setText(self.event_name)
        self.name_edit.setDisabled(True)
        self.color_picker.setCurrentColor(to_qcolor(self.data["color"]))
        if event_type in ["class", "recurring event"]:
            self.start_date_model.content = QDateTime(to_qdate(self.data["start"]))
            self.end_date_model.content = QDateTime(to_qdate(self.data["end"]))
        if event_type == "class":
            # restore one picker row per stored block
            blocks = self.data["blocks"]
            self.update_blocks(len(blocks))
            for i, row in enumerate(range(self.rows_before_blocks, class_layout.rowCount())):
                block = blocks[i]
                # noinspection PyTypeChecker
                block_widget: ClassTimePicker = class_layout.itemAt(row, QFormLayout.FieldRole).widget()
                block_widget.set_time(to_qtime(block["time"]))
                block_widget.set_day(block["day"])
        if event_type == "recurring event":
            self.recurring_event_time_picker.set_day(self.data["day"])
            self.recurring_event_time_picker.set_time(to_qtime(self.data["time"]))
        if event_type in ["one time event", "one time class event"]:
            date_time = QDateTime()
            date_time.setDate(to_qdate(self.data["date"]))
            date_time.setTime(to_qtime(self.data["time"]))
            self.event_date_model.content = date_time
        if event_type == "one time class event":
            self.class_picker.setText(self.data["class_name"])
def show_buttons(self):
    """Append the dialog's button row: Save/Apply, Cancel, and Delete when editing."""
    editing = self.data is not None
    button_box = QDialogButtonBox(Qt.Horizontal)
    # Save for a brand-new event, Apply when editing an existing one
    accept_kind = QDialogButtonBox.Apply if editing else QDialogButtonBox.Save
    button_box.addButton(accept_kind).clicked.connect(self.accept)
    button_box.addButton(QDialogButtonBox.Cancel).clicked.connect(self.reject)
    if editing:
        delete_button = button_box.addButton(QDialogButtonBox.Discard)
        delete_button.setText("Delete")
        delete_button.clicked.connect(self.delete_event)
    self.layout.addWidget(button_box)
def set_type(self, event_type:str):
    """Switch the dialog to *event_type*.

    Shows the matching options page in the stacked widget and equalises the
    label-column width between the shared form and the current page so the
    two forms line up visually.
    """
    if event_type == self.event_type.text():
        return
    self.event_type.setText(event_type)
    # let the outgoing page collapse so the dialog can shrink to fit the new page
    self.stack.currentWidget().setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
    if event_type == "Class":
        self.class_options.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.class_options.adjustSize()
        self.stack.setCurrentWidget(self.class_options)
    elif event_type == "Recurring Event":
        self.recurring_event_options.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.recurring_event_options.adjustSize()
        self.stack.setCurrentWidget(self.recurring_event_options)
    elif event_type == "One Time Event":
        self.one_time_event_options.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.one_time_event_options.adjustSize()
        self.stack.setCurrentWidget(self.one_time_event_options)
    elif event_type == "One Time Class Event":
        self.class_event_options.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.class_event_options.adjustSize()
        self.stack.setCurrentWidget(self.class_event_options)
    self.stack.adjustSize()
    # find the widest label across both the shared form and the current page...
    max_width = 0
    for i in range(self.form_layout.rowCount()):
        widget = self.form_layout.itemAt(i, QFormLayout.LabelRole).widget()
        widget.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        widget.adjustSize()
        max_width = max(widget.size().width(), max_width)
    # noinspection PyTypeChecker
    current_widget_layout:QFormLayout = self.stack.currentWidget().layout()
    for i in range(current_widget_layout.rowCount()):
        widget = current_widget_layout.itemAt(i, QFormLayout.LabelRole).widget()
        widget.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        widget.adjustSize()
        max_width = max(widget.size().width(), max_width)
    # ...then pin every label to that width so the two columns align
    for i in range(self.form_layout.rowCount()):
        self.form_layout.itemAt(i, QFormLayout.LabelRole).widget().setMinimumWidth(max_width)
    for i in range(current_widget_layout.rowCount()):
        current_widget_layout.itemAt(i, QFormLayout.LabelRole).widget().setMinimumWidth(max_width)
    self.adjustSize()
def update_color(self):
    """Tint the color button with the color currently chosen in the dialog."""
    chosen = self.color_picker.currentColor()
    self.color_button.setStyleSheet(
        "background-color: rgb({},{},{})".format(chosen.red(), chosen.green(), chosen.blue()))
def update_blocks(self, value):
    """Grow or shrink the list of weekly block rows to *value* rows.

    Rows are hidden rather than removed when the count shrinks, so their
    contents survive if the user raises the count again.
    """
    # keep the spin box as the single source of truth: if it disagrees,
    # update it and let its valueChanged signal re-enter this method
    if self.spin_box.value() != value:
        self.spin_box.setValue(value)
        return
    if self.blocks == value:
        return
    old_blocks = self.blocks
    self.blocks = value
    class_options_layout:QFormLayout = self.class_options.layout()
    if self.blocks > old_blocks:
        # Change label of block 1 from "Block Time:" to the numbered form
        if old_blocks == 1:
            class_options_layout.itemAt(self.rows_before_blocks, QFormLayout.LabelRole).widget().setText("Block 1 Time:")
        for i in range(1, self.blocks - old_blocks + 1):
            offset = self.rows_before_blocks + old_blocks + i - 1
            widget = class_options_layout.itemAt(offset, QFormLayout.FieldRole)
            label = class_options_layout.itemAt(offset, QFormLayout.LabelRole)
            if widget is not None and label is not None:
                # a previously hidden row already exists at this offset: reuse it
                widget = widget.widget()
                label = label.widget()
                widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
                label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
                widget.adjustSize()
                label.adjustSize()
                widget.show()
                label.show()
            else:
                # no reusable row: append a fresh picker
                picker = ClassTimePicker()
                picker.sizePolicy().setRetainSizeWhenHidden(False)
                class_options_layout.addRow("Block {} Time:".format(old_blocks + i), picker)
    elif self.blocks < old_blocks:
        # a single remaining block loses its number
        if self.blocks == 1:
            class_options_layout.itemAt(self.rows_before_blocks, QFormLayout.LabelRole).widget().setText("Block Time:")
        # NOTE(review): these offsets only look correct when shrinking by one
        # step (spin-box arrows); a multi-step shrink appears to hide the wrong
        # rows and can index past the last row — confirm whether that path is
        # reachable
        for i in range(old_blocks - self.blocks):
            offset = self.rows_before_blocks + old_blocks + i - 1
            widget = class_options_layout.itemAt(offset, QFormLayout.FieldRole).widget()
            label = class_options_layout.itemAt(offset, QFormLayout.LabelRole).widget()
            widget.hide()
            label.hide()
            widget.adjustSize()
            label.adjustSize()
    self.class_options.adjustSize()
    self.stack.adjustSize()
    self.adjustSize()
def get_name(self):
    """Return the event name currently typed into the name field."""
    return self.name_edit.text()
def get_data(self):
    """Serialise the dialog's current inputs into an event data dict.

    "type", "name" and "color" are always present; the remaining keys
    depend on the selected event type.
    """
    event_type = self.event_type.text()
    data = {
        "type": event_type.lower(),
        "name": self.get_name(),
        "color": {
            "r": self.color_picker.currentColor().red(),
            "g": self.color_picker.currentColor().green(),
            "b": self.color_picker.currentColor().blue(),
        }
    }
    if event_type == "Class":
        # one entry per visible (not hidden) block row
        block_data = []
        # noinspection PyTypeChecker
        class_layout:QFormLayout = self.stack.currentWidget().layout()
        for row in range(self.rows_before_blocks, class_layout.rowCount()):
            # noinspection PyTypeChecker
            block_widget:ClassTimePicker = class_layout.itemAt(row, QFormLayout.FieldRole).widget()
            if block_widget.isHidden():
                continue
            time = block_widget.get_time()
            block_data.append({
                "day": block_widget.day_picker.get_day(),
                "time": {
                    "hour": time.hour(),
                    "minute": time.minute()
                }
            })
        data["blocks"] = block_data
    if event_type in ["Class", "Recurring Event"]:
        # date span comes from the shared start/end date models
        start_date = self.start_date_model.content.date()
        data["start"] = {
            "day": start_date.day(),
            "month": start_date.month(),
            "year": start_date.year()
        }
        end_date = self.end_date_model.content.date()
        data["end"] = {
            "day": end_date.day(),
            "month": end_date.month(),
            "year": end_date.year()
        }
    if event_type == "Recurring Event":
        data["day"] = self.recurring_event_time_picker.day_picker.get_day()
        time = self.recurring_event_time_picker.get_time()
        data["time"] = {
            "hour": time.hour(),
            "minute": time.minute()
        }
    if event_type == "One Time Class Event":
        data["class_name"] = self.class_picker.text()
    if event_type in ["One Time Event", "One Time Class Event"]:
        date_time = self.event_date_model.content
        date = date_time.date()
        time = date_time.time()
        data["date"] = {
            "day": date.day(),
            "month": date.month(),
            "year": date.year(),
        }
        data["time"] = {
            "hour": time.hour(),
            "minute": time.minute()
        }
    return data
def delete_event(self):
    """Ask for confirmation, then remove this event from the schedule and close."""
    prompt = QMessageBox()
    prompt.setText("Are you sure you would like to delete this event?")
    prompt.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)
    if prompt.exec_() == QMessageBox.Yes:
        self.schedule.delete_event(self.event_name)
        self.reject()
def accept(self):
event_type = self.event_type.text()
if event_type == "":
error = QMessageBox()
error.setText("Please select a type for the event.")
error.exec_()
self.event_type.setFocus()
return
# Check Name
if len(self.get_name()) == 0:
error = QMessageBox()
error.setText("Please enter a name for the event.")
error.exec_()
self.name_edit.setFocus()
return
if event_type in ["Class", "Recurring Event"]:
# Check Start/End Date
start_date = self.start_date_model.content.date()
end_date = self.end_date_model.content.date()
if start_date >= end_date:
error = QMessageBox()
error.setText("End date cannot {} start date.".format("be equal to" if start_date == end_date else "come before"))
error.exec_()
if event_type == "Class":
self.class_end_date.setFocus()
else:
self.event_end_date.setFocus()
return
if event_type == "Class":
# Check Blocks
# noinspection PyTypeChecker
class_layout:QFormLayout = self.stack.currentWidget().layout()
for row in range(self.rows_before_blocks, class_layout.rowCount()):
block_widget = class_layout.itemAt(row, QFormLayout.FieldRole).widget()
if block_widget.isHidden():
continue
# Make sure a day is selected for each block
if not block_widget.is_valid():
block_name = "the class block" if self.blocks == 1 else str.lower(
class_layout.itemAt(row, QFormLayout.LabelRole).widget().text()).replace(" time:", "")
error = QMessageBox()
error.setText("Please select a valid day for {}.".format(block_name))
error.exec_()
return
# Check for duplicate blocks
for other in range(self.rows_before_blocks, class_layout.rowCount() - 1):
if row == other:
continue
other_block_widget = class_layout.itemAt(other, QFormLayout.FieldRole).widget()
same_time = block_widget.get_time() == other_block_widget.get_time()
same_day = block_widget.day_picker.get_day() == other_block_widget.day_picker.get_day()
if same_time and same_day:
error = QMessageBox()
error.setText("Block {} and {} cannot have the same day and time.".format(
row - self.rows_before_blocks+1, other - self.rows_before_blocks+1))
error.exec_()
return
if event_type == "Recurring Event":
# Make sure a day is selected
if not self.recurring_event_time_picker.is_valid():
error = QMessageBox()
error.setText("Please select a valid day for this event.")
error.exec_()
self.recurring_event_time_picker.setFocus()
return
if event_type == "One Time Class Event":
# Check Class
if len(self.class_picker.text()) == 0:
error = QMessageBox()
error.setText("Please select a class for this event.")
error.exec_()
self.class_picker.setFocus()
return
# Valid name
if self.get_name() in self.schedule.schedule.keys():
if self.data is None:
error = QMessageBox()
error.setText("An event with this name already exists, would you like to overwrite it?")
error.setStandardButtons(error.Apply | error.Cancel)
result = error.exec_()
if result == error.Apply:
to_overwrite = self.schedule.schedule[self.get_name()]
if to_overwrite["type"] == "class":
if self.event_type.text() == "One Time Class Event":
if self.class_picker.text() == self.get_name():
error = QMessageBox()
error.setText("Cannot overwrite a class | |
<filename>src/analyses/plot/plot_utils.py
import pandas as pd
import numpy as np
import seaborn as sns
import seaborn as sn
from training.config import Config
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from scipy.optimize import curve_fit
def get_heatmaps(data, no_pred=False):
    """
    get_heatmaps(data, no_pred) -> Array, int
    Builds a square comparison heatmap matrix: observed values fill the
    upper triangle, observed (no_pred=True) or predicted values fill the
    lower triangle. Diagonal entries are averaged across the two halves.
    Args:
        data (DataFrame): Frame with values and predictions
        no_pred (bool): One of True or False. If True, then plots observed values on both sides.
    """
    st = int(data["i"].min())
    # shift indices so they start at zero (note: mutates the caller's frame)
    data["i"] = data["i"] - st
    data["j"] = data["j"] - st
    nr = int(data["j"].max()) + 1
    rows = np.array(data["i"]).astype(int)
    cols = np.array(data["j"]).astype(int)
    # observed values populate the upper triangle
    hic_mat = np.zeros((nr, nr))
    hic_mat[rows, cols] = np.array(data["v"])
    hic_upper = np.triu(hic_mat)
    # mirror with either observed or predicted values for the lower triangle
    mirror_col = "v" if no_pred else "pred"
    hic_mat[cols, rows] = np.array(data[mirror_col])
    hic_lower = np.tril(hic_mat)
    hic_mat = hic_upper + hic_lower
    # the diagonal was written by both halves, so halve it
    hic_mat[np.diag_indices_from(hic_mat)] /= 2
    return hic_mat, st
def plot_foxg1(cfg, data):
    """
    plot_foxg1(cfg, data) -> No return object
    Plots window around foxg1 ko site.
    Args:
        cfg (Config): configuration to use
        data (DataFrame): Frame with values and predictions
    """
    site = cfg.foxg1_indices
    # re-centre indices on the knockout site (note: mutates the caller's frame)
    data["i"] = data["i"] - site
    data["j"] = data["j"] - site
    "window"
    # keep only a +/-100 bin window around the site, then shift to 0..200
    data = data.loc[(data["i"] >= -100) & (data["i"] <= 100) &
                    (data["j"] >= -100) & (data["j"] <= 100)]
    data["i"] = data["i"] + 100
    data["j"] = data["j"] + 100
    "form matrix"
    # observed values in the upper triangle, predictions in the lower
    nr = 201
    rows = np.array(data["i"]).astype(int)
    cols = np.array(data["j"]).astype(int)
    hic_mat = np.zeros((nr, nr))
    hic_mat[rows, cols] = np.array(data["v"])
    hic_upper = np.triu(hic_mat)
    hic_mat[cols, rows] = np.array(data["pred"])
    hic_lower = np.tril(hic_mat)
    hic_mat = hic_upper + hic_lower
    # the diagonal was written by both halves, so halve it
    hic_mat[np.diag_indices_from(hic_mat)] /= 2
    "plot"
    simple_plot(hic_mat, mode="reds")
def simple_plot(hic_win, mode):
    """
    simple_plot(hic_win, mode) -> No return object
    plots heatmaps of reds or differences.
    Args:
        hic_win (Array): Matrix of Hi-C values
        mode (string): one of reds or diff
    """
    if mode == "reds":
        plt.figure()
        sns.set_theme()
        # fixed 0..1 color scale so separate plots are comparable
        ax = sns.heatmap(hic_win, cmap="Reds", vmin=0, vmax=1)
        ax.set_yticks([])
        ax.set_xticks([])
        # NOTE(review): output path is hard-coded to a local home directory
        plt.savefig("/home/kevindsouza/Downloads/heatmap.svg", format="svg")
        plt.show()
    if mode == "diff":
        plt.figure()
        sns.set_theme()
        # blue-red diverging palette centred on zero for signed differences
        rdgn = sns.diverging_palette(h_neg=220, h_pos=14, s=79, l=55, sep=3, as_cmap=True)
        sns.heatmap(hic_win, cmap=rdgn, center=0.00, cbar=True)
        plt.yticks([])
        plt.xticks([])
        # plt.savefig("/home/kevindsouza/Downloads/ctcf_ko.png")
        plt.show()
def indices_diff_mat(indice, st, hic_mat, mode="ctcf"):
    """
    indices_diff_mat(indice, st, hic_mat, mode) -> Array
    Extracts a square window of hic_mat around the given indice(s),
    clipped to the matrix bounds.
    Args:
        indice (Array): pair of indices in "tadbs" mode, a scalar otherwise
        st (int): Starting indice
        hic_mat (Array): Matrix of Hi-C values
        mode (string): tadbs or others
    """
    nrows = len(hic_mat)
    if mode == "tadbs":
        i = indice[0] - st
        j = indice[1] - st
        win_start = max(i - 98, 0)
        # NOTE(review): the bound is checked against j but the window stops at
        # i + 98 — behaviour preserved as-is; confirm whether j + 98 was meant
        win_stop = i + 98 if j + 98 <= nrows - 1 else nrows - 1
    else:
        i = indice - st
        win_start = max(i - 100, 0)
        win_stop = min(i + 100, nrows - 1)
    return hic_mat[win_start:win_stop, win_start:win_stop]
def plot_frame_error(error_list):
    """
    plot_frame_error(error_list) -> No return object
    Plot average error against position in frame.
    Args:
        error_list (List): List of errors, one per frame position
    """
    # derive positions from the data instead of hard-coding 150 entries,
    # so the plot works for any frame length (identical for length 150)
    pos_list = np.arange(len(error_list))
    plt.figure()
    plt.xlabel("Position in Frame", fontsize=14)
    plt.ylabel("Average Error", fontsize=14)
    plt.plot(pos_list, error_list)
    plt.grid(False)
    plt.show()
def plot_smoothness(representations):
    """
    plot_smoothness(representations) -> No return object
    Plot smoothness of representations: average norm of the difference
    between a row's embedding and its neighbours, as a function of offset.
    Args:
        representations (Array): representation matrix
    """
    window = 2000
    nrows = len(representations)
    diff_list = np.arange(-window, window + 1)
    # drop the zero offset (a row compared with itself)
    diff_list = np.delete(diff_list, [window])
    diff_vals = np.zeros((nrows, 2 * window))
    for r in range(nrows):
        for i, d in enumerate(diff_list):
            if (r + d) >= 0 and (r + d) <= nrows - 1:
                # NOTE(review): ord=1 computes an L1 norm although the title and
                # axis labels below say "L2 Norm" — confirm which is intended
                diff_vals[r, i] = np.linalg.norm(representations[r, :] - representations[r + d, :], ord=1)
            else:
                # out-of-range neighbour: the cell stays zero
                continue
    diff_reduce = diff_vals.mean(axis=0)
    plt.title("Average L2 Norm of Embeddings with Distance")
    plt.xlabel("Distance in 10 Kbp", fontsize=14)
    plt.ylabel("Average L2 Norm", fontsize=14)
    plt.plot(diff_list, diff_reduce)
    # NOTE(review): the `b` argument of plt.grid was renamed `visible` and
    # removed in Matplotlib 3.8 — this call fails on current Matplotlib
    plt.grid(b=None)
    plt.show()
def plot3d(representations):
    """
    plot3d(representations) -> No return object
    Plot first 3 dims of representations as a red 3D scatter.
    Args:
        representations (Array): representation matrix
    """
    plt.figure()
    ax = plt.axes(projection='3d')
    # pass the color via the `c` keyword: as a 4th positional argument it
    # would be consumed as `zdir` and the intended red color silently ignored
    ax.scatter3D(representations[:, 0], representations[:, 1], representations[:, 2], c='red')
    plt.show()
def plot_euclid_heatmap(representations):
    """
    plot_euclid_heatmap(representations) -> No return object
    Plot heatmap of pairwise Euclidean distances between representations.
    Args:
        representations (Array): representation matrix
    """
    count = len(representations)
    distances = np.zeros((count, count))
    # fill the full (symmetric) matrix of pairwise distances
    for a in range(count):
        for b in range(count):
            distances[a, b] = np.linalg.norm(representations[a, :] - representations[b, :])
    simple_plot(distances, mode="reds")
def plot_pr_curve(precision, recall):
    """
    plot_pr_curve(precision, recall) -> No return object
    Plot PR curve.
    Args:
        precision (List): List of precision values
        recall (List): List of recall values
    """
    # step plot matches how PR curves are conventionally drawn
    plt.step(recall, precision, color='b', alpha=0.2, where='post')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('2-class Precision-Recall curve')
    # saved into the current working directory
    plt.savefig('XGBoost_PR')
    plt.show()
def plot_confusion_matrix(predictions):
    """
    plot_confusion_matrix(predictions) -> No return object
    Plot confusion matrix for subcompartments.
    Args:
        predictions (DataFrame): frame of true and predicted subcompartments;
            column 7 is used as the true label and column 6 as the
            prediction — TODO confirm against the caller
    """
    conf_matrix = confusion_matrix(predictions[:, 7], predictions[:, 6])
    # drop the first row/column — presumably an unused/background label; verify
    conf_matrix = conf_matrix[1:, 1:]
    df_cm = pd.DataFrame(conf_matrix)
    # column-normalise so each predicted class sums to 1
    df_cm = df_cm.div(df_cm.sum(axis=0), axis=1)
    x_axis_labels = ["A2", "A1", "B1", "B2", "B3"]
    y_axis_labels = ["A2",
                    "A1", "B1", "B2", "B3"]
    sn.set(font_scale=1.4)
    # NOTE(review): fmt="d" with the normalised (float) frame is likely to
    # raise in seaborn's annotation code; a float format like ".2f" may be
    # intended — confirm
    sn.heatmap(df_cm, annot=True, cmap="YlGnBu", fmt="d", xticklabels=x_axis_labels,
               yticklabels=y_axis_labels)
    plt.show()
def plot_combined(map_frame):
    """
    plot_combined(map_frame) -> No return object
    Plot mean mAP for each downstream task.
    Args:
        map_frame (DataFrame): dataframe of map values
    """
    tasks = ["Gene Expression", "Replication Timing", "Enhancers", "TSS", "PE-Interactions", "FIREs",
             "Non-loop Domains", "Loop Domains"]
    df_main = pd.DataFrame(columns=["Tasks", "Hi-C-LSTM"])
    df_main["Tasks"] = tasks
    # one mean mAP per task, in the same order as `tasks`
    df_main["Hi-C-LSTM"] = [map_frame["gene_map"].mean(), map_frame["rep_map"].mean(),
                            map_frame["enhancers_map"].mean(), map_frame["tss_map"].mean(),
                            map_frame["pe_map"].mean(), map_frame["fire_map"].mean(),
                            map_frame["domains_map"].mean(), map_frame["loops_map"].mean()]
    plt.figure(figsize=(12, 10))
    plt.xticks(rotation=90, fontsize=20)
    plt.yticks(fontsize=20)
    plt.xlabel("Prediction Target", fontsize=20)
    plt.ylabel("mAP ", fontsize=20)
    plt.plot('Tasks', 'Hi-C-LSTM', data=df_main, marker='o', markersize=16, color="C3",
             linewidth=3,
             label="Hi-C-LSTM")
    plt.legend(fontsize=18)
    plt.show()
def plot_gbr(main_df):
    """
    plot_gbr(main_df) -> No return object
    Gets violin plots of Segway GBR
    Args:
        main_df (DataFrame): DF containing IG values and targets
    """
    # coerce importances to float (note: mutates the caller's frame)
    main_df["ig"] = main_df["ig"].astype(float)
    plt.figure(figsize=(16, 7))
    sns.set(font_scale=1.8)
    sns.set_style(style='white')
    plt.xticks(rotation=90, fontsize=20)
    plt.ylim(-1, 1)
    # one violin of IG importances per target label
    ax = sns.violinplot(x="target", y="ig", data=main_df)
    ax.set(xlabel='', ylabel='IG Importance')
    plt.show()
def plot_r2(comb_r2_df):
    """
    plot_r2(comb_r2_df) -> No return object
    plots average R2 values at a particular difference.
    Args:
        comb_r2_df (DataFrame): DF containing R2 values for various differences in positions.
    """
    max_diff = int(comb_r2_df['diff'].max())
    max_mb = 100
    num_bins_1mb = 10
    pos = np.arange(0, max_mb)
    r2_list = []
    final_r2 = np.zeros((max_mb, ))
    # get average r2 per difference; accumulate rows in a list and build the
    # frame once — DataFrame.append was removed in pandas 2.0
    avg_rows = []
    for diff in range(max_diff):
        subset_diff = comb_r2_df.loc[comb_r2_df["diff"] == diff]
        avg_rows.append({"diff": diff, "r2": subset_diff["r2"].mean()})
    avg_diff = pd.DataFrame(avg_rows, columns=["diff", "r2"])
    # mean r2 within each 1 Mb window
    for i in range(int(np.ceil(max_diff/num_bins_1mb))):
        r2_sub = avg_diff.loc[(avg_diff["diff"] >= i*num_bins_1mb) & (avg_diff["diff"] < (i+1)*num_bins_1mb)]
        r2_mean = r2_sub["r2"].mean(skipna=True)
        # clamp pathological window means to zero
        if r2_mean < -1:
            r2_list.append(0)
        else:
            r2_list.append(r2_mean)
    # average the per-window means across successive max_mb-sized stretches
    num_windows = int(np.ceil(len(r2_list)/max_mb))
    if num_windows == 1:
        r2_list_pos = np.zeros((num_windows, len(r2_list)))
    else:
        r2_list_pos = np.zeros((num_windows, max_mb))
    for k in range(num_windows):
        if k == num_windows - 1:
            # last stretch may be shorter; zero-pad the tail
            r2_list_pos[k, :len(r2_list[k * max_mb: ])] = r2_list[k * max_mb: ]
        else:
            r2_list_pos[k] = r2_list[k * max_mb: (k + 1) * max_mb]
    r2_list_pos = np.mean(r2_list_pos, axis=0)
    final_r2[:len(r2_list_pos)] = r2_list_pos
    # plot
    plt.figure(figsize=(12, 10))
    plt.plot(pos, final_r2, marker='', markersize=14, color='C0', label='Hi-C-LSTM')
    plt.tick_params(axis="x", labelsize=20, length=0)
    plt.tick_params(axis="y", labelsize=20)
    plt.xlabel('Distance between positions in Mbp', fontsize=20)
    plt.ylabel('R-squared for Replicate-1', fontsize=20)
    plt.legend(loc='upper right', fontsize=20)
    plt.show()
    print("done")
def scatter_tal_lm(ko, wt):
    """
    scatter_tal_lm(ko, wt) -> No return object
    Scatter plot of TAL1 and LMO2 prediction differences, with a fitted
    line through the origin.
    Args:
        ko (Array): Array containing after knockout values
        wt (Array): Array containing before knockout values
    """
    def func(x, a):
        # line through the origin; only the slope is fitted
        return a * x
    diff_mat = ko - wt
    # zero the corner entry — presumably an artefact cell; verify upstream
    diff_mat[0,0] = 0
    # observed differences sit in the upper triangle, predicted in the lower
    # (via the transpose), both flattened row-major
    og = np.triu(diff_mat)
    og = og.flatten(order='C')
    pred = np.triu(diff_mat.T)
    pred = pred.flatten(order='C')
    plt.figure(figsize=(10, 8))
    m, _ = curve_fit(func, og, pred)
    plt.scatter(og, pred, marker='o', alpha=0.5)
    plt.plot(og, m*og, "g")
    plt.tick_params(axis="x", labelsize=20, length=0)
    plt.tick_params(axis="y", labelsize=20)
    plt.xlim([-1.0, 1.0])
    plt.ylim([-1.0, 1.0])
    plt.xlabel('TAL1 KO - WT (Observed)', fontsize=20)
    plt.ylabel('TAL1 KO - WT (Predicted)', fontsize=20)
    plt.tight_layout()
    # NOTE(review): output path is hard-coded to a local home directory
    plt.savefig("/home/kevindsouza/Downloads/tal1_scatter.png")
def hist_2d(og, pred):
    """
    hist_2d(og, pred) -> No return object
    2D histogram of observed and predicted differences.
    Args:
        og (Array): Array containing observed differences
        pred (Array): Array containing predicted differences
    """
    # 50 equal-width bins spanning each axis' full data range
    x_min = np.min(og)
    x_max = np.max(og)
    y_min = np.min(pred)
    y_max = np.max(pred)
    x_bins = np.linspace(x_min, x_max, 50)
    y_bins = np.linspace(y_min, y_max, 50)
    plt.figure(figsize=(10, 8))
    hist, _, _, _ = plt.hist2d(og, pred, bins=[x_bins, y_bins])
    plt.xticks(fontsize=18)
    # the displayed window is clipped to the low-difference region
    plt.xlim([0, 0.1])
    plt.yticks(fontsize=18)
    plt.ylim([0.004, 0.1])
    plt.xlabel('LMO2 KO - WT (Original)', fontsize=20)
    plt.ylabel('LMO2 KO - WT (Predicted)', fontsize=20)
    plt.tight_layout()
    # NOTE(review): output path is hard-coded to a local home directory
    plt.savefig("/home/kevindsouza/Downloads/lmo2_hist.png")
if __name__ == '__main__':
cfg = Config()
cell = cfg.cell
for chr in cfg.chr_test_list:
'''
r2_diff = pd.read_csv(cfg.output_directory + "r2frame_%s_chr%s.csv" % (cell, str(chr)), sep="\t")
r2_diff = r2_diff.drop(['Unnamed: 0'], axis=1)
comb_r2_df = comb_r2_df.append(r2_diff, ignore_index=True)
plot_r2(comb_r2_df)
'''
pred_data = pd.read_csv(cfg.output_directory + "hiclstm_%s_predictions_chr%s.csv" % | |
the rotation.
"""
return self._coords
def do_rot(self, i):
    """Return True when the i-th rotation differs from the identity.

    The rotation counts as identity when all three of its angles are within
    1e-15 (absolute) of zero.
    """
    angles = self.rots[i]
    return not np.allclose(angles, np.zeros(3), rtol=0.0, atol=1.0e-15)
def angle_ref(self, *args, **kwds):
    """Compute the angle between transverse reference direction of initial and final frames

    For example, if angle of polarisation is psi in initial frame, it will be psi+angle_ref in final
    frame.

    Parameters
    ----------
    dir_or_vec : array
        Direction or vector (see Rotator.__call__)
    lonlat: bool, optional
        If True, assume input is longitude,latitude in degrees. Otherwise,
        theta,phi in radian. Default: False
    inv : bool, optional
        If True, use the inverse transforms. Default: False

    Returns
    -------
    angle : float, scalar or array
        Angle in radian (a scalar or an array if input is a sequence of direction/vector)

    Raises
    ------
    TypeError
        If the arguments cannot be interpreted as a direction or vector.
    """
    R = self
    lonlat = kwds.get("lonlat", False)
    inv = kwds.get("inv", False)
    # Normalise the flexible argument forms to a single 3-vector
    # (or an array of them): one sequence, (theta, phi)/(lon, lat), or x, y, z
    if len(args) == 1:
        arg = args[0]
        if not hasattr(arg, "__len__") or len(arg) < 2 or len(arg) > 3:
            raise TypeError("Argument must be a sequence of 2 or 3 " "elements")
        if len(arg) == 2:
            v = dir2vec(arg[0], arg[1], lonlat=lonlat)
        else:
            v = arg
    elif len(args) == 2:
        v = dir2vec(args[0], args[1], lonlat=lonlat)
    elif len(args) == 3:
        v = args
    else:
        raise TypeError("Either 1, 2 or 3 arguments accepted")
    # Rotate both the input direction and the north pole, then take the
    # azimuth of the rotated pole in the tangent plane at vp — that is the
    # rotation of the local transverse reference direction
    vp = R(v, inv=inv)
    north_pole = R([0.0, 0.0, 1.0], inv=inv)
    sinalpha = north_pole[0] * vp[1] - north_pole[1] * vp[0]
    cosalpha = north_pole[2] - vp[2] * np.dot(north_pole, vp)
    return np.arctan2(sinalpha, cosalpha)
def rotate_alm(self, alm, lmax=None, mmax=None):
    """Rotate Alms with the transform defined in the Rotator object

    see the docstring of the rotate_alm function defined
    in the healpy package, this function **returns** the rotated alms,
    does not rotate in place

    Parameters
    ----------
    alm : np.ndarray
        Input alm coefficients; left untouched
    lmax, mmax : int, optional
        Maximum l and m of the alm expansion

    Returns
    -------
    rotated_alm : np.ndarray
        Rotated copy of the input alms
    """
    rotated_alm = alm.copy()  # rotate_alm works inplace
    rotate_alm(rotated_alm, matrix=self.mat, lmax=lmax, mmax=mmax)
    return rotated_alm
def rotate_map_alms(self, m, use_pixel_weights=True, lmax=None, mmax=None):
    """Rotate a HEALPix map to a new reference frame in spherical harmonics space

    This is generally the best strategy to rotate/change reference frame of maps.
    If the input map is band-limited, i.e. it can be represented exactly by
    a spherical harmonics transform under a specific lmax, the map rotation
    will be invertible.

    Parameters
    ----------
    m : np.ndarray
        Input map, 1 map is considered I, 2 maps:[Q,U], 3 maps:[I,Q,U]
    use_pixel_weights : bool, optional
        Use pixel weights in map2alm
    lmax, mmax : int, optional
        Maximum l and m of the alm expansion used for the rotation

    Returns
    -------
    m_rotated : np.ndarray
        Map in the new reference frame
    """
    # analyse -> rotate the alms -> synthesise back at the input map's nside
    alm = sphtfunc.map2alm(
        m, use_pixel_weights=use_pixel_weights, lmax=lmax, mmax=mmax
    )
    rotated_alm = self.rotate_alm(alm, lmax=lmax, mmax=mmax)
    return sphtfunc.alm2map(
        rotated_alm, lmax=lmax, mmax=mmax, nside=pixelfunc.get_nside(m)
    )
    def rotate_map_pixel(self, m):
        """Rotate a HEALPix map to a new reference frame in pixel space

        It is generally better to rotate in spherical harmonics space, see
        the rotate_map_alms method. A case where pixel space rotation is
        better is for heavily masked maps where the spherical harmonics
        transform is not well defined.
        This function first rotates the pixels centers of the new reference
        frame to the original reference frame, then uses hp.get_interp_val
        to interpolate bilinearly the pixel values, finally fixes Q and U
        polarization by the modification to the psi angle caused by
        the Rotator using Rotator.angle_ref.
        Due to interpolation, this function generally suppresses the signal at
        high angular scales.

        Parameters
        ----------
        m : np.ndarray
            Input map, 1 map is considered I, 2 maps:[Q,U], 3 maps:[I,Q,U]

        Returns
        -------
        m_rotated : np.ndarray
            Map in the new reference frame
        """
        if pixelfunc.maptype(m) == 0:  # a single map is converted to a list
            m = [m]
        npix = len(m[0])
        nside = pixelfunc.npix2nside(npix)
        # Centers of every pixel of the output (new-frame) map.
        theta_pix_center, phi_pix_center = pixelfunc.pix2ang(
            nside=nside, ipix=np.arange(npix)
        )
        # Rotate the pixels center of the new reference frame to the original frame
        # (inverse transform, so we can *pull* values from the input map).
        theta_pix_center_rot, phi_pix_center_rot = self.I(
            theta_pix_center, phi_pix_center
        )
        # Interpolate the original map to the pixels centers in the new ref frame
        m_rotated = [
            pixelfunc.get_interp_val(each, theta_pix_center_rot, phi_pix_center_rot)
            for each in m
        ]
        # Rotate polarization
        if len(m_rotated) > 1:
            # Create a complex map from QU and apply the rotation in psi due to the rotation
            # Slice from the end of the array so that it works both for QU and IQU
            L_map = (m_rotated[-2] + m_rotated[-1] * 1j) * np.exp(
                1j * 2 * self.angle_ref(theta_pix_center_rot, phi_pix_center_rot)
            )
            # Overwrite the Q and U maps with the correct values
            m_rotated[-2] = np.real(L_map)
            m_rotated[-1] = np.imag(L_map)
        else:
            # Single (intensity-only) map: unwrap the one-element list.
            m_rotated = m_rotated[0]
        return m_rotated
def __repr__(self):
return (
"[ "
+ ", ".join([str(self._coords), str(self._rots), str(self._invs)])
+ " ]"
)
__str__ = __repr__
################################################################
#
# Helper functions for rotation
# used in the Rotator class.
def rotateVector(rotmat, vec, vy=None, vz=None, do_rot=True):
    """Apply the rotation matrix *rotmat* to one or many 3-vectors.

    Parameters
    ----------
    rotmat : float, array-like shape (3,3)
        The rotation matrix
    vec : float, scalar or array-like
        The vector(s) to transform (shape (3,) or (3,N)), or only the x
        component (scalar or shape (N,)) when vy and vz are supplied
    vy : float, scalar or array-like, optional
        The y component of the vector (scalar or shape (N,))
    vz : float, scalar or array-like, optional
        The z component of the vector (scalar or shape (N,))
    do_rot : bool, optional
        When False, return the input unchanged instead of rotating.

    Returns
    -------
    vec : float, array
        The component of the rotated vector(s).

    See Also
    --------
    Rotator
    """
    components_given = vy is not None and vz is not None
    if not components_given and (vy is not None or vz is not None):
        # Exactly one of vy/vz supplied: ambiguous call.
        raise TypeError("You must give either vec only or vec, vy " "and vz parameters")
    if not do_rot:
        return (vec, vy, vz) if components_given else vec
    stacked = np.array([vec, vy, vz]) if components_given else vec
    # Contract rotmat's columns against the vector axis (works for (3,) and (3,N)).
    return np.tensordot(rotmat, stacked, axes=(1, 0))
def rotateDirection(rotmat, theta, phi=None, do_rot=True, lonlat=False):
    """Rotate direction(s) given as angles theta, phi using *rotmat*.

    Parameters
    ----------
    rotmat : float, array-like shape (3,3)
        The rotation matrix
    theta : float, scalar or array-like
        The angle theta (scalar or shape (N,)), or both angles
        (scalar or shape (2, N)) when phi is omitted.
    phi : float, scalar or array-like, optional
        The angle phi (scalar or shape (N,)).
    do_rot : bool, optional
        if True, really perform the operation, if False do nothing.
    lonlat : bool
        If True, input angles are assumed to be longitude and latitude in
        degree, otherwise they are co-latitude and longitude in radians.

    Returns
    -------
    angles : float, array
        The angles describing the rotated vector(s).

    See Also
    --------
    Rotator
    """
    # angles -> cartesian, rotate, cartesian -> angles
    cart = dir2vec(theta, phi, lonlat=lonlat)
    rx, ry, rz = rotateVector(rotmat, cart, do_rot=do_rot)
    return vec2dir(rx, ry, rz, lonlat=lonlat)
def vec2dir(vec, vy=None, vz=None, lonlat=False):
    """Convert cartesian vector(s) into angles on the sphere.

    Parameters
    ----------
    vec : float, scalar or array-like
        The vector to transform (shape (3,) or (3,N)), or only the x
        component (scalar or shape (N,)) when vy and vz are supplied
    vy : float, scalar or array-like, optional
        The y component of the vector (scalar or shape (N,))
    vz : float, scalar or array-like, optional
        The z component of the vector (scalar or shape (N,))
    lonlat : bool, optional
        If True, returned angles are longitude and latitude in degree,
        otherwise co-latitude and longitude in radians (default)

    Returns
    -------
    angles : float, array
        The angles (unit depending on *lonlat*) in an array of
        shape (2,) (if scalar input) or (2, N)

    See Also
    --------
    :func:`dir2vec`, :func:`pixelfunc.ang2vec`, :func:`pixelfunc.vec2ang`
    """
    if np.any(np.isnan(vec)):
        return np.nan, np.nan
    if vy is None and vz is None:
        vx, vy, vz = vec
    elif vy is not None and vz is not None:
        vx = vec
    else:
        raise TypeError("You must either give both vy and vz or none of them")
    norm = np.sqrt(vx ** 2 + vy ** 2 + vz ** 2)
    ang = np.empty((2, norm.size))
    ang[0, :] = np.arccos(vz / norm)  # co-latitude theta
    ang[1, :] = np.arctan2(vy, vx)    # longitude phi
    if not lonlat:
        return ang.squeeze()
    # Degrees, and latitude = 90 - theta; order swapped to (lon, lat).
    ang = np.degrees(ang)
    ang[0, :] *= -1.0
    ang[0, :] += 90.0
    return ang[::-1, :].squeeze()
def dir2vec(theta, phi=None, lonlat=False):
"""Transform a direction theta,phi to a unit vector.
Parameters
----------
theta : float, scalar or array-like
| |
import cv2
import random
import numpy as np
import torch
# helper --
# def make_grid_image(width, height, grid_size = 16):
# image = np.zeros((height, width), np.float32)
# for y in range(0, height, 2 * grid_size):
# for x in range(0, width, 2 * grid_size):
# image[y: y + grid_size, x:x + grid_size] = 1
#
# # for y in range(height+grid_size,2*grid_size):
# # for x in range(width+grid_size,2*grid_size):
# # image[y: y+grid_size,x:x+grid_size] = 1
#
# return image
# ---
def do_identity(image, magnitude=None):
    """No-op augmentation: return *image* unchanged (*magnitude* is ignored)."""
    return image
# *** geometric ***
def do_random_projective(image, magnitude=0.2):
    """Apply a random keystone (projective) warp along one randomly chosen edge.

    One of the four image edges is picked and its two corners are pulled
    toward/away from each other by a random fraction of *magnitude*.
    """
    shift = np.random.uniform(-1, 1) * magnitude
    height, width = image.shape[:2]
    # Unit-square corners, row order: TL, TR, BR, BL.
    unit = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], np.float64)
    moved = unit.copy()
    edge = np.random.choice(['top', 'bottom', 'left', 'right'])
    if edge == 'top':
        moved[0, 0] += shift
        moved[1, 0] -= shift
    elif edge == 'bottom':
        moved[3, 0] += shift
        moved[2, 0] -= shift
    elif edge == 'left':
        moved[0, 1] += shift
        moved[3, 1] -= shift
    else:  # 'right'
        moved[1, 1] += shift
        moved[2, 1] -= shift
    scale = np.array([[width, height]], np.float64)
    src = (unit * scale).astype(np.float32)
    dst = (moved * scale).astype(np.float32)
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(image, transform, (width, height), flags=cv2.INTER_LINEAR,
                               borderMode=cv2.BORDER_CONSTANT, borderValue=0)
def do_random_perspective(image, magnitude=0.1):
    """Apply a random perspective warp by jittering all four corners independently."""
    jitter = np.random.uniform(-1, 1, (4, 2)) * magnitude
    height, width = image.shape[:2]
    unit = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], np.float64)
    scale = np.array([[width, height]], np.float64)
    src = (unit * scale).astype(np.float32)
    dst = ((unit + jitter) * scale).astype(np.float32)
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(image, transform, (width, height), flags=cv2.INTER_LINEAR,
                               borderMode=cv2.BORDER_CONSTANT, borderValue=0)
def do_random_scale(image, magnitude=0.2):
    """Scale uniformly about the origin by a random factor in [1-m, 1+m]."""
    factor = 1 + np.random.uniform(-1, 1) * magnitude
    height, width = image.shape[:2]
    affine = np.array([[factor, 0, 0],
                       [0, factor, 0]], np.float32)
    return cv2.warpAffine(image, affine, (width, height), flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_CONSTANT, borderValue=0)
def do_random_shear_x(image, magnitude=0.2):
    """Shear horizontally by a random factor in [-magnitude, magnitude]."""
    factor = np.random.uniform(-1, 1) * magnitude
    height, width = image.shape[:2]
    affine = np.array([[1, factor, 0],
                       [0, 1, 0]], np.float32)
    return cv2.warpAffine(image, affine, (width, height), flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_CONSTANT, borderValue=0)
def do_random_shear_y(image, magnitude=0.1):
    """Shear vertically by a random factor in [-magnitude, magnitude]."""
    factor = np.random.uniform(-1, 1) * magnitude
    height, width = image.shape[:2]
    affine = np.array([[1, 0, 0],
                       [factor, 1, 0]], np.float32)
    return cv2.warpAffine(image, affine, (width, height), flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_CONSTANT, borderValue=0)
def do_random_stretch_x(image, magnitude=0.2):
    """Stretch/compress horizontally by a random factor in [1-m, 1+m]."""
    factor = 1 + np.random.uniform(-1, 1) * magnitude
    height, width = image.shape[:2]
    affine = np.array([[factor, 0, 0],
                       [0, 1, 0]], np.float32)
    return cv2.warpAffine(image, affine, (width, height), flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_CONSTANT, borderValue=0)
def do_random_stretch_y(image, magnitude=0.2):
    """Stretch/compress vertically by a random factor in [1-m, 1+m]."""
    factor = 1 + np.random.uniform(-1, 1) * magnitude
    height, width = image.shape[:2]
    affine = np.array([[1, 0, 0],
                       [0, factor, 0]], np.float32)
    return cv2.warpAffine(image, affine, (width, height), flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_CONSTANT, borderValue=0)
def do_random_rotate(image, magnitude=15):
    """Rotate about the image center by a random angle in [-m, m] degrees."""
    angle = np.random.uniform(-1, 1) * magnitude
    height, width = image.shape[:2]
    center = (width // 2, height // 2)
    affine = cv2.getRotationMatrix2D(center, -angle, 1.0)
    return cv2.warpAffine(image, affine, (width, height), flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_CONSTANT, borderValue=0)
# ----
def do_random_grid_distortion(image, magnitude=0.3):
    """Warp *image* with a randomly perturbed regular grid (elastic-like distortion).

    Each of the 5 grid spans along x and along y is stretched/compressed by an
    independent random factor in [1 - magnitude, 1 + magnitude]; the resulting
    per-pixel sampling maps are fed to cv2.remap with bilinear interpolation
    and zero border fill.
    """
    num_step = 5
    distort = magnitude
    # http://pythology.blogspot.sg/2014/03/interpolation-on-regular-distorted-grid.html
    # One stretch factor per grid line (num_step + 1 of them per axis).
    distort_x = [1 + random.uniform(-distort, distort) for i in range(num_step + 1)]
    distort_y = [1 + random.uniform(-distort, distort) for i in range(num_step + 1)]
    # ---
    height, width = image.shape[:2]
    # Build the x sampling coordinates: each span is linearly re-spaced
    # according to its random stretch factor.
    xx = np.zeros(width, np.float32)
    step_x = width // num_step
    prev = 0
    for i, x in enumerate(range(0, width, step_x)):
        start = x
        end = x + step_x
        if end > width:
            end = width
            cur = width  # clamp the final span so the map stays inside the image
        else:
            cur = prev + step_x * distort_x[i]
        xx[start:end] = np.linspace(prev, cur, end - start)
        prev = cur
    # Same construction for the y sampling coordinates.
    yy = np.zeros(height, np.float32)
    step_y = height // num_step
    prev = 0
    for idx, y in enumerate(range(0, height, step_y)):
        start = y
        end = y + step_y
        if end > height:
            end = height
            cur = height
        else:
            cur = prev + step_y * distort_y[idx]
        yy[start:end] = np.linspace(prev, cur, end - start)
        prev = cur
    # Expand the 1-D coordinate vectors into full per-pixel sampling maps.
    map_x, map_y = np.meshgrid(xx, yy)
    map_x = map_x.astype(np.float32)
    map_y = map_y.astype(np.float32)
    image = cv2.remap(image, map_x, map_y, interpolation=cv2.INTER_LINEAR,
                      borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    return image
# https://github.com/albumentations-team/albumentations/blob/8b58a3dbd2f35558b3790a1dbff6b42b98e89ea5/albumentations/augmentations/transforms.py
# https://ciechanow.ski/mesh-transforms/
# https://stackoverflow.com/questions/53907633/how-to-warp-an-image-using-deformed-mesh
# http://pythology.blogspot.sg/2014/03/interpolation-on-regular-distorted-grid.html
def do_random_custom_distortion1(image, magnitude=0.15):
    """Distort *image* by jittering the mid-edge/center points of a 3x3 control
    lattice and re-warping the four central patches with affine transforms.

    Control points are a 3x3 lattice in unit coordinates (indices 0..8,
    row-major). Only the vertical mid-line x positions (points 1, 4, 7) and
    the horizontal mid-line y positions (points 3, 4, 5) are moved, so the
    outer corners stay fixed. Expects a single-channel float image.
    """
    distort = magnitude
    height, width = image.shape
    s_x = np.array([0.0, 0.5, 1.0, 0.0, 0.5, 1.0, 0.0, 0.5, 1.0])
    s_y = np.array([0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 1.0, 1.0, 1.0])
    d_x = s_x.copy()
    d_y = s_y.copy()
    # Perturb only the mid-line control points (see docstring).
    d_x[[1, 4, 7]] += np.random.uniform(-distort, distort, 3)
    d_y[[3, 4, 5]] += np.random.uniform(-distort, distort, 3)
    # Unit coordinates -> pixel coordinates.
    s_x = (s_x * width)
    s_y = (s_y * height)
    d_x = (d_x * width)
    d_y = (d_y * height)
    # ---
    # `distort` is reused here as the output accumulator image.
    distort = np.zeros((height, width), np.float32)
    # Each index triple defines one patch around the center point 4; warp it
    # with the affine map given by its three control points.
    for index in ([4, 1, 3], [4, 1, 5], [4, 7, 3], [4, 7, 5]):
        point = np.stack([s_x[index], s_y[index]]).T
        qoint = np.stack([d_x[index], d_y[index]]).T
        src = np.array(point, np.float32)
        dst = np.array(qoint, np.float32)
        mat = cv2.getAffineTransform(src, dst)
        point = np.round(point).astype(np.int32)
        x0 = np.min(point[:, 0])
        x1 = np.max(point[:, 0])
        y0 = np.min(point[:, 1])
        y1 = np.max(point[:, 1])
        # Mask out the source patch, warp it, and merge via per-pixel max.
        mask = np.zeros((height, width), np.float32)
        mask[y0:y1, x0:x1] = 1
        mask = mask * image
        warp = cv2.warpAffine(mask, mat, (width, height), borderMode=cv2.BORDER_REPLICATE)
        distort = np.maximum(distort, warp)
        # distort = distort+warp
    return distort
# *** intensity ***
def do_random_contast(image, magnitude=0.2):
    """Scale intensities by a random contrast gain in [1-m, 1+m], clipped to [0, 1].

    (The misspelled name is kept for backward compatibility with callers.)
    """
    gain = 1 + random.uniform(-1, 1) * magnitude
    scaled = image.astype(np.float32) * gain
    return np.clip(scaled, 0, 1)
def do_random_block_fade(image, magnitude=0.3):
    """Darken one random rectangular patch inside the glyph's bounding box.

    The bounding box of "ink" pixels (values < 0.5) is located; a random
    sub-rectangle whose relative size is drawn from [0.1, magnitude] is
    multiplied by a random factor in [0.1, 0.5], and the image is clipped
    back to [0, 1].  Mutates *image* in place and returns it.

    Parameters
    ----------
    image : np.ndarray
        Float image in [0, 1], shape (H, W).
    magnitude : float
        Upper bound of the faded patch's relative size.

    Returns
    -------
    np.ndarray
        The image, returned unchanged when it has no ink or the ink
        region is tiny.
    """
    size = [0.1, magnitude]
    height, width = image.shape
    # Bounding box of dark ("ink") pixels.  (A previous version also drew a
    # cv2.rectangle on a copy here, but the copy was immediately discarded;
    # that dead code has been removed.)
    m = image < 0.5
    if m.sum() == 0:
        return image
    m = np.where(m)
    y0, y1, x0, x1 = np.min(m[0]), np.max(m[0]), np.min(m[1]), np.max(m[1])
    w = x1 - x0
    h = y1 - y0
    if w * h < 10:
        return image
    # Random patch extents, then a random top-left corner inside the box.
    ew, eh = np.random.uniform(*size, 2)
    ew = int(ew * w)
    eh = int(eh * h)
    # max(..., 1) guards np.random.randint against an empty range (ValueError)
    # when the patch spans the whole box.
    ex = np.random.randint(0, max(w - ew, 1)) + x0
    ey = np.random.randint(0, max(h - eh, 1)) + y0
    image[ey:ey + eh, ex:ex + ew] *= np.random.uniform(0.1, 0.5)
    image = np.clip(image, 0, 1)
    return image
# *** noise ***
# https://www.kaggle.com/ren4yu/bengali-morphological-ops-as-image-augmentation
def do_random_erode(image, magnitude=2):
    """Erode with a random elliptical kernel of size 1 .. 1+magnitude (thins strokes)."""
    k = int(round(1 + np.random.uniform(0, 1) * magnitude))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k, k))
    return cv2.erode(image, kernel, iterations=1)
def do_random_dilate(image, magnitude=1.5):
    """Dilate with a random elliptical kernel of size 1 .. 1+magnitude (thickens strokes)."""
    k = int(round(1 + np.random.uniform(0, 1) * magnitude))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k, k))
    return cv2.dilate(image, kernel, iterations=1)
def do_random_sprinkle(image, magnitude=0.2):
    """Zero out a few 16x16 patches centered on bright pixels ("sprinkle" noise).

    Candidate centers are found on a 4x downscaled copy for speed, then
    mapped back to full resolution.
    """
    patch = 16
    num_sprinkle = int(round(1 + np.random.randint(10) * magnitude))
    height, width = image.shape
    image = image.copy()
    small = cv2.resize(image, dsize=None, fx=0.25, fy=0.25)
    ys, xs = np.where(small > 0.25)
    total = len(ys)
    if total == 0:
        return image
    half = patch // 2
    picks = np.random.choice(total, num_sprinkle)
    for sy, sx in zip(ys[picks], xs[picks]):
        cy = sy * 4 + 2  # map the downscaled coordinate back to full resolution
        cx = sx * 4 + 2
        image[cy - half:cy + half, cx - half:cx + half] = 0
    return image
# https://stackoverflow.com/questions/14435632/impulse-gaussian-and-salt-and-pepper-noise-with-opencv
def do_random_noise(image, magnitude=0.15):
    """Add per-pixel uniform noise in [-magnitude, magnitude], clipped to [0, 1]."""
    height, width = image.shape
    perturbed = image + np.random.uniform(-1, 1, (height, width)) * magnitude
    return np.clip(perturbed, 0, 1)
def do_random_line(image, magnitude=0.2):
    """Draw 1..N random lines (border edges or strokes crossing the image).

    Edge lines are drawn with weight 1/4 each, full-crossing strokes with
    weight 1; color, thickness and anti-aliasing are randomized per line.
    """
    num_lines = int(round(1 + np.random.randint(10) * magnitude))
    height, width = image.shape
    image = image.copy()

    def top_edge():
        return (0, 0), (width - 1, 0)

    def bottom_edge():
        return (0, height - 1), (width - 1, height - 1)

    def left_edge():
        return (0, 0), (0, height - 1)

    def right_edge():
        return (width - 1, 0), (width - 1, height - 1)

    def vertical_cross():
        xa, xb = np.random.choice(width, 2)
        return (xa, 0), (xb, height - 1)

    def horizontal_cross():
        ya, yb = np.random.choice(height, 2)
        return (0, ya), (width - 1, yb)

    candidates = [top_edge, bottom_edge, left_edge, right_edge,
                  vertical_cross, horizontal_cross]
    weights = np.array([1 / 4, 1 / 4, 1 / 4, 1 / 4, 1, 1])
    weights = weights / weights.sum()
    for _ in range(num_lines):
        chosen = np.random.choice(candidates, p=weights)
        (x0, y0), (x1, y1) = chosen()
        color = np.random.uniform(0, 1)
        thickness = np.random.randint(1, 5)
        line_type = np.random.choice([cv2.LINE_AA, cv2.LINE_4, cv2.LINE_8])
        cv2.line(image, (x0, y0), (x1, y1), color, thickness, line_type)
    return image
# batch augmentation that uses pairing, e.g mixup, cutmix, cutout #####################
def make_object_box(image):
m = image.copy()
cv2.rectangle(m, (0, 0), (236, 137), 0, 10)
m | |
<filename>pyCHX/Create_Report.py
'''
Yugang Created at Aug 08, 2016, CHX-NSLS-II
Create a PDF file from XPCS data analysis results, which are generated by CHX data analysis pipeline
How to use:
python Create_Report.py full_file_path uid output_dir (option)
An example of how to use it:
python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66
python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/test/
'''
def check_dict_keys(dicts, key):
    """Ensure *dicts* has *key*, inserting the placeholder 'unknown' if absent.

    Mutates the dict in place; existing values are never overwritten.
    """
    # Membership test directly on the dict: `key not in list(dicts.keys())`
    # built an O(n) list for what is an O(1) lookup.
    if key not in dicts:
        dicts[key] = 'unknown'
import h5py
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch, cm , mm
from reportlab.lib.colors import pink, green, brown, white, black, red, blue
from reportlab.lib.styles import getSampleStyleSheet
#from reportlab.platypus import Image, Paragraph, Table
from reportlab.lib.pagesizes import letter, A4
from pyCHX.chx_generic_functions import (pload_obj )
from PIL import Image
from time import time
from datetime import datetime
import sys,os
import pandas as pds
import numpy as np
def add_one_line_string(c, s, top, left=30, fontsize=11):
    """Draw the string *s* on canvas *c*, shrinking the font so long lines fit.

    When the rough width estimate fontsize*len(s) exceeds 1000 points, the
    font size is reduced proportionally before drawing.
    """
    if fontsize * len(s) > 1000:
        fontsize = 1000. / (len(s))
    c.setFont("Helvetica", fontsize)
    c.drawString(left, top, s)
def add_image_string( c, imgf, data_dir, img_left, img_top, img_height,
                      str1_left, str1_top,str1,
                      str2_left, str2_top, return_ = False ):
    """Draw image file *imgf* (located in *data_dir*) on canvas *c* with a
    blue title (*str1*) and a red 'filename: ...' caption; if the file is
    missing, draw the title plus a red '-->Not Calculated!' marker instead.

    When *return_* is True and the image exists, return the drawn width.
    """
    image = data_dir + imgf
    if os.path.exists(image):
        im = Image.open( image )
        # Aspect ratio (height/width) of the source image.
        ratio = float(im.size[1])/im.size[0]
        height= img_height
        # Keep aspect: width derived from the fixed display height.
        width = height/ratio
        #if width>400:
        #    width = 350
        #    height = width*ratio
        c.drawImage( image, img_left, img_top, width= width,height=height,mask=None)
        c.setFont("Helvetica", 16)
        c.setFillColor( blue )
        c.drawString(str1_left, str1_top,str1 )
        c.setFont("Helvetica", 12)
        c.setFillColor(red)
        c.drawString(str2_left, str2_top, 'filename: %s'%imgf )
        if return_:
            # NOTE(review): height/ratio equals the drawn *width*, not a
            # height -- confirm that callers expect the width here.
            return height/ratio
    else:
        # Image missing: draw the title and an explicit "not calculated" marker.
        c.setFillColor( blue )
        c.drawString( str1_left, str1_top, str1)
        c.setFillColor(red)
        c.drawString( str1_left, str1_top -40, '-->Not Calculated!' )
class create_pdf_report( object ):
'''Aug 16, YG@CHX-NSLS-II
Create a pdf report by giving data_dir, uid, out_dir
data_dir: the input data directory, including all necessary images
the images names should be:
meta_file = 'uid=%s-md'%uid
avg_img_file = 'uid=%s--img-avg-.png'%uid
ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid
qiq_file = 'uid=%s--Circular-Average-.png'%uid
ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid
Iq_t_file = 'uid=%s--Iq-t-.png'%uid
img_sum_t_file = 'uid=%s--img-sum-t.png'%uid
wat_file= 'uid=%s--Waterfall-.png'%uid
Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid
g2_file = 'uid=%s--g2-.png'%uid
g2_fit_file = 'uid=%s--g2--fit-.png'%uid
q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid
two_time_file = 'uid=%s--Two-time-.png'%uid
two_g2_file = 'uid=%s--g2--two-g2-.png'%uid
uid: the unique id
out_dir: the output directory
report_type:
'saxs': report saxs results
'gisaxs': report gisaxs results
Output:
A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder
'''
def __init__( self, data_dir, uid, out_dir=None, filename=None, load=True, user=None,
report_type='saxs',md=None, res_h5_filename=None ):
self.data_dir = data_dir
self.uid = uid
self.md = md
#print(md)
if user is None:
user = 'chx'
self.user = user
if out_dir is None:
out_dir = data_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
self.out_dir=out_dir
self.styles = getSampleStyleSheet()
self.width, self.height = letter
self.report_type = report_type
dt =datetime.now()
CurTime = '%02d/%02d/%s/-%02d/%02d/' % ( dt.month, dt.day, dt.year,dt.hour,dt.minute)
self.CurTime = CurTime
if filename is None:
filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid
filename=out_dir + filename
c = canvas.Canvas( filename, pagesize=letter)
self.filename= filename
self.res_h5_filename = res_h5_filename
#c.setTitle("XPCS Analysis Report for uid=%s"%uid)
c.setTitle(filename)
self.c = c
if load:
self.load_metadata()
def load_metadata(self):
uid=self.uid
data_dir = self.data_dir
#load metadata
meta_file = 'uid=%s_md'%uid
self.metafile = data_dir + meta_file
if self.md is None:
md = pload_obj( data_dir + meta_file )
self.md = md
else:
md = self.md
#print('Get md from giving md')
#print(md)
self.sub_title_num = 0
uid_g2 = None
uid_c12 = None
if 'uid_g2' in list(md.keys()):
uid_g2 = md['uid_g2']
if 'uid_c12' in list(md.keys()):
uid_c12 = md['uid_c12']
'''global definition'''
if 'beg_OneTime' in list( md.keys()):
beg_OneTime = md['beg_OneTime']
end_OneTime = md['end_OneTime']
else:
beg_OneTime = None
end_OneTime = None
if 'beg_TwoTime' in list( md.keys()):
beg_TwoTime = md['beg_TwoTime']
end_TwoTime = md['end_TwoTime']
else:
beg_TwoTime = None
end_TwoTime = None
try:
beg = md['beg']
end= md['end']
uid_ = uid + '_fra_%s_%s'%(beg, end)
if beg_OneTime is None:
uid_OneTime = uid + '_fra_%s_%s'%(beg, end)
else:
uid_OneTime = uid + '_fra_%s_%s'%(beg_OneTime, end_OneTime)
if beg_TwoTime is None:
uid_TwoTime = uid + '_fra_%s_%s'%(beg, end)
else:
uid_TwoTime = uid + '_fra_%s_%s'%(beg_TwoTime, end_TwoTime)
except:
uid_ = uid
uid_OneTime = uid
if beg is None:
uid_ = uid
uid_OneTime = uid
self.avg_img_file = 'uid=%s_img_avg.png'%uid
self.ROI_on_img_file = 'uid=%s_ROI_on_Image.png'%uid
self.qiq_file = 'uid=%s_q_Iq.png'%uid
self.qiq_fit_file = 'uid=%s_form_factor_fit.png'%uid
#self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid
if self.report_type =='saxs' or self.report_type =='ang_saxs':
self.ROI_on_Iq_file = 'uid=%s_ROI_on_Iq.png'%uid
elif self.report_type =='gi_saxs':
self.ROI_on_Iq_file = 'uid=%s_Qr_ROI.png'%uid
self.Iq_t_file = 'uid=%s_q_Iqt.png'%uid
self.img_sum_t_file = 'uid=%s_img_sum_t.png'%uid
self.wat_file= 'uid=%s_waterfall.png'%uid
self.Mean_inten_t_file= 'uid=%s_t_ROIs.png'%uid
self.oavs_file = 'uid=%s_OAVS.png'%uid
if uid_g2 is None:
uid_g2 = uid_OneTime
self.g2_file = 'uid=%s_g2.png'%uid_g2
self.g2_fit_file = 'uid=%s_g2_fit.png'%uid_g2
#print( self.g2_fit_file )
self.g2_new_page = False
self.g2_fit_new_page = False
if self.report_type =='saxs':
jfn = 'uid=%s_g2.png'%uid_g2
if os.path.exists( data_dir + jfn):
self.g2_file = jfn
else:
jfn = 'uid=%s_g2__joint.png'%uid_g2
if os.path.exists( data_dir + jfn):
self.g2_file = jfn
self.g2_new_page = True
#self.g2_new_page = True
jfn = 'uid=%s_g2_fit.png'%uid_g2
if os.path.exists(data_dir + jfn ):
self.g2_fit_file = jfn
#self.g2_fit_new_page = True
else:
jfn = 'uid=%s_g2_fit__joint.png'%uid_g2
if os.path.exists(data_dir + jfn ):
self.g2_fit_file = jfn
self.g2_fit_new_page = True
else:
jfn = 'uid=%s_g2__joint.png'%uid_g2
if os.path.exists( data_dir + jfn):
self.g2_file = jfn
self.g2_new_page = True
jfn = 'uid=%s_g2_fit__joint.png'%uid_g2
if os.path.exists(data_dir + jfn ):
self.g2_fit_file = jfn
self.g2_fit_new_page = True
self.q_rate_file = 'uid=%s_Q_Rate_fit.png'%uid_g2
self.q_rate_loglog_file = 'uid=%s_Q_Rate_loglog.png'%uid_g2
self.g2_q_fitpara_file = 'uid=%s_g2_q_fitpara_plot.png'%uid_g2
#print( self.q_rate_file )
if uid_c12 is None:
uid_c12 = uid_
self.q_rate_two_time_fit_file = 'uid=%s_two_time_Q_Rate_fit.png'%uid_c12
#print( self.q_rate_two_time_fit_file )
self.two_time_file = 'uid=%s_Two_time.png'%uid_c12
self.two_g2_file = 'uid=%s_g2_two_g2.png'%uid_c12
if self.report_type =='saxs':
jfn = 'uid=%s_g2_two_g2.png'%uid_c12
self.two_g2_new_page = False
if os.path.exists( data_dir + jfn ):
#print( 'Here we go')
self.two_g2_file = jfn
#self.two_g2_new_page = True
else:
jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12
self.two_g2_new_page = False
if os.path.exists( data_dir + jfn ):
#print( 'Here we go')
self.two_g2_file = jfn
self.two_g2_new_page = True
else:
jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12
self.two_g2_new_page = False
if os.path.exists( data_dir + jfn ):
#print( 'Here we go')
self.two_g2_file = jfn
self.two_g2_new_page = True
self.four_time_file = 'uid=%s_g4.png'%uid_
jfn = 'uid=%s_g4__joint.png'%uid_
self.g4_new_page = False
if os.path.exists( data_dir + jfn ):
self.four_time_file = jfn
self.g4_new_page = True
self.xsvs_fit_file = 'uid=%s_xsvs_fit.png'%uid_
self.contrast_file = 'uid=%s_contrast.png'%uid_
self.dose_file = 'uid=%s_dose_analysis.png'%uid_
jfn = 'uid=%s_dose_analysis__joint.png'%uid_
self.dose_file_new_page = False
if os.path.exists( data_dir + jfn ):
self.dose_file = jfn
self.dose_file_new_page = True
#print( self.dose_file )
if False:
self.flow_g2v = 'uid=%s_1a_mqv_g2_v_fit.png'%uid_
self.flow_g2p = 'uid=%s_1a_mqp_g2_p_fit.png'%uid_
self.flow_g2v_rate_fit = 'uid=%s_v_fit_rate_Q_Rate_fit.png'%uid_
self.flow_g2p_rate_fit = 'uid=%s_p_fit_rate_Q_Rate_fit.png'%uid_
if True:
self.two_time = 'uid=%s_pv_two_time.png'%uid_
#self.two_time_v = 'uid=%s_pv_two_time.png'%uid_
#self.flow_g2bv = 'uid=%s_g2b_v_fit.png'%uid_
#self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_
self.flow_g2_g2b_p = 'uid=%s_g2_two_g2_p.png'%uid_
self.flow_g2_g2b_v = 'uid=%s_g2_two_g2_v.png'%uid_
self.flow_g2bv_rate_fit = 'uid=%s_vertb_Q_Rate_fit.png'%uid_
self.flow_g2bp_rate_fit = 'uid=%s_parab_Q_Rate_fit.png'%uid_
self.flow_g2v = 'uid=%s_g2_v_fit.png'%uid_
self.flow_g2p = 'uid=%s_g2_p_fit.png'%uid_
self.flow_g2v_rate_fit = 'uid=%s_vert_Q_Rate_fit.png'%uid_
self.flow_g2p_rate_fit = 'uid=%s_para_Q_Rate_fit.png'%uid_
#self.report_header(page=1, top=730, new_page=False)
#self.report_meta(new_page=False)
self.q2Iq_file = 'uid=%s_q2_iq.png'%uid
self.iq_invariant_file = 'uid=%s_iq_invariant.png'%uid
    def report_invariant( self, top= 300, new_page=False):
        '''create the invariant analysis report
        two images:
            ROI on average intensity image
            ROI on circular average
        top: vertical canvas coordinate where this section starts
        new_page: when True, end the current page and write the PDF to disk
        '''
        uid=self.uid
        c= self.c
        #add sub-title, static images
        c.setFillColor(black)
        c.setFont("Helvetica", 20)
        ds = 230  # vertical offset between the section title and the images
        self.sub_title_num +=1
        c.drawString(10, top, "%s. I(q) Invariant Analysis"%self.sub_title_num ) #add title
        #add q2Iq
        c.setFont("Helvetica", 14)
        imgf = self.q2Iq_file
        #print( imgf )
        label = 'q^2*I(q)'
        # Left image: q^2 * I(q) plot.
        add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=180,
                         str1_left=110, str1_top = top-35,str1=label,
                         str2_left = 60, str2_top = top -320 )
        #add iq_invariant
        # Right image: the I(q) invariant plot.
        imgf = self.iq_invariant_file
        img_height= 180
        img_left,img_top =320, top - ds*1.15
        str1_left, str1_top,str1= 420, top- 35, 'I(q) Invariant'
        str2_left, str2_top = 350, top- 320
        #print ( imgf )
        add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
                         str1_left, str1_top,str1,
                         str2_left, str2_top )
        if new_page:
            c.showPage()
            # NOTE(review): c.save() finalizes the whole PDF document; confirm
            # this section is meant to be the last one whenever new_page=True.
            c.save()
def report_header(self, page=1, new_page=False):
'''create headers, including title/page number'''
c= self.c
CurTime = self.CurTime
uid=self.uid
user=self.user
c.setFillColor(black)
c.setFont("Helvetica", 14)
#add page number
c.drawString(250, 10, "Page--%s--"%( page ) )
#add time stamp
#c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) )
s_ = "Created at %s@CHX-By-%s"%( CurTime,user )
add_one_line_string( c, s_, 10, left=350,fontsize = 11 )
#add title
#c.setFont("Helvetica", 22)
title = | |
queryset = FuelSupplier.objects.all()
serializer_class = serializers.FuelSupplierSerializer
def post(self, request, *args, **kwargs):
"""
Creates a number of new FuelSupplier objects
"""
return self.create(request, *args, **kwargs)
class fuelsuppliersGet(AuditableMixin, mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """List all FuelSupplier records (GET) or create a new one (POST)."""

    # NOTE(review): AllowAny leaves this endpoint unauthenticated -- confirm intended.
    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierSerializer
    queryset = FuelSupplier.objects.all()
    lookup_field = 'id'

    def get(self, request, *args, **kwargs):
        """Return the list of available FuelSupplier objects."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create a single FuelSupplier from the request body."""
        return self.create(request, *args, **kwargs)
class fuelsuppliersIdDeletePost(AuditableMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
    """Delete the FuelSupplier identified by the URL id (via POST)."""

    # NOTE(review): AllowAny means anyone can delete records -- confirm intended.
    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierSerializer
    queryset = FuelSupplier.objects.all()
    lookup_field = 'id'

    def post(self, request, *args, **kwargs):
        """Destroy the specified FuelSupplier object."""
        return self.destroy(request, *args, **kwargs)
class fuelsuppliersIdGet(AuditableMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Retrieve (GET) or update (PUT) a single FuelSupplier by id."""

    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierSerializer
    queryset = FuelSupplier.objects.all()
    lookup_field = 'id'

    def get(self, request, *args, **kwargs):
        """Return the FuelSupplier identified by the URL id."""
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Apply the request body to the specified FuelSupplier object."""
        return self.update(request, *args, **kwargs)
class fuelsupplieractionstypesBulkPost(AuditableMixin, BulkCreateModelMixin, generics.GenericAPIView):
    """Bulk create / update FuelSupplierActionsType records in one request."""

    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierActionsTypeSerializer
    queryset = FuelSupplierActionsType.objects.all()
    lookup_field = 'id'

    def post(self, request, *args, **kwargs):
        """Create a number of new FuelSupplierActionsType objects."""
        return self.create(request, *args, **kwargs)
class fuelsupplieractionstypesGet(AuditableMixin, mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """List all FuelSupplierActionsType records (GET) or create a new one (POST)."""

    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierActionsTypeSerializer
    queryset = FuelSupplierActionsType.objects.all()
    lookup_field = 'id'

    def get(self, request, *args, **kwargs):
        """Return the list of available FuelSupplierActionsType objects."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create a single FuelSupplierActionsType from the request body."""
        return self.create(request, *args, **kwargs)
class fuelsupplieractionstypesIdDeletePost(AuditableMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
    """Delete the FuelSupplierActionsType identified by the URL id (via POST)."""

    # NOTE(review): AllowAny means anyone can delete records -- confirm intended.
    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierActionsTypeSerializer
    queryset = FuelSupplierActionsType.objects.all()
    lookup_field = 'id'

    def post(self, request, *args, **kwargs):
        """Destroy the specified FuelSupplierActionsType object."""
        return self.destroy(request, *args, **kwargs)
class fuelsupplieractionstypesIdGet(AuditableMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Retrieve (GET) or update (PUT) a single FuelSupplierActionsType by id."""

    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierActionsTypeSerializer
    queryset = FuelSupplierActionsType.objects.all()
    lookup_field = 'id'

    def get(self, request, *args, **kwargs):
        """Return the FuelSupplierActionsType identified by the URL id."""
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Apply the request body to the specified FuelSupplierActionsType object."""
        return self.update(request, *args, **kwargs)
class fuelsupplierattachmentsBulkPost(AuditableMixin, BulkCreateModelMixin, generics.GenericAPIView):
    """Bulk create / update FuelSupplierAttachment records in one request."""

    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierAttachmentSerializer
    queryset = FuelSupplierAttachment.objects.all()
    lookup_field = 'id'

    def post(self, request, *args, **kwargs):
        """Create a number of new FuelSupplierAttachment objects."""
        return self.create(request, *args, **kwargs)
class fuelsupplierattachmentsGet(AuditableMixin, mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """List all FuelSupplierAttachment records (GET) or create a new one (POST)."""

    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierAttachmentSerializer
    queryset = FuelSupplierAttachment.objects.all()
    lookup_field = 'id'

    def get(self, request, *args, **kwargs):
        """Return the list of available FuelSupplierAttachment objects."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create a single FuelSupplierAttachment from the request body."""
        return self.create(request, *args, **kwargs)
class fuelsupplierattachmentsIdDeletePost(AuditableMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
    """Delete the FuelSupplierAttachment identified by the URL id (via POST)."""

    # NOTE(review): AllowAny means anyone can delete records -- confirm intended.
    permission_classes = (permissions.AllowAny,)
    serializer_class = serializers.FuelSupplierAttachmentSerializer
    queryset = FuelSupplierAttachment.objects.all()
    lookup_field = 'id'

    def post(self, request, *args, **kwargs):
        """Destroy the specified FuelSupplierAttachment object."""
        return self.destroy(request, *args, **kwargs)
class fuelsupplierattachmentsIdGet(AuditableMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Retrieve/update endpoint for a single FuelSupplierAttachment record."""

    queryset = FuelSupplierAttachment.objects.all()
    serializer_class = serializers.FuelSupplierAttachmentSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def get(self, request, *args, **kwargs):
        """Return the FuelSupplierAttachment selected by the URL's id."""
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Replace the FuelSupplierAttachment selected by the URL's id."""
        return self.update(request, *args, **kwargs)
class fuelsupplierattachmenttagsBulkPost(AuditableMixin, BulkCreateModelMixin, generics.GenericAPIView):
    """Bulk-create endpoint for FuelSupplierAttachmentTag records."""

    queryset = FuelSupplierAttachmentTag.objects.all()
    serializer_class = serializers.FuelSupplierAttachmentTagSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        """Create a batch of FuelSupplierAttachmentTag objects from the payload."""
        return self.create(request, *args, **kwargs)
class fuelsupplierattachmenttagsGet(AuditableMixin, mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """List/create endpoint for FuelSupplierAttachmentTag records."""

    queryset = FuelSupplierAttachmentTag.objects.all()
    serializer_class = serializers.FuelSupplierAttachmentTagSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def get(self, request, *args, **kwargs):
        """Return every available FuelSupplierAttachmentTag object."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create one new FuelSupplierAttachmentTag object."""
        return self.create(request, *args, **kwargs)
class fuelsupplierattachmenttagsIdDeletePost(AuditableMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
    """Delete endpoint for a single FuelSupplierAttachmentTag record."""

    queryset = FuelSupplierAttachmentTag.objects.all()
    serializer_class = serializers.FuelSupplierAttachmentTagSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        """Delete the FuelSupplierAttachmentTag selected by the URL's id."""
        return self.destroy(request, *args, **kwargs)
class fuelsupplierattachmenttagsIdGet(AuditableMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Retrieve/update endpoint for a single FuelSupplierAttachmentTag record."""

    queryset = FuelSupplierAttachmentTag.objects.all()
    serializer_class = serializers.FuelSupplierAttachmentTagSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def get(self, request, *args, **kwargs):
        """Return the FuelSupplierAttachmentTag selected by the URL's id."""
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Replace the FuelSupplierAttachmentTag selected by the URL's id."""
        return self.update(request, *args, **kwargs)
class fuelsupplierbalancesBulkPost(AuditableMixin, BulkCreateModelMixin, generics.GenericAPIView):
    """Bulk-create endpoint for FuelSupplierBalance records."""

    queryset = FuelSupplierBalance.objects.all()
    serializer_class = serializers.FuelSupplierBalanceSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        """Create a batch of FuelSupplierBalance objects from the payload."""
        return self.create(request, *args, **kwargs)
class fuelsupplierbalancesGet(AuditableMixin, mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """List/create endpoint for FuelSupplierBalance records."""

    queryset = FuelSupplierBalance.objects.all()
    serializer_class = serializers.FuelSupplierBalanceSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def get(self, request, *args, **kwargs):
        """Return every available FuelSupplierBalance object."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create one new FuelSupplierBalance object."""
        return self.create(request, *args, **kwargs)
class fuelsupplierbalancesIdDeletePost(AuditableMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
    """Delete endpoint for a single FuelSupplierBalance record."""

    queryset = FuelSupplierBalance.objects.all()
    serializer_class = serializers.FuelSupplierBalanceSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        """Delete the FuelSupplierBalance selected by the URL's id."""
        return self.destroy(request, *args, **kwargs)
class fuelsupplierbalancesIdGet(AuditableMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Retrieve/update endpoint for a single FuelSupplierBalance record."""

    queryset = FuelSupplierBalance.objects.all()
    serializer_class = serializers.FuelSupplierBalanceSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def get(self, request, *args, **kwargs):
        """Return the FuelSupplierBalance selected by the URL's id."""
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Replace the FuelSupplierBalance selected by the URL's id."""
        return self.update(request, *args, **kwargs)
class fuelsuppliersCCDatumBulkPost(AuditableMixin, BulkCreateModelMixin, generics.GenericAPIView):
    """Bulk-create endpoint for FuelSupplierCCData records."""

    queryset = FuelSupplierCCData.objects.all()
    serializer_class = serializers.FuelSupplierCCDataSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        """Create a batch of FuelSupplierCCData objects from the payload."""
        return self.create(request, *args, **kwargs)
class fuelsuppliersCCDatumGet(AuditableMixin, mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """List/create endpoint for FuelSupplierCCData records."""

    queryset = FuelSupplierCCData.objects.all()
    serializer_class = serializers.FuelSupplierCCDataSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def get(self, request, *args, **kwargs):
        """Return every available FuelSupplierCCData object."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create one new FuelSupplierCCData object."""
        return self.create(request, *args, **kwargs)
class fuelsuppliersCCDatumIdDeletePost(AuditableMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
    """Delete endpoint for a single FuelSupplierCCData record."""

    queryset = FuelSupplierCCData.objects.all()
    serializer_class = serializers.FuelSupplierCCDataSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        """Delete the FuelSupplierCCData selected by the URL's id."""
        return self.destroy(request, *args, **kwargs)
class fuelsuppliersCCDatumIdGet(AuditableMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Retrieve/update endpoint for a single FuelSupplierCCData record."""

    queryset = FuelSupplierCCData.objects.all()
    serializer_class = serializers.FuelSupplierCCDataSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def get(self, request, *args, **kwargs):
        """Return the FuelSupplierCCData selected by the URL's id."""
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Replace the FuelSupplierCCData selected by the URL's id."""
        return self.update(request, *args, **kwargs)
class fuelsuppliercontactsBulkPost(AuditableMixin, BulkCreateModelMixin, generics.GenericAPIView):
    """Bulk-create endpoint for FuelSupplierContact records."""

    queryset = FuelSupplierContact.objects.all()
    serializer_class = serializers.FuelSupplierContactSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated — confirm intended.
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        """Create a batch of FuelSupplierContact objects from the payload."""
        return self.create(request, *args, **kwargs)
class fuelsuppliercontactsGet(AuditableMixin,mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
"""
Lists available FuelSupplierContact objects
"""
lookup_field = 'id'
permission_classes = (permissions.AllowAny,)
queryset = FuelSupplierContact.objects.all()
serializer_class = serializers.FuelSupplierContactSerializer
def get(self, request, *args, **kwargs):
"""
Lists available FuelSupplierContact objects
"""
return self.list(request, *args, **kwargs)
def post(self, request, | |
# Standard library
import math
import os
import time
import Queue
from collections import deque
from math import sqrt
from operator import itemgetter

# Third party
import cv2
import numpy as np
import pandas
import pylab as plt
from sklearn import svm, ensemble
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.externals import joblib
from sklearn.preprocessing import normalize
from skimage import io
from skimage import color
from skimage.color import rgb2hed
from skimage.color import rgb2gray
from skimage.draw import circle_perimeter, ellipse_perimeter
from skimage.feature import hog, local_binary_pattern, greycomatrix, greycoprops
from skimage.feature import peak_local_max, canny
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.filters import roberts, sobel, scharr, prewitt, gaussian
from skimage.filters.rank import entropy
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
from skimage.transform import hough_circle, hough_ellipse
# Row (dx) and column (dy) deltas of the 8-connected neighbourhood:
# pairing dx[k] with dy[k] yields the four axis neighbours and the
# four diagonals, as consumed by bfs() below.
dx = (0, 1, 0, -1, 1, -1, 1, -1)
dy = (1, 0, -1, 0, -1, 1, 1, -1)
def inside(i, j, dfsImg):
    """Return True iff (i, j) is a valid pixel coordinate of dfsImg."""
    rows = dfsImg.shape[0]
    cols = dfsImg.shape[1]
    return 0 <= i < rows and 0 <= j < cols
def bfs(i, j, maxI, maxJ, minI, minJ, threshold, visited, dfsImg):
    """Breadth-first flood fill of the 8-connected region of pixels whose
    value is >= `threshold`, starting from (i, j).

    Expands the running bounding box (minI/minJ .. maxI/maxJ) with every
    pixel reached, and marks reached cells with 1 in `visited` (mutated
    in place).

    Returns:
        ([minJ, minI, maxJ, maxI], visited) - the bounding box in
        (left, top, right, bottom) pixel order, plus the visited mask.
    """
    # 8-connected neighbourhood offsets: (row delta, column delta).
    neighbours = ((0, 1), (1, 0), (0, -1), (-1, 0),
                  (1, -1), (-1, 1), (1, 1), (-1, -1))
    rows, cols = dfsImg.shape[0], dfsImg.shape[1]
    # collections.deque gives O(1) FIFO behaviour without the locking
    # overhead of the thread-safe (and Python-2-only) Queue.Queue that
    # was used before; traversal order is unchanged.
    frontier = deque()
    visited[i][j] = 1
    frontier.append((i, j))
    while frontier:
        ci, cj = frontier.popleft()
        maxI = max(maxI, ci)
        maxJ = max(maxJ, cj)
        minI = min(minI, ci)
        minJ = min(minJ, cj)
        for di, dj in neighbours:
            ni, nj = ci + di, cj + dj
            if (0 <= ni < rows and 0 <= nj < cols
                    and visited[ni][nj] == 0 and dfsImg[ni][nj] >= threshold):
                visited[ni][nj] = 1  # mark on enqueue so a cell enters the queue once
                frontier.append((ni, nj))
    return [minJ, minI, maxJ, maxI], visited
def non_max_suppression_fast(boxes, overlapThresh):
    """Greedy non-maximum suppression (Malisiewicz et al.).

    Keeps the box with the largest bottom-right y first and discards any
    remaining box whose overlap ratio (intersection / candidate area)
    exceeds `overlapThresh`. Returns the surviving boxes as ints, or []
    when `boxes` is empty.
    """
    if len(boxes) == 0:
        return []
    # Work in floats: the overlap computation below divides areas.
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    left = boxes[:, 0]
    top = boxes[:, 1]
    right = boxes[:, 2]
    bottom = boxes[:, 3]
    areas = (right - left + 1) * (bottom - top + 1)
    # Candidate order: ascending bottom edge; we pick from the end.
    order = np.argsort(bottom)
    keep = []
    while order.size > 0:
        current = order[-1]
        keep.append(current)
        rest = order[:-1]
        # Intersection of `current` with every remaining candidate.
        inter_w = np.maximum(
            0, np.minimum(right[current], right[rest]) - np.maximum(left[current], left[rest]) + 1)
        inter_h = np.maximum(
            0, np.minimum(bottom[current], bottom[rest]) - np.maximum(top[current], top[rest]) + 1)
        overlap = (inter_w * inter_h) / areas[rest]
        # Keep only candidates that do not overlap `current` too much.
        order = rest[overlap <= overlapThresh]
    return boxes[keep].astype("int")
def getRGBpath(rgb_path, image_name):
    """Return the path of the RGB counterpart of a binary-mask image.

    Swaps the first occurrence of "bin" in `image_name` for "rgb" and
    joins it onto `rgb_path`.
    """
    # str.replace with count=1 swaps only the first occurrence and, unlike
    # the previous find()-based slicing (find() returns -1 on a miss),
    # leaves names that contain no "bin" untouched instead of corrupting them.
    return os.path.join(rgb_path, image_name.replace("bin", "rgb", 1))
def query(ac_sum, topLeft_i, topLeft_j, bottomRight_i, bottomRight_j):
    """Sum of the inclusive rectangle [topLeft..bottomRight] using the
    integral image `ac_sum` (standard four-corner lookup)."""
    total = ac_sum[bottomRight_i][bottomRight_j]
    if topLeft_i > 0:
        total -= ac_sum[topLeft_i - 1][bottomRight_j]
    if topLeft_j > 0:
        total -= ac_sum[bottomRight_i][topLeft_j - 1]
    if topLeft_i > 0 and topLeft_j > 0:
        # Both strips above and to the left were subtracted; add the
        # doubly-removed corner back once.
        total += ac_sum[topLeft_i - 1][topLeft_j - 1]
    return total
def createAcumulative(bin_im):
    """Build an integral image counting near-black pixels of `bin_im`.

    A pixel counts as 1 when its grayscale value is < 20, else 0; the
    returned float array holds the 2-D cumulative sum of those bits so
    that query() can sum any rectangle in O(1).
    """
    gray = cv2.cvtColor(bin_im, cv2.COLOR_BGR2GRAY)
    # Vectorised replacement of the original O(n*m) Python double loop:
    # threshold once, then cumulative-sum along both axes. float64 output
    # matches the np.zeros((n, m)) accumulator used before.
    bits = (gray < 20).astype(np.float64)
    return bits.cumsum(axis=0).cumsum(axis=1)
def preprocess(img, plot, detected_img = [], name = "", type = 1):
    """Produce an edge map of `img` for circle detection.

    type == 1: smooth, take the HSV value channel and adaptive-threshold it;
    otherwise: smooth, grayscale, Sobel and renormalise to 0..255.
    Canny edges of the result are returned. When `plot` is truthy a
    3-panel figure (preprocessed / edges / `detected_img`) is written to
    a hard-coded directory under /home/rodolfo/Pictures/.

    NOTE(review): `detected_img = []` is a mutable default (never mutated
    here, but fragile) and `type` shadows the builtin; both are kept as-is
    because detectCircles() calls this with `type=` as a keyword.
    """
    if type == 1:
        # Edge-preserving smoothing before thresholding.
        img = cv2.bilateralFilter(img,9, 75, 75)
        img = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
        _ , _ , img = cv2.split(img) # get the V (value) channel
        img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,7,16)
    else:
        img = cv2.bilateralFilter(img,9, 75, 75)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Sobel returns floats; rescale to the 0..255 range canny expects here.
        img = sobel(img)
        img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)
    edges = canny(img, 3, 10, 40)
    if plot:
        # Side-by-side diagnostic figure: preprocessed image, its edges,
        # and the caller-supplied detection overlay.
        fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
        ax0.imshow(img, cmap=plt.cm.gray)
        ax0.set_title('Preprocess')
        ax0.axis('off')
        ax1.imshow(edges, cmap=plt.cm.gray)
        ax1.set_title('Edges')
        ax1.axis('off')
        ax2.imshow(cv2.cvtColor(detected_img, cv2.COLOR_BGR2RGB))
        ax2.set_title('Detected')
        ax2.axis('off')
        plt.tight_layout()
        #plt.show()
        fig.set_size_inches(28, 26)
        # NOTE(review): hard-coded output directory — breaks on other machines.
        fig.savefig(os.path.join('/home/rodolfo/Pictures/', name + '_SCA' + '.jpg'), bbox_inches='tight')
    return edges
def detectCircles(img, useSobel = True, type = 1):
    """Run a circular Hough transform over the edge map of `img`.

    Radii 50..68 (step 2) are scanned; the single strongest peak per
    radius is collected. Returns (accums, centers, radii) — the Hough
    accumulator score, (row, col) centre, and radius of each candidate.
    """
    if useSobel:
        edges = preprocess(img, False, type = type)
    else:
        edges = canny(img, 2, 10, 40)
    candidate_radii = np.arange(50, 70, 2)
    hough_spaces = hough_circle(edges, candidate_radii)
    accums = []
    centers = []
    radii = []
    for radius, space in zip(candidate_radii, hough_spaces):
        # Strongest single response for this radius.
        peaks = peak_local_max(space, num_peaks=1)
        centers.extend(peaks)
        accums.extend(space[peaks[:, 0], peaks[:, 1]])
        radii.extend([radius] * 1)
    return accums, centers, radii
def getPercentage(fileName):
    """Extract the percentage token from a patch file name.

    File names look like "<prefix>_<tag>_<percentage>_...", so the third
    underscore-separated field is returned (as a string).
    """
    # Drop everything up to and including the second underscore...
    for _ in range(2):
        fileName = fileName[fileName.find("_") + 1:]
    # ...then keep what precedes the next underscore.
    return fileName[:fileName.find("_")]
def getClass(percentage, classNumber):
    """Map a percentage in 0..100 onto one of `classNumber` equal-width
    buckets, returning the zero-based bucket index."""
    # 100 would land in a bucket of its own; fold it into the top bucket.
    capped = 99 if percentage == 100 else percentage
    return capped // (100 // classNumber)
def getBinaryClass(percentage, threshold):
    """Return 1 when `percentage` meets or exceeds `threshold`, else 0."""
    return 1 if percentage >= threshold else 0
def trainEigenModel_justFaces(modelPath):
    """Train a binary Eigenfaces recogniser (face vs. non-face patches).

    Negatives come from the "3_50_50_20_noSharper" patch directory
    (only patches whose file-name percentage is below `threshold`),
    positives from "just-faces". Negatives are capped at twice the
    positive count. The trained model is saved to `modelPath` and
    returned.

    NOTE(review): the dataset directories are hard-coded to one machine,
    and this function is nearly identical to trainFisherModel_justFaces
    below — a shared helper would remove the duplication.
    """
    model = cv2.face.createEigenFaceRecognizer()
    #negatives_path = '/home/rodolfo/Pictures/ds2/patches/3_50_50_20_noSharper/'
    negatives_path = '/home/rodolfo/Pictures/dances-data/ds4/patches/3_50_50_20_noSharper/'
    negatives_training_names = os.listdir(negatives_path)
    #positives_path = '/home/rodolfo/Pictures/ds2/patches/just-faces/'
    positives_path = '/home/rodolfo/Pictures/dances-data/ds4/patches/just-faces/'
    positives_training_names = os.listdir(positives_path)
    # Cap negatives at a 2:1 ratio against the positives.
    lim = 2*len(positives_training_names)
    threshold = 60    # percentage cutoff separating negative from positive patches
    classNumber = 2   # binary classification
    # Read images and assign labels.
    data = []
    labels = []
    for name in negatives_training_names:
        if(classNumber == 2):
            classID = getBinaryClass(int(getPercentage(name)), threshold)
        else:
            classID = getClass(int(getPercentage(name)), classNumber)
        if classID == 1: # keep only negative patches from this directory
            continue
        img = cv2.imread(negatives_path + name)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (40,40)) # resize to a multiple of 8 so the face recognizer works properly https://goo.gl/LI5zL7
        data.append(img)
        labels.append(classID)
        if len(data) == lim:
            break
    for name in positives_training_names:
        img = cv2.imread(positives_path + name)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (40,40)) # resize to a multiple of 8 so the face recognizer works properly https://goo.gl/LI5zL7
        data.append(img)
        labels.append(1)
    print 'training model has began'
    model.train(np.array(data), np.array(labels))
    print 'saving model'
    model.save(modelPath)
    print 'Eigenfaces model saved'
    return model
def trainFisherModel_justFaces(modelPath):
    """Train a binary Fisherfaces recogniser (face vs. non-face patches).

    Same pipeline as trainEigenModel_justFaces but with a Fisherfaces
    model, a 4:1 negative cap, and a percentage threshold of 10. The
    trained model is saved to `modelPath` and returned.

    NOTE(review): near-duplicate of trainEigenModel_justFaces — a shared
    helper parameterised on (model factory, lim ratio, threshold) would
    remove the duplication.
    """
    model = cv2.face.createFisherFaceRecognizer()
    #negatives_path = '/home/rodolfo/Pictures/ds2/patches/3_50_50_20_noSharper/'
    negatives_path = '/home/rodolfo/Pictures/dances-data/ds4/patches/3_50_50_20_noSharper/'
    negatives_training_names = os.listdir(negatives_path)
    #positives_path = '/home/rodolfo/Pictures/ds2/patches/just-faces/'
    positives_path = '/home/rodolfo/Pictures/dances-data/ds4/patches/just-faces/'
    positives_training_names = os.listdir(positives_path)
    # Cap negatives at a 4:1 ratio against the positives.
    lim = 4*len(positives_training_names)
    threshold = 10    # percentage cutoff separating negative from positive patches
    classNumber = 2   # binary classification
    # Read images and assign labels.
    data = []
    labels = []
    for name in negatives_training_names:
        if(classNumber == 2):
            classID = getBinaryClass(int(getPercentage(name)), threshold)
        else:
            classID = getClass(int(getPercentage(name)), classNumber)
        if classID == 1: # keep only negative patches from this directory
            continue
        img = cv2.imread(negatives_path + name)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (40,40)) # resize to a multiple of 8 so the face recognizer works properly https://goo.gl/LI5zL7
        data.append(img)
        labels.append(classID)
        if len(data) == lim:
            break
    for name in positives_training_names:
        img = cv2.imread(positives_path + name)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (40,40)) # resize to a multiple of 8 so the face recognizer works properly https://goo.gl/LI5zL7
        data.append(img)
        labels.append(1)
    print 'training model has began'
    model.train(np.array(data), np.array(labels))
    print 'saving model'
    model.save(modelPath)
    print 'Fisherfaces model saved'
    return model
def runFisherFaces(originalImage, rectangles, name, plot = True):
#train FisherFaces Model or recover if it already exists
#path to normal trained data
#modelPath = '/home/rodolfo/Pictures/ds2/models/fisherFaces_filter.ylm'
#path to model trained with just faces data
#modelPath = '/home/rodolfo/Pictures/ds2/models/fisherFaces_filter_justFaces.ylm'
modelPath = 'models/fisherFaces_filter_justFaces_full.ylm'
if os.path.isfile(modelPath): #recover trained model
fisherFacesModel = cv2.face.createFisherFaceRecognizer()
fisherFacesModel.load(modelPath)
else: #train model
#fisherFacesModel = trainFisherModel(modelPath)
fisherFacesModel = trainFisherModel_justFaces(modelPath)
it = 1
w, h = 50, 50
shift = 5
rectanglesImage = originalImage.copy()
rectangles_ans = []
for rectangle in rectangles:
img = originalImage[rectangle[1]:rectangle[3], rectangle[0]:rectangle[2]]
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
n, m = img.shape
detectedImage = np.zeros((n, m))
for i in xrange(0, n-h, shift):
for j in xrange(0, m-w, shift):
cropped = img[i:i+h, j:j+w].copy()
cropped = cv2.resize(cropped, (40, 40)) #convert to a 8 multiple dimension to let face recoginizer work propertly https://goo.gl/LI5zL7
label, confidence = fisherFacesModel.predict(cropped)
if label == 0:
continue
detectedImage[i:i+h, j:j+w]+=label*confidence
if plot:
#plot image with scores
fig = plt.figure(figsize=(30, 20))
a=fig.add_subplot(len(rectangles),2,it)
a.set_title(str(it))
plt.imshow(detectedImage)
#print rectangles and GLCM properties
plotColor = (150, 255, 150)
negativeplotColor = (150, 150, 255)
if round(detectedImage.sum()/(n*m), 3) < 8.0:
cv2.rectangle(rectanglesImage, (rectangle[0], rectangle[1]), (rectangle[2], rectangle[3]), negativeplotColor, 2)
else:
cv2.rectangle(rectanglesImage, (rectangle[0], rectangle[1]), (rectangle[2], rectangle[3]), plotColor, 2)
cv2.putText(rectanglesImage, str(it), (rectangle[0], rectangle[3]-5), 1, 1, plotColor , 2, cv2.LINE_AA )
cv2.putText(rectanglesImage, str(round(detectedImage.sum()/(n*m), 3)), (rectangle[0], rectangle[3]-20), 1, 1, plotColor , | |
# qoc/core/lindbladdiscrete.py
"""
lindbladdiscrete.py - This module defines methods to
evolve a set of density matrices under the
lindblad master equation using time-discrete
control parameters.
"""
from autograd.extend import Box
import numpy as np
from qoc.core.common import (clip_control_norms,
initialize_controls,
slap_controls, strip_controls,)
from qoc.core.mathmethods import (integrate_rkdp5,
interpolate_linear_set,
get_lindbladian,)
from qoc.models import (Dummy,
EvolveLindbladDiscreteState,
EvolveLindbladResult,
InterpolationPolicy,
OperationPolicy,
GrapeLindbladDiscreteState,
GrapeLindbladResult,
ProgramType,)
from qoc.standard import (Adam, ans_jacobian, commutator,
conjugate_transpose,
matmuls,)
### MAIN METHODS ###
def evolve_lindblad_discrete(evolution_time, initial_densities,
                             system_eval_count,
                             controls=None,
                             cost_eval_step=1,
                             costs=None,
                             hamiltonian=None,
                             interpolation_policy=InterpolationPolicy.LINEAR,
                             lindblad_data=None,
                             save_file_path=None,
                             save_intermediate_densities=False):
    """
    Evolve a set of density matrices under the lindblad equation
    and compute the optimization error.

    Arguments:
    evolution_time :: float - This value specifies the duration of the
        system's evolution.
    initial_densities :: ndarray (density_count x hilbert_size x hilbert_size)
        - This array specifies the densities that should be evolved under
        the specified system. These are the densities at the beginning of
        evolution.
    system_eval_count :: int >= 2 - This value determines how many times
        during the evolution the system is evaluated, including the
        initial value of the system. For lindblad evolution, this value does not
        determine the time step of integration.
        This value is used as:
        `system_eval_times` = numpy.linspace(0, `evolution_time`, `system_eval_count`).
    controls :: ndarray (control_step_count x control_count)
        - This array specifies the control parameter values at each
        control step. These values will be used to determine the `controls`
        argument passed to the `hamiltonian` function.
    cost_eval_step :: int >= 1 - This value determines how often step-costs are evaluated.
        The units of this value are in system_eval steps. E.g. if this value is 2,
        step-costs will be computed every 2 system_eval steps.
    costs :: iterable(qoc.models.cost.Cost) - This list specifies all
        the cost functions that the optimizer should evaluate. This list
        defines the criteria for an "optimal" control set. Defaults to no costs.
    hamiltonian :: (controls :: ndarray (control_count), time :: float)
        -> hamiltonian_matrix :: ndarray (hilbert_size x hilbert_size)
        - This function provides the system's hamiltonian given a set
        of control parameters and a time value.
    interpolation_policy :: qoc.models.interpolationpolicy.InterpolationPolicy
        - This value specifies how control parameters should be
        interpreted at points where they are not defined.
    lindblad_data :: (time :: float)
        -> (dissipators :: ndarray (operator_count),
            operators :: ndarray (operator_count x hilbert_size x hilbert_size))
        - This function encodes the lindblad dissipators and operators for all time.
    save_file_path :: str - This is the full path to the file where
        information about program execution will be stored.
        E.g. "./out/foo.h5"
    save_intermediate_densities :: bool - If this value is set to True,
        qoc will write the densities to the save file after every
        system_eval step.

    Returns:
    result
    """
    # `costs` previously defaulted to the mutable `list()`, which is a single
    # object shared by every call using the default; use a None sentinel.
    if costs is None:
        costs = list()
    if controls is not None:
        control_eval_count = controls.shape[0]
    else:
        control_eval_count = 0

    pstate = EvolveLindbladDiscreteState(control_eval_count,
                                         cost_eval_step, costs,
                                         evolution_time, hamiltonian,
                                         initial_densities,
                                         interpolation_policy,
                                         lindblad_data, save_file_path,
                                         save_intermediate_densities,
                                         system_eval_count)
    pstate.save_initial(controls)
    result = EvolveLindbladResult()
    # The evaluation populates `result` in place; its return value is unused.
    _ = _evaluate_lindblad_discrete(controls, pstate, result)

    return result
def grape_lindblad_discrete(control_count, control_eval_count,
                            costs, evolution_time, initial_densities,
                            system_eval_count,
                            complex_controls=False,
                            cost_eval_step=1,
                            hamiltonian=None,
                            impose_control_conditions=None,
                            initial_controls=None,
                            interpolation_policy=InterpolationPolicy.LINEAR,
                            iteration_count=1000,
                            lindblad_data=None,
                            log_iteration_step=10,
                            max_control_norms=None,
                            min_error=0,
                            optimizer=None,
                            save_file_path=None,
                            save_intermediate_densities=False,
                            save_iteration_step=0,):
    """
    This method optimizes the evolution of a set of states under the lindblad
    equation for time-discrete control parameters.

    Arguments:
    control_count :: int - This is the number of control parameters that qoc should
        optimize over. I.e. it is the length of the `controls` array passed
        to the hamiltonian.
    control_eval_count :: int >= 2 - This value determines where definite values
        of the control parameters are evaluated. This value is used as:
        `control_eval_times` = numpy.linspace(0, `evolution_time`, `control_eval_count`).
    costs :: iterable(qoc.models.cost.Cost) - This list specifies all
        the cost functions that the optimizer should evaluate. This list
        defines the criteria for an "optimal" control set.
    evolution_time :: float - This value specifies the duration of the
        system's evolution.
    initial_densities :: ndarray (density_count x hilbert_size x hilbert_size)
        - This array specifies the densities that should be evolved under
        the specified system. These are the densities at the beginning of
        evolution.
    system_eval_count :: int >= 2 - This value determines how many times
        during the evolution the system is evaluated, including the
        initial value of the system.
        For lindblad evolution, this value does not determine the time step of integration.
        This value is used as:
        `system_eval_times` = numpy.linspace(0, `evolution_time`, `system_eval_count`).
    complex_controls :: bool - This value determines if the control parameters
        are complex-valued. If some controls are real only or imaginary only
        while others are complex, real only and imaginary only controls
        can be simulated by taking the real or imaginary part of a complex control.
    cost_eval_step :: int >= 1 - This value determines how often step-costs are evaluated.
        The units of this value are in system_eval steps. E.g. if this value is 2,
        step-costs will be computed every 2 system_eval steps.
    hamiltonian :: (controls :: ndarray (control_count), time :: float)
        -> hamiltonian_matrix :: ndarray (hilbert_size x hilbert_size)
        - This function provides the system's hamiltonian given a set
        of control parameters and a time value.
    impose_control_conditions :: (controls :: (control_eval_count x control_count))
        -> (controls :: (control_eval_count x control_count))
        - This function is called after every optimization update. Example uses
        include setting boundary conditions on the control parameters.
    initial_controls :: ndarray (control_step_count x control_count)
        - This array specifies the control parameters at each
        control_eval step. These values will be used to determine the `controls`
        argument passed to the `hamiltonian` function at each time step for
        the first iteration of optimization.
    interpolation_policy :: qoc.models.interpolationpolicy.InterpolationPolicy
        - This value specifies how control parameters should be
        interpreted at points where they are not defined.
    iteration_count :: int - This value determines how many total system
        evolutions the optimizer will perform to determine the
        optimal control set.
    lindblad_data :: (time :: float)
        -> (dissipators :: ndarray (operator_count),
            operators :: ndarray (operator_count x hilbert_size x hilbert_size))
        - This function encodes the lindblad dissipators and operators for all time.
    log_iteration_step :: int - This value determines how often qoc logs
        progress to stdout. This value is specified in units of system steps,
        of which there are `control_step_count` * `system_step_multiplier`.
        Set this value to 0 to disable logging.
    max_control_norms :: ndarray (control_count) - This array
        specifies the element-wise maximum norm that each control is
        allowed to achieve. If, in optimization, the value of a control
        exceeds its maximum norm, the control will be rescaled to
        its maximum norm. Note that for non-complex values, this
        feature acts exactly as absolute value clipping.
    min_error :: float - This value is the threshold below which
        optimization will terminate.
    optimizer :: class instance - This optimizer object defines the
        gradient-based procedure for minimizing the total contribution
        of all cost functions with respect to the control parameters.
        Defaults to a fresh qoc.standard.Adam instance.
    save_file_path :: str - This is the full path to the file where
        information about program execution will be stored.
        E.g. "./out/foo.h5"
    save_intermediate_densities :: bool - If this value is set to True,
        qoc will write the densities to the save file after every
        system_eval step.
    save_iteration_step :: int - This value determines how often qoc
        saves progress to the save file specified by `save_file_path`.
        This value is specified in units of system steps, of which
        there are `control_step_count` * `system_step_multiplier`.
        Set this value to 0 to disable saving (default).

    Returns:
    result
    """
    # `optimizer` previously defaulted to `Adam()` evaluated at import time,
    # so every default call shared ONE stateful Adam instance (its moment
    # estimates carried over between unrelated optimizations). Construct a
    # fresh optimizer per call instead.
    if optimizer is None:
        optimizer = Adam()
    # Initialize the controls.
    initial_controls, max_control_norms = initialize_controls(complex_controls,
                                                              control_count,
                                                              control_eval_count,
                                                              evolution_time,
                                                              initial_controls,
                                                              max_control_norms,)
    # Construct the program state.
    pstate = GrapeLindbladDiscreteState(complex_controls,
                                        control_count,
                                        control_eval_count, cost_eval_step, costs,
                                        evolution_time, hamiltonian,
                                        impose_control_conditions,
                                        initial_controls,
                                        initial_densities,
                                        interpolation_policy, iteration_count,
                                        lindblad_data,
                                        log_iteration_step, max_control_norms,
                                        min_error, optimizer,
                                        save_file_path, save_intermediate_densities,
                                        save_iteration_step,
                                        system_eval_count,)
    pstate.log_and_save_initial()

    # Autograd does not allow multiple return values from
    # a differentiable function.
    # Scipy's minimization algorithms require us to provide
    # functions that they evaluate on their own schedule.
    # The best solution to track mutable objects, that I can think of,
    # is to use a reporter object.
    reporter = Dummy()
    reporter.iteration = 0
    result = GrapeLindbladResult()

    # Convert the controls from cost function format to optimizer format.
    initial_controls = strip_controls(pstate.complex_controls, pstate.initial_controls)

    # Run the optimization.
    pstate.optimizer.run(_eld_wrap, pstate.iteration_count, initial_controls,
                         _eldj_wrap, args=(pstate, reporter, result))

    return result
### HELPER METHODS ###
def _eld_wrap(controls, pstate, reporter, | |
# From repository: klayveR/python-poe-timeless-jewel
import org.sikuli.script.SikulixForJython
import math
import sys
import time
import os
import json
from sikuli.Sikuli import *
sys.path.append("scripts/")
from helpers import Helpers
# Sikuli settings: move the mouse instantly and silence the framework's logging.
Settings.MoveMouseDelay = 0   # no artificial delay on mouse moves
Settings.ActionLogs = False   # suppress action log output
Settings.InfoLogs = False     # suppress info log output
Settings.DebugLogs = False    # suppress debug log output
# Global variables
# Script-wide configuration. Values are in screen pixels unless noted.
cfg = {
    "title": "Skill Capture",  # title used for every popup dialog
    "radius": 427,             # presumably the jewel's effect radius on the tree, in px — confirm
    "offsets": {               # per-node-type pixel offsets (usage not visible here — confirm)
        "regular": 8,
        "notable": 12,
        "keystone": 25,
        "poe": 300,
        "radius": 30,
        "jewel": 200           # region padding around a jewel socket (see capture() in start())
    },
    "sim": {                   # presumably minimum match-similarity scores per template — confirm
        "jewel": 0.95,
        "jewelSocketed": 0.9,
        "notable": 0.7,
        "notableAlloc": 0.75,
        "regular": 0.87,
        "regularAlloc": 0.8,
        "zoom": 0.9
    },
    "txtbox": {                # tooltip text-box capture region, relative offsets and size
        "xOffset": 30,
        "yOffset": 140,
        "width": 680,
        "height": 480
    },
    "mouse": {
        "zoom": {              # screen position of the zoom control (used by checkZoom/zooming)
            "x": 727,
            "y": 69,
            "offset": 37
        }
    }
}
# Working directories, all rooted at the current working directory.
dirs = {
    "images": os.path.join(os.getcwd(), "images"),
    "data": os.path.join(os.getcwd(), "data"),
    "timeless": os.path.join(os.getcwd(), "data/timeless"),
    "jewel": os.path.join(os.getcwd(), "data/jewel"),
    "jewelDone": os.path.join(os.getcwd(), "data/jewel_done")
}
# Template images used for on-screen matching.
images = {
    "jewel": os.path.join(dirs["images"], "Jewel.png"),
    "jewelSocketed": os.path.join(dirs["images"], "JewelSocketed.png"),
    "notable": os.path.join(dirs["images"], "Notable.png"),
    "notableAlloc": os.path.join(dirs["images"], "NotableAllocated.png"),
    "regular": os.path.join(dirs["images"], "Skill.png"),
    "regularAlloc": os.path.join(dirs["images"], "SkillAllocated.png"),
    "zoom": os.path.join(dirs["images"], "Zoom.png"),
    "zoomedOut": os.path.join(dirs["images"], "ZoomOut.png"),
    "timelessJewel": os.path.join(dirs["images"], "TimelessJewel.png"),
    "conquered": os.path.join(dirs["images"], "Conquered.png"),
}
# Screen regions, populated lazily by start(): the PoE window, the jewel
# radius region, and the jewel socket region.
regions = {
    "poe": None,
    "radius": None,
    "jewel": None
}
# State of the jewel currently being captured.
jewel = {
    "id": 0,        # unique id (unix timestamp at capture time, see start())
    "nodes": [],    # nodes found inside the jewel radius
    "type": "",
    "variant": "",
    "seed": 0
}
# Jewels that were already captured in a previous run.
capturedJewels = []
def end(event):
    """Hotkey handler: notify the user and terminate the script.

    Args:
        event: Sikuli hotkey event object (unused).
    """
    popup("Script stopped", cfg["title"])
    # sys.exit() does not return; the trailing 'return' that used to follow
    # it was unreachable and has been removed.
    sys.exit()
def start(event):
    """Hotkey handler driving the whole capture workflow.

    Acts as a small state machine keyed on the module globals:
    1. No socket region yet -> locate a known or empty jewel socket and
       capture all nodes in its radius.
    2. Socket known but no jewel data -> read the hovered jewel via the
       clipboard (Ctrl+C).
    3. Jewel data known and jewel socketed -> capture the modified nodes.

    Args:
        event: Sikuli hotkey event object (unused).
    """
    global regions
    global jewel
    # Check if PoE is running
    regions["poe"] = App("Path of Exile").window()
    if not regions["poe"]:
        popup("Path of Exile is not running", cfg["title"])
        return
    # Check if PoE is running in 1920x1080 borderless
    if not (regions["poe"].getW() == 1920 and regions["poe"].getH() == 1080):
        popup("Path of Exile must be running in 1920x1080 borderless fullscreen", cfg["title"])
        return
    # Check if zoom is correct
    if not checkZoom():
        popup("Zoom is set incorrectly.\n\nPlease zoom all the way out and press F3 to automatically zoom in correctly.", cfg["title"])
        return
    # If no jewel region has been defined yet, do that
    if not regions["jewel"]:
        print "Locating previously captured jewels..."
        if locateAndStoreCapturedJewel():
            popup("It looks like you capture this jewel before, so you're good to go!\n\nPlease open your inventory, hover over the jewel you want to analyze and press F2.\n\nMake sure to not move the passive skill tree, otherwise the script will attempt to find the a new jewel position.", cfg["title"])
            return
        print "None found, locating empty jewel socket..."
        result = locateEmptyJewel()
        if result:
            # Use the capture timestamp as the jewel id.
            jewel["id"] = int(time.time())
            # Find nodes in radius, save node coordinates and type and capture passive text
            jewel["nodes"] = locateAllNodes()
            jewelDirectory = os.path.join(dirs["jewel"], str(jewel["id"]))
            captureTextFromNodes(jewel["nodes"], jewelDirectory)
            # Move the cursor out of the way so it is not in the screenshot.
            mouseMove(Location(100, 100))
            wait(0.1)
            capture(regions["jewel"].nearby(cfg["offsets"]["jewel"]), dirs["jewel"], str(jewel["id"]) + ".png")
            saveNodeData(jewel["nodes"], jewelDirectory, jewel["id"])
            popup("Successfully captured jewel and " + str(len(jewel['nodes'])) + " nodes.\n\nPlease open your inventory, hover over the jewel you want to analyze and press F2.\n\nMake sure to not move the passive skill tree, otherwise the script will attempt to find the a new jewel position.", cfg["title"])
        return
    # If jewel region has been defined
    if regions["jewel"]:
        inCorrectPosition = isEmptyJewelInCorrectPosition()
        # If jewel data hasn't been extracted yet
        if jewel["seed"] == 0:
            # Read the hovered item's text via the game's Ctrl+C export.
            Env.setClipboard("")
            type('c', KeyModifier.CTRL)
            wait(0.1)
            data = Helpers.extractJewelData(Env.getClipboard())
            Env.setClipboard("")
            if data != False:
                jewel["seed"] = data["seed"]
                jewel["type"] = data["type"]
                jewel["variant"] = data["variant"]
                popup("Successfully extracted Timeless Jewel data.\n\nPlease socket the Timeless Jewel and start the procedure.\n\nMake sure to not move the passive skill tree, otherwise the script will attempt to find the a new jewel position.", cfg["title"])
            else:
                if inCorrectPosition:
                    popup("Couldn't extract Timeless Jewel data.\n\nPlease open your inventory, hover over the jewel you want to analyze and press F2.\n\nMake sure to not move the passive skill tree, otherwise the script will attempt to find the a new jewel position.", cfg["title"])
                else:
                    # Socket moved: drop all cached state and re-locate next run.
                    popup("The jewel socket can't be found at its previous location anymore.\n\nThe script will look for the new position next time.\nMake sure you haven't socketed a jewel into your target socket.", cfg["title"])
                    regions["jewel"] = None
                    jewel["nodes"] = []
                    jewel["seed"] = 0
                    return
            return
        # If jewel is socketed, read nodes
        if isJewelSocketed():
            jewelId = int(time.time())
            # Pair the socket capture id with this timeless capture id.
            jewelDirectory = os.path.join(dirs["timeless"], str(jewel["id"]) + "_" + str(jewelId))
            captureTextFromNodes(jewel["nodes"], jewelDirectory)
            saveTimelessJewelData(jewel, jewelDirectory)
            jewel["seed"] = 0
            popup("Successfully captured " + str(len(jewel['nodes'])) + " nodes.\n\nYou can now run the analyzer to receive results for your jewel\nor press F2 while hovering over another jewel in your inventory.\n\nYou can also move to another jewel socket and start from the beginning by pressing F2.", cfg["title"])
            return
        # If jewel is not socketed but empty socket is still in correct position
        if inCorrectPosition:
            popup("Please socket the timeless jewel into the jewel socket.", cfg["title"])
        else:
            # Socket moved: drop all cached state and re-locate next run.
            popup("The jewel socket can't be found at its previous location anymore.\n\nThe script will look for the new position next time.\nMake sure you haven't socketed a jewel into your target socket.", cfg["title"])
            regions["jewel"] = None
            jewel["nodes"] = []
            jewel["seed"] = 0
            return
        return
    return
def loadCapturedJewels():
    """Loads previously captured jewels (screenshot + node data) from disk.

    Scans the jewel_done directory for <id>.png screenshots and pairs each
    with its <id>.json node data.

    Returns:
        List of dicts with keys "id", "json" (parsed node data) and
        "image" (png filename).
    """
    global dirs
    files = Helpers.getFilesByExtFromDir(dirs["jewelDone"], ".png")
    # NOTE(review): this local list shadows the module-level
    # 'capturedJewels'; the caller is expected to assign the return value.
    capturedJewels = []
    for f in files:
        jewelId = f.replace(".png", "")
        result = { "id": jewelId }
        # Node data lives next to the screenshot as <id>.json.
        with open(os.path.join(dirs["jewelDone"], jewelId + ".json")) as jsonFile:
            result["json"] = json.load(jsonFile)
        result["image"] = f
        capturedJewels.append(result)
        print "Loaded jewel with ID " + str(jewelId)
    return capturedJewels
def locateAndStoreCapturedJewel():
    """Tries to find any previously captured jewel socket on screen.

    On a match, restores the jewel id, rebuilds each node's screen region
    from the stored relative coordinates, and stores the socket region.

    Returns:
        True if a previously captured jewel was located, otherwise False.
    """
    global jewel
    global regions
    for j in capturedJewels:
        imagePath = os.path.join(dirs["jewelDone"], j["image"])
        match = regions["poe"].exists(Pattern(imagePath))
        if match:
            jewel["id"] = j["id"]
            jewel["nodes"] = []
            # The stored screenshot includes the area around the socket;
            # shrink the match back down to the socket itself.
            regions["jewel"] = match.nearby(-cfg["offsets"]["jewel"])
            jewelCenter = regions["jewel"].getCenter()
            # Node coordinates were stored relative to the socket center,
            # scaled by the tree radius (see saveNodeData).
            for node in j["json"]["nodes"]:
                x = jewelCenter.getX() + int(node["x"] * cfg["radius"])
                y = jewelCenter.getY() + int(node["y"] * cfg["radius"])
                region = Region(x, y, 0, 0).nearby(10)
                jewel["nodes"].append({
                    "type": node["type"],
                    "region": region
                })
            return True
    return False
def locateEmptyJewel():
    """Locates an empty jewel socket and derives the radius region from it.

    If several sockets match, each candidate is highlighted in turn and the
    user confirms the right one.

    Returns:
        True if a socket was selected (regions["jewel"]/["radius"] set),
        otherwise False.
    """
    global regions
    global images
    global cfg
    image = Pattern(images["jewel"]).similar(cfg["sim"]["jewel"])
    # Use a smaller region than the full window to find jewel
    matches = regions["poe"].nearby(-cfg["offsets"]["poe"]).findAllList(image)
    foundJewel = None
    # If only found 1 match, use it
    if len(matches) == 1:
        foundJewel = matches[0]
    else: # Otherwise, ask user to select correct jewel
        for m in matches:
            m.highlight()
            answer = popAsk("Is the highlighted jewel the jewel you want to use?")
            if answer:
                foundJewel = m
                break
    if foundJewel:
        regions["jewel"] = foundJewel
        jewelCenter = foundJewel.getCenter()
        # Square region centered on the socket, spanning the jewel radius.
        regions["radius"] = Region(jewelCenter.getX() - cfg["radius"], jewelCenter.getY() - cfg["radius"], cfg["radius"]*2, cfg["radius"]*2)
        # Make the radius region slightly bigger to make sure it also captures "cut off" nodes
        regions["radius"] = regions["radius"].nearby(cfg["offsets"]["radius"])
        return True
    popup("No empty jewel socket found. Make sure the jewel socket is located in the middle region of the screen.", cfg["title"])
    return False
def isJewelSocketed():
    """Checks whether a jewel now occupies the previously empty socket."""
    global regions
    global images
    socketedPattern = Pattern(images["jewelSocketed"]).similar(cfg["sim"]["jewelSocketed"])
    return regions["jewel"].exists(socketedPattern)
def isEmptyJewelInCorrectPosition():
    """Checks whether the empty jewel socket is still at its stored position."""
    global regions
    global images
    emptyPattern = Pattern(images["jewel"]).similar(cfg["sim"]["jewel"])
    return regions["jewel"].exists(emptyPattern)
# Locates all nodes in the radius region, filters nodes outside of circle radius, filters jewel sockets
# and highlights them to prevent them from being detected twice
def locateAllNodes():
    """Locates every passive node inside the jewel radius region.

    Each node kind is located, filtered, then highlighted so later scans
    cannot re-detect the same spot.

    Returns:
        List of node dicts, ordered: notables, allocated notables,
        regulars, allocated regulars.
    """
    global cfg
    # (image key, node type) pairs in the exact order the scans originally
    # ran; the order matters because highlighting suppresses re-detection.
    scanOrder = [
        ("notableAlloc", "notable"),
        ("notable", "notable"),
        ("regular", "regular"),
        ("regularAlloc", "regular"),
    ]
    found = {}
    for imageKey, nodeType in scanOrder:
        nodeRegions = locateNodes(imageKey, cfg["sim"][imageKey])
        nodes = filterInvalidNodeRegions(nodeRegions, nodeType)
        highlightNodes(nodes, nodeType)
        found[imageKey] = nodes
    highlightAllOff()
    # Same concatenation order as the original hand-unrolled version.
    return (found["notable"] + found["notableAlloc"] +
            found["regular"] + found["regularAlloc"])
# Saves coordinates and type of nodes in circle radius of jewel to a json file
def saveNodeData(nodes, directory, id):
    """Writes node coordinates/types to <directory>/data.json and records
    the capture in the in-memory capturedJewels list.

    Coordinates are stored relative to the socket center, normalized by the
    tree radius, so they can be re-projected later.
    """
    global regions
    global cfg
    global dirs
    Helpers.createDirectory(directory)
    outputPath = os.path.join(directory, "data.json")
    socketCenter = regions["jewel"].getCenter()
    serialized = []
    for node in nodes:
        rel = Helpers.calcRelativeDistFromPoint(socketCenter, node["region"].getCenter(), cfg["radius"])
        serialized.append({
            "x": rel[0],
            "y": rel[1],
            "type": node["type"]
        })
    capturedJewels.append({
        "id": id,
        "json": { "nodes": serialized },
        "image": os.path.join(dirs["jewel"], str(id) + ".png")
    })
    with open(outputPath, 'w') as f:
        json.dump(serialized, f, indent=4, sort_keys=True)
# Saves type, seed and variant of timeless jewel to a json file
def saveTimelessJewelData(data, directory):
    """Persists the timeless jewel's type, seed and variant as data.json."""
    Helpers.createDirectory(directory)
    outputPath = os.path.join(directory, "data.json")
    selected = dict((key, data[key]) for key in ("type", "seed", "variant"))
    with open(outputPath, 'w') as f:
        json.dump(selected, f, indent=4, sort_keys=True)
# Captures the text boxes of passives and saves them into a directory
def captureTextFromNodes(nodes, directory):
    """Hovers each node and captures its tooltip text box as an image.

    Images are written to *directory*, named "0", "1", ... in node order.

    Args:
        nodes: List of node dicts with a "region" to hover over.
        directory: Destination directory (created if missing).
    """
    global cfg
    Helpers.createDirectory(directory)
    # enumerate replaces the manual cnt counter of the original version.
    for cnt, n in enumerate(nodes):
        mouseMove(n["region"])
        wait(0.1)
        nodeCenter = n["region"].getCenter()
        y = nodeCenter.getY() - cfg["txtbox"]["yOffset"]
        # Clamp the capture region so it never extends past the window bottom.
        if y + cfg["txtbox"]["height"] > regions["poe"].getH():
            y = regions["poe"].getH() - cfg["txtbox"]["height"]
        textRegion = Region(nodeCenter.getX() + cfg["txtbox"]["xOffset"], y,
                            cfg["txtbox"]["width"], cfg["txtbox"]["height"])
        capture(textRegion, directory, str(cnt))
# Checks if the passive skill tree zoom is set correctly
def checkZoom():
global images
global cfg
image | |
# Source: cthomasUpraizall/dateutil -- dateutil/parser/isoparser.py
# -*- coding: utf-8 -*-
"""
This module offers a parser for ISO-8601 strings
It is intended to support all valid date, time and datetime formats per the
ISO-8601 specification.
..versionadded:: 2.7.0
"""
from datetime import datetime, timedelta, time, date
import calendar
from dateutil import tz
from functools import wraps
import re
import six
__all__ = ["isoparse", "isoparser"]
def _takes_ascii(f):
    """Decorator: normalize the string argument of *f* to ASCII bytes.

    Accepts bytes, text, or a stream (anything with ``.read``); text must
    be pure ASCII, otherwise ``ValueError`` is raised.
    """
    @wraps(f)
    def wrapper(self, str_in, *args, **kwargs):
        # Drain a stream argument first, if one was passed.
        reader = getattr(str_in, 'read', None)
        if reader is not None:
            str_in = reader()
        # ISO-8601 covers only ASCII, which encodes identically in UTF-8,
        # so text input can safely be converted to bytes.
        if isinstance(str_in, six.text_type):
            try:
                str_in = str_in.encode('ascii')
            except UnicodeEncodeError as e:
                msg = 'ISO-8601 strings should contain only ASCII characters'
                six.raise_from(ValueError(msg), e)
        return f(self, str_in, *args, **kwargs)
    return wrapper
class isoparser(object):
def __init__(self, sep=None):
"""
:param sep:
A single character that separates date and time portions. If
``None``, the parser will accept any single character.
For strict ISO-8601 adherence, pass ``'T'``.
"""
if sep is not None:
if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
raise ValueError('Separator must be a single, non-numeric ' +
'ASCII character')
sep = sep.encode('ascii')
self._sep = sep
    @_takes_ascii
    def isoparse(self, dt_str):
        """
        Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.
        An ISO-8601 datetime string consists of a date portion, followed
        optionally by a time portion - the date and time portions are separated
        by a single character separator, which is ``T`` in the official
        standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
        combined with a time portion.
        Supported date formats are:
        Common:
        - ``YYYY``
        - ``YYYY-MM`` or ``YYYYMM``
        - ``YYYY-MM-DD`` or ``YYYYMMDD``
        Uncommon:
        - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 1, Monday)
        - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day
        The ISO week and day numbering follows the same logic as
        :func:`datetime.date.isocalendar`.
        Supported time formats are:
        - ``hh``
        - ``hh:mm`` or ``hhmm``
        - ``hh:mm:ss`` or ``hhmmss``
        - ``hh:mm:ss.sss`` or ``hh:mm:ss.ssssss`` (3-6 sub-second digits)
        Midnight is a special case for `hh`, as the standard supports both
        00:00 and 24:00 as a representation. The decimal separator can be
        either a dot or a comma.
        .. caution::
            Support for fractional components other than seconds is part of the
            ISO-8601 standard, but is not currently implemented in this parser.
        Supported time zone offset formats are:
        - `Z` (UTC)
        - `±HH:MM`
        - `±HHMM`
        - `±HH`
        Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
        with the exception of UTC, which will be represented as
        :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
        as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.
        :param dt_str:
            A string or stream containing only an ISO-8601 datetime string
        :return:
            Returns a :class:`datetime.datetime` representing the string.
            Unspecified components default to their lowest value.
        .. warning::
            As of version 2.7.0, the strictness of the parser should not be
            considered a stable part of the contract. Any valid ISO-8601 string
            that parses correctly with the default settings will continue to
            parse correctly in future versions, but invalid strings that
            currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
            guaranteed to continue failing in future versions if they encode
            a valid date.
        .. versionadded:: 2.7.0
        """
        components, pos = self._parse_isodate(dt_str)
        if len(dt_str) > pos:
            # A time portion must follow immediately after the separator.
            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
                components += self._parse_isotime(dt_str[pos + 1:])
            else:
                raise ValueError('String contains unknown ISO components')
        # ISO allows 24:00 as midnight at the END of a day; normalize it to
        # 00:00 of the following day.
        if len(components) > 3 and components[3] == 24:
            components[3] = 0
            return datetime(*components) + timedelta(days=1)
        return datetime(*components)
@_takes_ascii
def parse_isodate(self, datestr):
"""
Parse the date portion of an ISO string.
:param datestr:
The string portion of an ISO string, without a separator
:return:
Returns a :class:`datetime.date` object
"""
components, pos = self._parse_isodate(datestr)
if pos < len(datestr):
raise ValueError('String contains unknown ISO ' +
'components: {}'.format(datestr))
return date(*components)
@_takes_ascii
def parse_isotime(self, timestr):
"""
Parse the time portion of an ISO string.
:param timestr:
The time portion of an ISO string, without a separator
:return:
Returns a :class:`datetime.time` object
"""
components = self._parse_isotime(timestr)
if components[0] == 24:
components[0] = 0
return time(*components)
    @_takes_ascii
    def parse_tzstr(self, tzstr, zero_as_utc=True):
        """
        Parse a valid ISO time zone string.
        See :func:`isoparser.isoparse` for details on supported formats.
        :param tzstr:
            A string representing an ISO time zone offset
        :param zero_as_utc:
            Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones
        :return:
            Returns :class:`dateutil.tz.tzoffset` for offsets and
            :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
            specified) offsets equivalent to UTC.
        """
        # Thin public wrapper around the private implementation.
        return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
# Constants
_MICROSECOND_END_REGEX = re.compile(b'[-+Z]+')
_DATE_SEP = b'-'
_TIME_SEP = b':'
_MICRO_SEPS = b'.,'
    def _parse_isodate(self, dt_str):
        # Try the common calendar forms (YYYY, YYYY-MM, YYYY-MM-DD) first;
        # fall back to the uncommon ISO-week / ordinal-day forms.
        try:
            return self._parse_isodate_common(dt_str)
        except ValueError:
            return self._parse_isodate_uncommon(dt_str)
    def _parse_isodate_common(self, dt_str):
        # Parses the common calendar-date forms: YYYY, YYYY-MM (separator
        # required), YYYY-MM-DD / YYYYMMDD.
        # Returns ([year, month, day], bytes_consumed).
        len_str = len(dt_str)
        components = [1, 1, 1]  # defaults: lowest valid month and day
        if len_str < 4:
            raise ValueError('ISO string too short')
        # Year
        components[0] = int(dt_str[0:4])
        pos = 4
        if pos >= len_str:
            return components, pos
        has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
        if has_sep:
            pos += 1
        # Month
        if len_str - pos < 2:
            raise ValueError('Invalid common month')
        components[1] = int(dt_str[pos:pos + 2])
        pos += 2
        if pos >= len_str:
            if has_sep:
                # YYYY-MM is valid; bare YYYYMM is rejected (would be
                # ambiguous with a truncated YYYYMMDD).
                return components, pos
            else:
                raise ValueError('Invalid ISO format')
        # Separator use must be consistent throughout the date.
        if has_sep:
            if dt_str[pos:pos + 1] != self._DATE_SEP:
                raise ValueError('Invalid separator in ISO string')
            pos += 1
        # Day
        if len_str - pos < 2:
            raise ValueError('Invalid common day')
        components[2] = int(dt_str[pos:pos + 2])
        return components, pos + 2
    def _parse_isodate_uncommon(self, dt_str):
        # Parses the uncommon forms: ISO week dates (YYYY-?Www-?D) and
        # ordinal dates (YYYY-?DDD).
        # Returns ([year, month, day], bytes_consumed).
        if len(dt_str) < 4:
            raise ValueError('ISO string too short')
        # All ISO formats start with the year
        year = int(dt_str[0:4])
        has_sep = dt_str[4:5] == self._DATE_SEP
        pos = 4 + has_sep  # Skip '-' if it's there
        if dt_str[pos:pos + 1] == b'W':
            # YYYY-?Www-?D?
            pos += 1
            weekno = int(dt_str[pos:pos + 2])
            pos += 2
            dayno = 1  # weekday defaults to Monday when omitted
            if len(dt_str) > pos:
                # Separator use must match between year/week and week/day.
                if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
                    raise ValueError('Inconsistent use of dash separator')
                pos += has_sep
                dayno = int(dt_str[pos:pos + 1])
                pos += 1
            base_date = self._calculate_weekdate(year, weekno, dayno)
        else:
            # YYYYDDD or YYYY-DDD
            if len(dt_str) - pos < 3:
                raise ValueError('Invalid ordinal day')
            ordinal_day = int(dt_str[pos:pos + 3])
            pos += 3
            # Leap years allow day 366.
            if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
                raise ValueError('Invalid ordinal day' +
                                 ' {} for year {}'.format(ordinal_day, year))
            base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
        components = [base_date.year, base_date.month, base_date.day]
        return components, pos
def _calculate_weekdate(self, year, week, day):
"""
Calculate the day of corresponding to the ISO year-week-day calendar.
This function is effectively the inverse of
:func:`datetime.date.isocalendar`.
:param year:
The year in the ISO calendar
:param week:
The week in the ISO calendar - range is [1, 53]
:param day:
The day in the ISO calendar - range is [1 (MON), 7 (SUN)]
:return:
Returns a :class:`datetime.date`
"""
if not 0 < week < 54:
raise ValueError('Invalid week: {}'.format(week))
if not 0 < day < 8: # Range is 1-7
raise ValueError('Invalid weekday: {}'.format(day))
# Get week 1 for the specific year:
jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it
week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
# Now add the specific number of weeks and days to get what we want
week_offset = (week - 1) * 7 + (day - 1)
return week_1 + timedelta(days=week_offset)
def _parse_isotime(self, timestr):
len_str = len(timestr)
components = [0, 0, 0, 0, None]
pos = 0
comp = -1
if len(timestr) < 2:
raise ValueError('ISO time too short')
has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP
while pos < len_str and comp < 5:
comp += 1
if timestr[pos:pos + 1] in b'-+Z':
# Detect time zone boundary
components[-1] = self._parse_tzstr(timestr[pos:])
pos = len_str
break
if comp < 3:
# Hour, minute, second
components[comp] = int(timestr[pos:pos + 2])
pos += 2
if (has_sep and pos < len_str and
timestr[pos:pos + 1] == self._TIME_SEP):
pos += 1
if comp == 3:
# Microsecond
if timestr[pos:pos + 1] not in self._MICRO_SEPS:
continue
pos += 1
us_str = self._MICROSECOND_END_REGEX.split(timestr[pos:pos + 6],
1)[0]
components[comp] = int(us_str) | |
the attribute value string.
Args:
element: XML element object.
attr: String containing the attribute name.
Returns:
String representing the attribute value.
"""
return element.get(attr)
def get_attribute_value(self, element, attr):
"""
Returns the numeric attribute value.
Args:
element: XML element object.
attr: String containing the attribute name.
Returns:
Numeric value representing the attribute value.
"""
val = element.get(attr)
try:
if val.upper().startswith('0X') or val.upper().startswith('-0X'):
return int(val, 16)
return int(val)
except:
idc.msg('\nUnable to decode string as value: ' + val)
return 0
    def get_cbsize(self):
        """
        Returns the size of the addressable codebyte for the processor.

        Returns:
            Integer representing the number of 8-bit bytes in an
            addressable codebyte.
        """
        # Round the processor's code-byte bit count up to whole octets.
        # NOTE(review): '/' is floor division only under Python 2; under
        # Python 3 this would return a float -- confirm intended runtime.
        return (ida_idp.ph_get_cnbits() + 7) / 8
def get_datatype_flags(self, datatype, size):
"""
Returns the flags bitmask for the datatype.
Args:
datatype: String representing the datatype.
size: Integer representing the datatype size.
Returns:
Integer representing the bitmask.
"""
if datatype.lower().startswith("byte"):
return ida_bytes.byte_flag()
if datatype.lower().startswith("word"):
return ida_bytes.word_flag()
if datatype.lower().startswith("dword"):
return ida_bytes.dword_flag()
if datatype.lower().startswith("qword"):
return ida_bytes.qword_flag()
if datatype.lower().startswith("oword"):
return ida_bytes.oword_flag()
if datatype.lower().startswith("tbyte"):
return ida_bytes.tbyte_flag()
if datatype.lower().startswith("float"):
return ida_bytes.float_flag()
if datatype.lower().startswith("double"):
return ida_bytes.double_flag()
if datatype.lower().startswith("packed"):
return ida_bytes.packreal_flag()
if self.is_string_type(datatype):
return ida_bytes.strlit_flag()
if self.is_enumeration(datatype):
return ida_bytes.enum_flag()
if self.is_structure(datatype):
return ida_bytes.stru_flag()
# if size == 4: return
# ida_bytes.dword_flag()
return 0
    def get_string_type(self, datatype):
        # Map a string datatype name to an IDA string-literal type constant.
        # NOTE(review): 'mbcstring' and plain 'pascal' both map to
        # STRTYPE_C_16 here -- verify this is intended rather than a
        # pascal/MBCS-specific constant.
        if datatype.lower() == 'mbcstring':
            return ida_nalt.STRTYPE_C_16
        if datatype.lower().find('unicode') != -1:
            # Pascal-style unicode maps to a 2-byte-length-prefixed type
            # (per the STRTYPE_LEN2_16 constant name).
            if datatype.lower().find('pascal') != -1:
                return ida_nalt.STRTYPE_LEN2_16
            return ida_nalt.STRTYPE_C_16
        if datatype.lower().find('pascal') != -1:
            return ida_nalt.STRTYPE_C_16
        # Default: terminated (C-style) string.
        return ida_nalt.STRTYPE_TERMCHR
def has_attribute(self, element, attr):
"""
Returns true if the XML element contains the named attribute.
Args:
element: XML element object
attr: String containing name of the attribute
Returns:
True if the element contains the named attribute, otherwise False.
"""
return attr in element.attrib
def is_enumeration(self, datatype):
"""
Returns true if datatype is an existing enumeration in the database.
Args:
datatype: String representing the datatype.
Returns:
True if the datatype is an enumeration in the database,
otherwise False.
"""
if ida_enum.get_enum(datatype) == BADNODE:
return False
return True
def is_int(self, s):
try:
int(s, 16)
return True
except:
return False
def is_pointer_type(self, dtype):
"""
Returns true if the datatype represents a pointer.
Args:
dtype: String representing the datatype.
Returns:
True if the datatype represents a pointer, otherwise False.
"""
if dtype.lower().startswith("pointer") or dtype.endswith('*'):
return True
return False
def is_string_type(self, datatype):
"""
Returns true if the datatype represents a string type.
Args:
datatype: String representing the datatype.
Returns:
True if the datatype represents a string, otherwise False.
"""
if datatype.lower().startswith("unicode"):
return True
if datatype.lower().startswith("string"):
return True
return False
def is_structure(self, datatype):
"""
Returns true if the datatype represents a structure in the database.
Args:
dtype: String representing the datatype.
Returns:
True if the datatype represents an existing structure,
otherwise False.
"""
if ida_struct.get_struc_id(datatype) == BADNODE:
return False
return True
def import_address_range(self, address_range):
"""
Processes ADDRESS_RANGE element.
Args:
address_range: XML element object containing start and end address
attributes for the address range.
Returns:
Tuple containing two integers, the start and end address values.
"""
start = self.get_address(address_range, START)
end = self.get_address(address_range, END)
self.update_counter(ADDRESS_RANGE)
return (start, end)
def import_bit_mask(self, bitmask, eid):
"""
Processes a BIT_MASK element as an enum bitmask member.
Args:
bitmask: XML element object representing the IDA enum bitmask.
eid: Integer representing the IDA enum id
"""
name = self.get_attribute(bitmask, NAME)
value = self.get_attribute_value(bitmask, VALUE)
ida_enum.set_bmask_name(eid, value, name)
cid = ida_enum.get_enum_member_by_name(name)
self.update_counter(BIT_MASK)
regcmt = bitmask.find(REGULAR_CMT)
if regcmt != None:
ida_enum.set_enum_member_cmt(cid, regcmt.text, False)
self.update_counter(BIT_MASK + ':' + REGULAR_CMT)
rptcmt = bitmask.find(REPEATABLE_CMT)
if rptcmt != None:
ida_enum.set_enum_member_cmt(cid, rptcmt.txt, True)
self.update_counter(BIT_MASK + ':' + REPEATABLE_CMT)
    def import_bookmark(self, bookmark):
        """
        Processes a BOOKMARK element.

        Creates the bookmark in the first free IDA mark slot; skipped when
        bookmark import is disabled or the address is not mapped.

        Args:
            bookmark: XML element object containing bookmark data.
        """
        if self.options.Bookmarks.checked == False:
            return
        try:
            addr = self.get_address(bookmark, ADDRESS)
            # NOTE(review): 'typ' and 'category' are read but never used
            # below; only the description is stored.
            if self.has_attribute(bookmark, TYPE):
                typ = self.get_attribute(bookmark, TYPE)
            category = ''
            if self.has_attribute(bookmark, CATEGORY):
                category = self.get_attribute(bookmark, CATEGORY)
            description = ''
            if self.has_attribute(bookmark, DESCRIPTION):
                description = self.get_attribute(bookmark, DESCRIPTION)
            if idc.is_mapped(addr) == False:
                msg = ("import_bookmark: address %X not enabled in database"
                       % addr)
                print msg
                return
            self.update_counter(BOOKMARK)
            # Store in the first unused bookmark slot (BADADDR = free).
            for slot in range(ida_moves.MAX_MARK_SLOT):
                ea = idc.get_bookmark(slot)
                if ea == BADADDR:
                    idc.put_bookmark(addr, 0, 0, 0, slot, description)
                    break
        except:
            # Best-effort import: report and continue with the next element.
            msg = "** Exception occurred in import_bookmark **"
            print "\n" + msg + "\n", sys.exc_type, sys.exc_value
def import_cmts(self, element, sid, typ):
"""
Processes REGULAR_CMT and REPEATABLE_CMT elements for structures.
Args:
element: XML element object containing a REGULAR_CMT or
REPEATABLE_CMT element
sid: Integer representing the structure id
typ: String indicating structure type (STRUCTURE or UNION)
"""
regcmt = element.find(REGULAR_CMT)
if regcmt != None:
ida_struct.set_struc_cmt(sid, regcmt.text, False)
self.update_counter(typ + ':' + REGULAR_CMT)
rptcmt = element.find(REPEATABLE_CMT)
if rptcmt != None:
ida_struct.set_struc_cmt(sid, rptcmt.text, True)
self.update_counter(typ + ':' + REPEATABLE_CMT)
    def import_codeblock(self, code_block):
        """
        Processes a CODE_BLOCK element by disassembling the address range.

        Args:
            code_block: XML element containing codeblock start and end
                addresses.
        """
        if self.options.CodeBlocks.checked == False:
            return
        start = self.get_address(code_block, START)
        end = self.get_address(code_block, END)
        # Undefine any existing items over the range before disassembling.
        # NOTE(review): the literal 3 is a del_items flags value -- confirm
        # against the IDA SDK which flags are intended.
        ida_bytes.del_items(start, 3, end - start + 1)
        addr = start
        while (addr <= end):
            # NOTE(review): 'length' is unused; the return value of
            # create_insn is ignored and the step comes from get_item_size.
            length = ida_ua.create_insn(addr)
            # Step by the created item's size, scaled to 8-bit bytes.
            addr += ida_bytes.get_item_size(addr) * self.get_cbsize()
        self.update_counter(CODE_BLOCK)
def import_comment(self, comment):
"""
Processes a COMMENT element by creating the comment at the address.
Args:
comment: XML element containing the comment address, type,
and text.
"""
if self.options.Comments.checked == False:
return
addr = self.get_address(comment, ADDRESS)
ctype = self.get_attribute(comment, TYPE)
text = comment.text
if ctype == 'pre':
ida_lines.add_extra_cmt(addr, True, text)
elif ctype == 'end-of-line':
idc.set_cmt(addr, text, False)
elif ctype == 'repeatable':
idc.set_cmt(addr, text, True)
elif ctype == 'post':
ida_lines.add_extra_cmt(addr, False, text)
self.update_counter(COMMENT + ':' + ctype)
def import_compiler(self, compiler):
"""
Processes the COMPILER element containing the compiler name.
Args:
compiler: XML element containing the compiler name.
"""
name = self.get_attribute(compiler, NAME)
self.update_counter(COMPILER)
if self.plugin:
return
comp = idc.COMP_UNK
if name == "Visual C++":
comp = ida_typeinf.COMP_MS
elif name == "Borland C++":
comp = ida_typeinf.COMP_BC
elif name == "Watcom C++":
comp = ida_typeinf.COMP_WATCOM
elif name == "GNU C++":
comp = ida_typeinf.COMP_GNU
elif name == "Visual Age C++":
comp = ida_typeinf.COMP_VISAGE
elif name == "Delphi":
comp = ida_typeinf.COMP_BP
ida_typeinf.set_compiler_id(comp)
    def import_defined_data(self, defined_data):
        """
        Processes a DEFINED_DATA element by creating a data item at the
        specified address.

        Args:
            defined_data: XML element containing the address and
                datatype information for the data item
        """
        if self.options.DataDefinitions.checked == False:
            return
        addr = self.get_address(defined_data, ADDRESS)
        datatype = self.get_attribute(defined_data, DATATYPE)
        size = self.get_attribute_value(defined_data, SIZE)
        self.update_counter(DEFINED_DATA)
        # NOTE(review): 'ti' is unused while the set_refinfo/set_typeinfo
        # calls remain commented out.
        ti = ida_nalt.opinfo_t()
        if self.is_pointer_type(datatype):
            # Pointers: dword displayed as an offset.
            #idaapi.set_refinfo(ti, 0, 0, 0, REF_OFF32)
            flag = ida_bytes.dword_flag() | idc.FF_0OFF
            #idaapi.set_typeinfo(addr, 0, flag, ti)
        else:
            flag = self.get_datatype_flags(datatype, size)
        # Create the item according to the resolved flag kind.
        if flag == ida_bytes.strlit_flag():
            ida_bytes.create_strlit(addr, size, self.get_string_type(datatype))
        elif flag == ida_bytes.stru_flag():
            idc.create_struct(addr, size, datatype)
        else:
            idc.create_data(addr, flag, size, BADNODE)
        typecmt = defined_data.find(TYPEINFO_CMT)
        if typecmt != None:
            # NOTE(review): the typeinfo comment is only counted, never
            # applied -- confirm whether that is intentional.
            self.update_counter(DEFINED_DATA + ':' + TYPEINFO_CMT)
    def import_description(self, description):
        """
        Processes the DESCRIPTION element.

        Args:
            description: DESCRIPTION XML element.
        """
        # Only counted for statistics; the text itself is not imported yet.
        self.update_counter(DESCRIPTION)
        # TODO: import_description: decide what to do with DESCRIPTION
        # print description.text
    def import_enum(self, enum):
        """
        Processes an ENUM element by creating the enumeration.

        Args:
            enum: XML element containing the enumeration name and
                member data.
        """
        if self.options.DataTypes.checked == False:
            return
        name = self.get_attribute(enum, NAME)
        # NOTE(review): 'namespace' and 'size' are read but never used; the
        # enum is always created with hex/dword display flags.
        if self.has_attribute(enum, NAMESPACE):
            namespace = self.get_attribute(enum, NAMESPACE)
        if self.has_attribute(enum, SIZE):
            size = self.get_attribute_value(enum, SIZE)
        eid = idc.add_enum(BADNODE, name,
                           ida_bytes.hex_flag() | ida_bytes.dword_flag())
        self.update_counter(ENUM)
        # Optional enum-level comments.
        regcmt = enum.find(REGULAR_CMT)
        if regcmt != None:
            idc.set_enum_cmt(eid, regcmt.text, False)
            self.update_counter(ENUM + ':' + REGULAR_CMT)
        rptcmt = enum.find(REPEATABLE_CMT)
        if rptcmt != None:
            idc.set_enum_cmt(eid, rptcmt.text, True)
            self.update_counter(ENUM + ':' + REPEATABLE_CMT)
        # Display settings are only counted, not applied.
        display_settings = enum.find(DISPLAY_SETTINGS)
        if display_settings != None:
            self.update_counter(ENUM + ':' + DISPLAY_SETTINGS)
        # Import each member.
        enum_entries = enum.findall(ENUM_ENTRY)
        for enum_entry in enum_entries:
            self.import_enum_entry(enum_entry, eid)
def import_enum_entry(self, enum_entry, eid):
"""
Processes an ENUM_ENTRY by creating a member in the enumeration.
Args:
enum_entry: XML element containing the member name and value.
eid: Integer representing the id of the enumeration.
"""
name = self.get_attribute(enum_entry, NAME)
value = self.get_attribute_value(enum_entry, VALUE)
ida_enum.add_enum_member(eid, name, value)
cid = idc.get_enum_member_by_name(name)
self.update_counter(ENUM_ENTRY)
regcmt = enum_entry.find(REGULAR_CMT)
if regcmt != None:
idc.set_enum_member_cmt(cid, regcmt.text, False)
self.update_counter(ENUM_ENTRY + ':' + REGULAR_CMT)
rptcmt = enum_entry.find(REPEATABLE_CMT)
if rptcmt != None:
idc.set_enum_member_cmt(cid, rptcmt.text, True)
self.update_counter(ENUM_ENTRY + ':' + REPEATABLE_CMT)
def import_equate(self, equate, eid):
| |
# Source: boto3_type_annotations_with_docs/boto3_type_annotations/backup/client.py
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
    def can_paginate(self, operation_name: str = None):
        """
        Check if an operation can be paginated.
        :type operation_name: string
        :param operation_name: The operation name. This is the same name
            as the method name on the client. For example, if the
            method name is ``create_foo``, and you\'d normally invoke the
            operation as ``client.create_foo(**kwargs)``, if the
            ``create_foo`` operation can be paginated, you can use the
            call ``client.get_paginator(\"create_foo\")``.
        :return: ``True`` if the operation can be paginated,
            ``False`` otherwise.
        """
        # Generated type-annotation stub: the real implementation is
        # inherited from botocore's BaseClient at runtime.
        pass
def create_backup_plan(self, BackupPlan: Dict, BackupPlanTags: Dict = None, CreatorRequestId: str = None) -> Dict:
"""
Backup plans are documents that contain information that AWS Backup uses to schedule tasks that create recovery points of resources.
If you call ``CreateBackupPlan`` with a plan that already exists, the existing ``backupPlanId`` is returned.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/CreateBackupPlan>`_
**Request Syntax**
::
response = client.create_backup_plan(
BackupPlan={
'BackupPlanName': 'string',
'Rules': [
{
'RuleName': 'string',
'TargetBackupVaultName': 'string',
'ScheduleExpression': 'string',
'StartWindowMinutes': 123,
'CompletionWindowMinutes': 123,
'Lifecycle': {
'MoveToColdStorageAfterDays': 123,
'DeleteAfterDays': 123
},
'RecoveryPointTags': {
'string': 'string'
}
},
]
},
BackupPlanTags={
'string': 'string'
},
CreatorRequestId='string'
)
**Response Syntax**
::
{
'BackupPlanId': 'string',
'BackupPlanArn': 'string',
'CreationDate': datetime(2015, 1, 1),
'VersionId': 'string'
}
**Response Structure**
- *(dict) --*
- **BackupPlanId** *(string) --*
Uniquely identifies a backup plan.
- **BackupPlanArn** *(string) --*
An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, ``arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50`` .
- **CreationDate** *(datetime) --*
The date and time that a backup plan is created, in Unix format and Coordinated Universal Time (UTC). The value of ``CreationDate`` is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
- **VersionId** *(string) --*
Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1024 bytes long. They cannot be edited.
:type BackupPlan: dict
:param BackupPlan: **[REQUIRED]**
Specifies the body of a backup plan. Includes a ``BackupPlanName`` and one or more sets of ``Rules`` .
- **BackupPlanName** *(string) --* **[REQUIRED]**
The display name of a backup plan.
- **Rules** *(list) --* **[REQUIRED]**
An array of ``BackupRule`` objects, each of which specifies a scheduled task that is used to back up a selection of resources.
- *(dict) --*
Specifies a scheduled task used to back up a selection of resources.
- **RuleName** *(string) --* **[REQUIRED]**
>An optional display name for a backup rule.
- **TargetBackupVaultName** *(string) --* **[REQUIRED]**
The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.
- **ScheduleExpression** *(string) --*
A CRON expression specifying when AWS Backup initiates a backup job.
- **StartWindowMinutes** *(integer) --*
The amount of time in minutes before beginning a backup.
- **CompletionWindowMinutes** *(integer) --*
The amount of time AWS Backup attempts a backup before canceling the job and returning an error.
- **Lifecycle** *(dict) --*
The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup will transition and expire backups automatically according to the lifecycle that you define.
Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days”. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.
- **MoveToColdStorageAfterDays** *(integer) --*
Specifies the number of days after creation that a recovery point is moved to cold storage.
- **DeleteAfterDays** *(integer) --*
Specifies the number of days after creation that a recovery point is deleted. Must be greater than ``MoveToColdStorageAfterDays`` .
- **RecoveryPointTags** *(dict) --*
To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair.
- *(string) --*
- *(string) --*
:type BackupPlanTags: dict
:param BackupPlanTags:
To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair. The specified tags are assigned to all backups created with this plan.
- *(string) --*
- *(string) --*
:type CreatorRequestId: string
:param CreatorRequestId:
Identifies the request and allows failed requests to be retried without the risk of executing the operation twice. If the request includes a ``CreatorRequestId`` that matches an existing backup plan, that plan is returned. This parameter is optional.
:rtype: dict
:returns:
"""
pass
def create_backup_selection(self, BackupPlanId: str, BackupSelection: Dict, CreatorRequestId: str = None) -> Dict:
"""
Creates a JSON document that specifies a set of resources to assign to a backup plan. Resources can be included by specifying patterns for a ``ListOfTags`` and selected ``Resources`` .
For example, consider the following patterns:
* ``Resources: "arn:aws:ec2:region:account-id:volume/volume-id"``
* ``ConditionKey:"department"`` ``ConditionValue:"finance"`` ``ConditionType:"StringEquals"``
* ``ConditionKey:"importance"`` ``ConditionValue:"critical"`` ``ConditionType:"StringEquals"``
Using these patterns would back up all Amazon Elastic Block Store (Amazon EBS) volumes that are tagged as ``"department=finance"`` , ``"importance=critical"`` , in addition to an EBS volume with the specified volume Id.
Resources and conditions are additive in that all resources that match the pattern are selected. This shouldn't be confused with a logical AND, where all conditions must match. The matching patterns are logically 'put together using the OR operator. In other words, all patterns that match are selected for backup.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/CreateBackupSelection>`_
**Request Syntax**
::
response = client.create_backup_selection(
BackupPlanId='string',
BackupSelection={
'SelectionName': 'string',
'IamRoleArn': 'string',
'Resources': [
'string',
],
'ListOfTags': [
{
'ConditionType': 'STRINGEQUALS',
'ConditionKey': 'string',
'ConditionValue': 'string'
},
]
},
CreatorRequestId='string'
)
**Response Syntax**
::
{
'SelectionId': 'string',
'BackupPlanId': 'string',
'CreationDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **SelectionId** *(string) --*
Uniquely identifies the body of a request to assign a set of resources to a backup plan.
- **BackupPlanId** *(string) --*
Uniquely identifies a backup plan.
- **CreationDate** *(datetime) --*
The date and time a backup selection is created, in Unix format and Coordinated Universal Time (UTC). The value of ``CreationDate`` is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
:type BackupPlanId: string
:param BackupPlanId: **[REQUIRED]**
Uniquely identifies the backup plan to be associated with the selection of resources.
:type BackupSelection: dict
:param BackupSelection: **[REQUIRED]**
Specifies the body of a request to assign a set of resources to a backup plan.
It includes an array of resources, an optional array of patterns to exclude resources, an optional role to provide access to the AWS service the resource belongs to, and an optional array of tags used to identify a set of resources.
- **SelectionName** *(string) --* **[REQUIRED]**
The display name of a resource selection document.
- **IamRoleArn** *(string) --* **[REQUIRED]**
The ARN of the IAM role that AWS Backup uses to authenticate when restoring the target resource; for example, ``arn:aws:iam::123456789012:role/S3Access`` .
- **Resources** *(list) --*
An array of strings that either contain Amazon Resource Names (ARNs) or match patterns such as \"``arn:aws:ec2:us-east-1:123456789012:volume/*`` \" of resources to assign to a backup plan.
- *(string) --*
- **ListOfTags** *(list) --*
An array of conditions used to specify a set of resources to assign to a backup plan; for example, ``\"StringEquals\": {\"ec2:ResourceTag/Department\": \"accounting\"`` .
- *(dict) --*
Contains an array of triplets made up of a condition type (such as ``StringEquals`` ), a key, and a value. Conditions are used to filter resources in a selection that is assigned to a backup plan.
- **ConditionType** *(string) --* **[REQUIRED]**
An operation, such as ``StringEquals`` , that is applied to | |
<filename>mars/dataframe/indexing/getitem.py
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import pandas as pd
import numpy as np
from ... import opcodes as OperandDef
from ...config import options
from ...serialize import AnyField, Int32Field, BoolField
from ...utils import tokenize
from ..align import align_dataframe_series
from ..core import SERIES_TYPE
from ..merge import DataFrameConcat
from ..operands import DataFrameOperand, DataFrameOperandMixin, ObjectType
from ..utils import parse_index, in_range_index
from .utils import calc_columns_index
class SeriesIndex(DataFrameOperand, DataFrameOperandMixin):
    """Operand for label-based getitem on a Series (``series[labels]``).

    ``labels`` may be a single scalar label, producing a scalar output, or a
    list of labels, producing a Series output.
    """
    _op_type_ = OperandDef.INDEX
    _labels = AnyField('labels')  # scalar label or list of labels to select
    _combine_size = Int32Field('combine_size')  # fan-in used by _tree_getitem
    _is_intermediate = BoolField('is_intermediate')  # True for partial tree-reduction nodes
    def __init__(self, labels=None, combine_size=None, is_intermediate=None, object_type=None, **kw):
        super(SeriesIndex, self).__init__(_labels=labels, _combine_size=combine_size,
                                          _is_intermediate=is_intermediate, _object_type=object_type, **kw)
    @property
    def labels(self):
        return self._labels
    @property
    def combine_size(self):
        return self._combine_size
    @property
    def is_intermediate(self):
        return self._is_intermediate
    def __call__(self, series):
        return self.new_tileable([series], dtype=series.dtype)
    def _new_tileables(self, inputs, kws=None, **kw):
        # Override this method to automatically decide the output type,
        # when `labels` is a list, we will set `object_type` as series,
        # otherwise it will be a scalar.
        object_type = getattr(self, '_object_type', None)
        shape = kw.pop('shape', None)
        is_scalar = not isinstance(self._labels, list)
        if object_type is None:
            object_type = ObjectType.scalar if is_scalar else ObjectType.series
        self._object_type = object_type
        if shape is None:
            shape = () if is_scalar else ((len(self._labels)),)
        kw['shape'] = shape
        if not is_scalar:
            # The selected labels become the index of the output series.
            index_value = kw.pop('index_value', None) or parse_index(pd.Index(self._labels))
            kw['index_value'] = index_value
        return super(SeriesIndex, self)._new_tileables(inputs, kws=kws, **kw)
    def _new_chunks(self, inputs, kws=None, **kw):
        # Override this method to automatically decide the output type,
        # when `labels` is a list, we will set `object_type` as series,
        # otherwise it will be a scalar.
        object_type = getattr(self, '_object_type', None)
        is_scalar = not isinstance(self._labels, list)
        if object_type is None:
            object_type = ObjectType.scalar if is_scalar else ObjectType.series
        self._object_type = object_type
        if kw.get('shape', None) is None:
            shape = () if is_scalar else ((len(self._labels)),)
            kw['shape'] = shape
        if not is_scalar:
            index_value = kw.pop('index_value', None) or parse_index(pd.Index(self._labels))
            kw['index_value'] = index_value
        return super(SeriesIndex, self)._new_chunks(inputs, kws=kws, **kw)
    @classmethod
    def _calc_chunk_index(cls, label, chunk_indexes):
        # Return the position of the first chunk whose index contains `label`.
        for i, index in enumerate(chunk_indexes):
            if isinstance(index, pd.RangeIndex) and in_range_index(label, index):
                return i
            elif label in index:
                return i
        raise TypeError("label %s doesn't exist" % label)
    @classmethod
    def _tile_one_chunk(cls, op):
        # Single-chunk input: emit one chunk that performs the whole selection.
        in_series = op.inputs[0]
        out_series = op.outputs[0]
        index_op = SeriesIndex(labels=op.labels)
        index_chunk = index_op.new_chunk(in_series.chunks, dtype=out_series.dtype)
        new_op = op.copy()
        nsplits = ((len(op.labels),),) if isinstance(op.labels, list) else ()
        return new_op.new_tileables(op.inputs, chunks=[index_chunk], nsplits=nsplits,
                                    dtype=out_series.dtype)
    @classmethod
    def _tree_getitem(cls, op):
        """
        DataFrame doesn't store the index value except RangeIndex or specify `store=True` in `parse_index`,
        So we build a tree structure to avoid too much dependence for getitem node.
        """
        out_series = op.outputs[0]
        combine_size = options.combine_size
        chunks = op.inputs[0].chunks
        while len(chunks) > combine_size:
            # Reduce level by level: concat up to `combine_size` chunks, then
            # run an intermediate (partial) selection over the concatenation.
            new_chunks = []
            for i in range(0, len(chunks), combine_size):
                chks = chunks[i: i + combine_size]
                if len(chks) == 1:
                    chk = chks[0]
                else:
                    concat_op = DataFrameConcat(object_type=ObjectType.series)
                    chk = concat_op.new_chunk(chks, dtype=chks[0].dtype)
                chk_op = SeriesIndex(labels=op.labels, is_intermediate=True)
                chk = chk_op.new_chunk([chk], shape=(np.nan,), dtype=chk.dtype,
                                       index_value=parse_index(pd.RangeIndex(0)))
                new_chunks.append(chk)
            chunks = new_chunks
        # Final level: concat the surviving partials and do the real selection.
        concat_op = DataFrameConcat(object_type=ObjectType.series)
        chk = concat_op.new_chunk(chunks, dtype=chunks[0].dtype)
        index_op = SeriesIndex(labels=op.labels)
        chunk = index_op.new_chunk([chk], dtype=chk.dtype)
        new_op = op.copy()
        nsplits = ((len(op.labels),),) if isinstance(op.labels, list) else ()
        return new_op.new_tileables(op.inputs, dtype=out_series.dtype, chunks=[chunk], nsplits=nsplits)
    @classmethod
    def tile(cls, op):
        in_series = op.inputs[0]
        out_series = op.outputs[0]
        if len(in_series.chunks) == 1:
            return cls._tile_one_chunk(op)
        if not in_series.index_value.has_value():
            # Index data unknown at tile time: fall back to tree selection.
            return cls._tree_getitem(op)
        chunk_indexes = [c.index_value.to_pandas() for c in in_series.chunks]
        if not isinstance(op.labels, list):
            # Scalar label: locate the owning chunk and emit one scalar chunk.
            selected_chunk = in_series.chunks[cls._calc_chunk_index(op.labels, chunk_indexes)]
            index_op = op.copy().reset_key()
            out_chunk = index_op.new_chunk([selected_chunk], shape=(), dtype=selected_chunk.dtype)
            new_op = op.copy()
            return new_op.new_scalars(op.inputs, dtype=out_series.dtype, chunks=[out_chunk])
        else:
            # When input series's index is RangeIndex(5), chunk_size is 3, and labels is [4, 2, 3, 4],
            # Combine the labels in the same chunk, so the splits will be [[4], [2], [3, 4]],
            # the corresponding chunk index is [1, 0, 1].
            selected_index = [cls._calc_chunk_index(label, chunk_indexes) for label in op.labels]
            condition = np.where(np.diff(selected_index))[0] + 1
            column_splits = np.split(op.labels, condition)
            column_indexes = np.split(selected_index, condition)
            out_chunks = []
            nsplits = []
            for i, (labels, idx) in enumerate(zip(column_splits, column_indexes)):
                index_op = SeriesIndex(labels=list(labels))
                c = in_series.chunks[idx[0]]
                nsplits.append(len(labels))
                out_chunks.append(index_op.new_chunk([c], shape=(len(labels),), dtype=c.dtype,
                                                     index_value=parse_index(pd.RangeIndex(len(labels))),
                                                     name=c.name, index=(i,)))
            new_op = op.copy()
            return new_op.new_seriess(op.inputs, shape=out_series.shape, dtype=out_series.dtype,
                                      index_value=out_series.index_value, nsplits=(tuple(nsplits),),
                                      chunks=out_chunks)
    @classmethod
    def execute(cls, ctx, op):
        series = ctx[op.inputs[0].key]
        labels = op.labels
        if op.is_intermediate:
            # for intermediate result, it is always a series even if labels is a scalar.
            labels = labels if isinstance(labels, list) else [labels]
            # Keep only labels present in this partial series (dedupes too).
            labels = [label for label in set(labels) if label in series]
        ctx[op.outputs[0].key] = series[labels]
class DataFrameIndex(DataFrameOperand, DataFrameOperandMixin):
    """Operand for getitem on a DataFrame: column selection or boolean mask.

    Exactly one of ``col_names`` / ``mask`` is used: when ``col_names`` is
    set the operand selects columns (a list yields a DataFrame, a scalar
    yields a Series); otherwise ``mask`` filters rows with a boolean Series.
    """
    _op_type_ = OperandDef.INDEX
    _col_names = AnyField('col_names')  # scalar column name or list of names
    _mask = AnyField('mask')  # boolean mars Series or pandas Series
    def __init__(self, col_names=None, mask=None, object_type=ObjectType.series, **kw):
        super(DataFrameIndex, self).__init__(_col_names=col_names, _mask=mask,
                                             _object_type=object_type, **kw)
    @property
    def col_names(self):
        return self._col_names
    @property
    def mask(self):
        return self._mask
    def __call__(self, df):
        if self.col_names is not None:
            # if col_names is a list, return a DataFrame, else return a Series
            if isinstance(self._col_names, list):
                dtypes = df.dtypes[self._col_names]
                columns = parse_index(pd.Index(self._col_names), store_data=True)
                return self.new_dataframe([df], shape=(df.shape[0], len(self._col_names)), dtypes=dtypes,
                                          index_value=df.index_value, columns_value=columns)
            else:
                dtype = df.dtypes[self._col_names]
                return self.new_series([df], shape=(df.shape[0],), dtype=dtype, index_value=df.index_value,
                                       name=self._col_names)
        else:
            # Masked row count is unknown until execution (np.nan); build a
            # fresh index_value keyed on the (df, mask) pair.
            if isinstance(self.mask, SERIES_TYPE):
                index_value = parse_index(pd.Index([], dtype=df.index_value.to_pandas().dtype),
                                          key=tokenize(df.key, self.mask.key,
                                                       df.index_value.key, self.mask.index_value.key))
                return self.new_dataframe([df, self._mask], shape=(np.nan, df.shape[1]), dtypes=df.dtypes,
                                          index_value=index_value, columns_value=df.columns)
            else:
                # pandas-Series mask: hash its content into the key since it
                # travels on the operand rather than as a graph input.
                index_value = parse_index(pd.Index([], dtype=df.index_value.to_pandas().dtype),
                                          key=tokenize(df.key, pd.util.hash_pandas_object(self.mask),
                                                       df.index_value.key, parse_index(self.mask.index).key))
                return self.new_dataframe([df], shape=(np.nan, df.shape[1]), dtypes=df.dtypes,
                                          index_value=index_value, columns_value=df.columns)
    @classmethod
    def tile(cls, op):
        # Dispatch on the selection kind decided in __call__.
        if op.col_names is not None:
            return cls.tile_with_columns(op)
        else:
            return cls.tile_with_mask(op)
    @classmethod
    def tile_with_mask(cls, op):
        in_df = op.inputs[0]
        out_df = op.outputs[0]
        out_chunks = []
        if isinstance(op.mask, SERIES_TYPE):
            # Mask is a tiled mars Series: align its chunks with the
            # DataFrame chunks along the index axis first.
            mask = op.inputs[1]
            nsplits, out_shape, df_chunks, mask_chunks = align_dataframe_series(in_df, mask, axis='index')
            out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
            out_chunks = []
            for idx, df_chunk in zip(out_chunk_indexes, df_chunks):
                mask_chunk = mask_chunks[df_chunk.index[0]]
                out_chunk = op.copy().reset_key().new_chunk([df_chunk, mask_chunk],
                                                            shape=(np.nan, df_chunk.shape[1]), index=idx,
                                                            index_value=df_chunk.index_value,
                                                            columns_value=df_chunk.columns)
                out_chunks.append(out_chunk)
        else:
            # Mask is a plain pandas Series: slice it per row-chunk using the
            # cumulative split offsets and attach the slice to each chunk op.
            nsplits_acc = np.cumsum((0,) + in_df.nsplits[0])
            for idx in range(in_df.chunk_shape[0]):
                for idxj in range(in_df.chunk_shape[1]):
                    in_chunk = in_df.cix[idx, idxj]
                    chunk_op = op.copy().reset_key()
                    chunk_op._mask = op.mask.iloc[nsplits_acc[idx]:nsplits_acc[idx+1]]
                    out_chunk = chunk_op.new_chunk([in_chunk], index=in_chunk.index,
                                                   shape=(np.nan, in_chunk.shape[1]), dtypes=in_chunk.dtypes,
                                                   index_value=in_df.index_value, columns_value=in_chunk.columns)
                    out_chunks.append(out_chunk)
        nsplits = ((np.nan,) * in_df.chunk_shape[0], in_df.nsplits[1])
        new_op = op.copy()
        return new_op.new_dataframes(op.inputs, shape=out_df.shape, dtypes=out_df.dtypes,
                                     index_value=out_df.index_value, columns_value=out_df.columns,
                                     chunks=out_chunks, nsplits=nsplits)
    @classmethod
    def tile_with_columns(cls, op):
        in_df = op.inputs[0]
        out_df = op.outputs[0]
        col_names = op.col_names
        if not isinstance(col_names, list):
            # Scalar column: the result is a Series assembled from the single
            # column-chunk holding `col_names` in each row of the chunk grid.
            column_index = calc_columns_index(col_names, in_df)
            out_chunks = []
            dtype = in_df.dtypes[col_names]
            for i in range(in_df.chunk_shape[0]):
                c = in_df.cix[(i, column_index)]
                # NOTE(review): this rebinds the outer `op`, so `op.copy()`
                # and `op.inputs` below refer to the last chunk op rather
                # than the tileable's op — looks unintended; confirm.
                op = DataFrameIndex(col_names=col_names)
                out_chunks.append(op.new_chunk([c], shape=(c.shape[0],), index=(i,), dtype=dtype,
                                               index_value=c.index_value, name=col_names))
            new_op = op.copy()
            return new_op.new_seriess(op.inputs, shape=out_df.shape, dtype=out_df.dtype,
                                      index_value=out_df.index_value, name=out_df.name,
                                      nsplits=(in_df.nsplits[0],), chunks=out_chunks)
        else:
            # combine columns into one chunk and keep the columns order at the same time.
            # When chunk columns are ['c1', 'c2', 'c3'], ['c4', 'c5'],
            # selected columns are ['c2', 'c3', 'c4', 'c2'], `column_splits` will be
            # [(['c2', 'c3'], 0), ('c4', 1), ('c2', 0)].
            selected_index = [calc_columns_index(col, in_df) for col in col_names]
            condition = np.where(np.diff(selected_index))[0] + 1
            column_splits = np.split(col_names, condition)
            column_indexes = np.split(selected_index, condition)
            out_chunks = [[] for _ in range(in_df.chunk_shape[0])]
            column_nsplits = []
            for i, (columns, column_idx) in enumerate(zip(column_splits, column_indexes)):
                dtypes = in_df.dtypes[columns]
                column_nsplits.append(len(columns))
                for j in range(in_df.chunk_shape[0]):
                    c = in_df.cix[(j, column_idx[0])]
                    index_op = DataFrameIndex(col_names=list(columns), object_type=ObjectType.dataframe)
                    out_chunk = index_op.new_chunk([c], shape=(c.shape[0], len(columns)), index=(j, i),
                                                   dtypes=dtypes, index_value=c.index_value,
                                                   columns_value=parse_index(pd.Index(columns),
                                                                             store_data=True))
                    out_chunks[j].append(out_chunk)
            # Flatten row-major: chunks were grouped per row of the chunk grid.
            out_chunks = [item for l in out_chunks for item in l]
            new_op = op.copy()
            nsplits = (in_df.nsplits[0], tuple(column_nsplits))
            return new_op.new_dataframes(op.inputs, shape=out_df.shape, dtypes=out_df.dtypes,
                                         index_value=out_df.index_value,
                                         columns_value=out_df.columns,
                                         chunks=out_chunks, nsplits=nsplits)
    @classmethod
    def execute(cls, ctx, op):
        # Delegate to pandas getitem on the materialized chunk data.
        if op.mask is None:
            df = ctx[op.inputs[0].key]
            ctx[op.outputs[0].key] = df[op.col_names]
        else:
            df = ctx[op.inputs[0].key]
            if isinstance(op.mask, SERIES_TYPE):
                # tiled mask arrives as the second input's materialized data
                mask = ctx[op.inputs[1].key]
            else:
                # pandas mask slice was stored on the operand at tile time
                mask = op.mask
            ctx[op.outputs[0].key] = df[mask]
def dataframe_getitem(df, item):
    """
    Entry point for ``df[item]`` on a mars DataFrame.

    Dispatches on the type of ``item``:
      * list of column names -> column-selecting DataFrameIndex
        (result is a DataFrame);
      * boolean mars Series or pandas Series -> mask-based DataFrameIndex
        (result is a DataFrame);
      * anything else is treated as a single column name
        (result is a Series).

    Raises:
        KeyError: if a requested column name is not among the columns.
    """
    columns = df.columns.to_pandas()
    if isinstance(item, list):
        for col_name in item:
            if col_name not in columns:
                raise KeyError('%s not in columns' % col_name)
        op = DataFrameIndex(col_names=item, object_type=ObjectType.dataframe)
    elif isinstance(item, (SERIES_TYPE, pd.Series)) and item.dtype == np.bool_:
        # `np.bool` was a deprecated alias of the builtin `bool` and was
        # removed in NumPy 1.24; `np.bool_` is the actual boolean dtype and
        # compares the same way. The two identical branches are also merged.
        op = DataFrameIndex(mask=item, object_type=ObjectType.dataframe)
    else:
        if item not in columns:
            raise KeyError('%s not in columns' % item)
        op = DataFrameIndex(col_names=item)
    return op(df)
def series_getitem(series, labels, combine_size=None):
if isinstance(labels, | |
created by putting \\test commands in the
# documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
# list. This list is created by putting \\bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
# the deprecated list. This list is created by putting \\deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \\if <section_label> ... \\endif and \\cond <section_label>
# ... \\endcond blocks.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \\showinitializer or \\hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES the list
# will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. Do not use file names with spaces, bibtex cannot handle them. See
# also \\cite for info how to create references.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO doxygen will only warn about wrong or incomplete parameter
# documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = "@CMAKE_CURRENT_SOURCE_DIR@/mainpage.dox" "@CMAKE_CURRENT_SOURCE_DIR@/include/"
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see: http://www.gnu.org/software/libiconv) for the list of
# possible encodings.
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank the
# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
FILE_PATTERNS =
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note | |
#
# Copyright (c) 2006-2013, Prometheus Research, LLC
#
"""
:mod:`htsql.core.tr.fn.bind`
============================
"""
from ...util import aresubclasses
from ...adapter import Adapter, Component, adapt, adapt_many, call
from ...domain import (Domain, UntypedDomain, BooleanDomain, TextDomain,
IntegerDomain, DecimalDomain, FloatDomain, DateDomain, TimeDomain,
DateTimeDomain, EnumDomain, ListDomain, RecordDomain, EntityDomain,
IdentityDomain)
from ...syn.syntax import (LiteralSyntax, NumberSyntax, IntegerSyntax,
StringSyntax, IdentifierSyntax, ComposeSyntax, ApplySyntax,
OperatorSyntax, PrefixSyntax, GroupSyntax, ReferenceSyntax)
from ..binding import (LiteralBinding, SortBinding, SieveBinding,
IdentityBinding, FormulaBinding, CastBinding, ImplicitCastBinding,
WrappingBinding, TitleBinding, DirectionBinding, QuotientBinding,
AssignmentBinding, DefineBinding, DefineReferenceBinding,
DefineCollectionBinding, SelectionBinding, HomeBinding,
RescopingBinding, CoverBinding, ForkBinding, ClipBinding, AliasBinding,
Binding, BindingRecipe, ComplementRecipe, KernelRecipe,
SubstitutionRecipe, ClosedRecipe)
from ..bind import BindByName, BindingState
from ...classify import normalize
from ...error import Error, translate_guard
from ..coerce import coerce
from ..decorate import decorate
from ..lookup import direct, expand, identify, guess_tag, unwrap
from ..signature import (Signature, NullarySig, UnarySig, BinarySig,
CompareSig, IsEqualSig, IsTotallyEqualSig, IsInSig, IsAmongSig,
IsNullSig, IfNullSig, NullIfSig, AndSig, OrSig, NotSig,
SortDirectionSig)
from .signature import (AsSig, LimitSig, SortSig, CastSig, MakeDateSig,
MakeDateTimeSig, CombineDateTimeSig, ExtractYearSig, ExtractMonthSig,
ExtractDaySig, ExtractHourSig, ExtractMinuteSig, ExtractSecondSig,
AddSig, ConcatenateSig, HeadSig, TailSig, SliceSig, AtSig, ReplaceSig,
UpperSig, LowerSig, TrimSig, DateIncrementSig, DateTimeIncrementSig,
SubtractSig, DateDecrementSig, DateTimeDecrementSig, DateDifferenceSig,
TodaySig, NowSig, MultiplySig, DivideSig, IfSig, SwitchSig,
KeepPolaritySig, ReversePolaritySig, RoundSig, RoundToSig, TruncSig,
TruncToSig, SquareRootSig, LengthSig, ContainsSig, ExistsSig, CountSig,
MinMaxSig, SumSig, AvgSig, AggregateSig, QuantifySig, DefineSig,
GivenSig, SelectSig, LinkSig, TopSig, GuardSig)
import sys
class BindFunction(BindByName):
    """
    Binds a function or operator application node.

    Class attributes:

    `signature`
        The function signature (a `Signature` subclass); must be set by
        a concrete subclass.
    `hint`, `help`
        Optional short and long descriptions of the function.
    """
    signature = None
    hint = None
    help = None
    def match(self):
        # Maps the raw syntax operands onto the signature slots;
        # returns a dictionary `{slot name: operand or [operands]}`.
        assert self.signature is not None
        arguments = {}
        if self.arguments is None:
            operands = []
        else:
            # Copy so the `pop()` calls below do not mutate the original.
            operands = self.arguments[:]
        # Arity accepted by the signature: all mandatory slots are
        # required; a plural (non-singular) last slot absorbs extras,
        # making the maximum unbounded (`None`).
        min_args = len([slot for slot in self.signature.slots
                        if slot.is_mandatory])
        max_args = len(self.signature.slots)
        if self.signature.slots and not self.signature.slots[-1].is_singular:
            max_args = None
        if len(operands) < min_args or (max_args is not None and
                                        len(operands) > max_args):
            # Build a human-readable description of the expected arity.
            if min_args == max_args == 1:
                message = "1 argument"
            elif min_args == max_args:
                message = "%s arguments" % min_args
            elif max_args == 1:
                message = "%s to %s argument" % (min_args, max_args)
            elif max_args is not None:
                message = "%s to %s arguments" % (min_args, max_args)
            else:
                message = "%s or more arguments" % min_args
            raise Error("Function '%s' expects %s; got %s"
                        % (self.name.encode('utf-8'),
                           message, len(operands)))
        # Distribute the operands among the slots in order.
        for index, slot in enumerate(self.signature.slots):
            name = slot.name
            value = None
            if not operands:
                # Out of operands: the slot must be optional; a plural
                # slot gets an empty list, a singular one stays `None`.
                assert not slot.is_mandatory
                if not slot.is_singular:
                    value = []
            elif slot.is_singular:
                value = operands.pop(0)
            else:
                if index == len(self.signature.slots)-1:
                    # The last plural slot consumes all remaining operands.
                    value = operands[:]
                    operands[:] = []
                else:
                    value = [operands.pop(0)]
            arguments[name] = value
        assert not operands
        return arguments
    def bind(self):
        # Binds the matched operands; returns `{slot name: binding(s)}`.
        assert self.signature is not None
        arguments = self.match()
        bound_arguments = {}
        for slot in self.signature.slots:
            name = slot.name
            value = arguments[name]
            bound_value = None
            if slot.is_singular:
                if value is not None:
                    bound_value = self.state.bind(value)
                    #if expand(bound_value) is not None:
                    #    raise Error("unexpected list argument",
                    #                value.mark)
            else:
                if len(value) > 1:
                    bound_value = [self.state.bind(item) for item in value]
                elif len(value) == 1:
                    [value] = value
                    bound_value = self.state.bind(value)
                    # A single operand in a plural slot may itself expand
                    # to a list of recipes (e.g. a selector); unpack it.
                    recipes = expand(bound_value, with_syntax=True)
                    if slot.is_mandatory and (recipes is not None and
                                              not recipes):
                        with translate_guard(value):
                            raise Error("Expected at least one element")
                    if recipes is None:
                        bound_value = [bound_value]
                    else:
                        bound_value = []
                        for syntax, recipe in recipes:
                            bound_value.append(self.state.use(recipe, syntax))
                else:
                    bound_value = []
            bound_arguments[name] = bound_value
        return bound_arguments
    def correlate(self, **arguments):
        # Builds the output binding from the bound arguments; must be
        # implemented by concrete subclasses.
        raise NotImplementedError()
    def __call__(self):
        # Bind the operands, then delegate to the subclass to produce
        # the resulting binding node.
        arguments = self.bind()
        return self.correlate(**arguments)
class BindMacro(BindFunction):
    """
    Binds a macro: expands directly from raw (unbound) syntax operands
    instead of binding each operand first.
    """
    def expand(self, **arguments):
        # Subclasses produce the expansion from the raw operands.
        raise NotImplementedError()
    def __call__(self):
        # Match the syntax operands against the signature slots and
        # hand them, unbound, to the macro expansion.
        return self.expand(**self.match())
class BindMonoFunction(BindFunction):
    """
    Binds a function with fixed argument and result domains: every
    argument is cast to its predefined domain and the result carries a
    predefined domain.

    Class attributes:

    `signature`
        The function signature (a `Signature` subclass).
    `domains`
        Argument domains, one per signature slot.
    `codomain`
        Domain of the result.
    """
    signature = None
    domains = []
    codomain = None
    def correlate(self, **arguments):
        assert self.signature is not None
        assert len(self.signature.slots) == len(self.domains)
        assert self.codomain is not None
        # Cast each argument to its declared domain; plural slots hold
        # a list of bindings and are cast element-wise.
        converted = {}
        for domain, slot in zip(self.domains, self.signature.slots):
            value = arguments[slot.name]
            if slot.is_singular:
                if value is not None:
                    value = ImplicitCastBinding(value, domain, value.syntax)
            else:
                value = [ImplicitCastBinding(item, domain, item.syntax)
                         for item in value]
            converted[slot.name] = value
        return FormulaBinding(self.state.scope,
                              self.signature(), coerce(self.codomain),
                              self.syntax, **converted)
class BindHomoFunction(BindFunction):
    """
    Binds a function whose arguments are all coerced to one common
    domain.

    Class attributes:

    `codomain`
        Domain of the result; when `None`, the result shares the
        arguments' common domain.
    """
    codomain = None
    def correlate(self, **arguments):
        assert self.signature is not None
        # Collect the domains of all argument values.
        domains = []
        for slot in self.signature.slots:
            name = slot.name
            value = arguments[name]
            if slot.is_singular:
                if value is not None:
                    domains.append(value.domain)
            else:
                domains.extend(item.domain for item in value)
        # Find a common domain the arguments coerce to; fail with a
        # descriptive error when none exists.
        domain = coerce(*domains)
        if domain is None:
            if len(domains) > 1:
                raise Error("Cannot coerce values of types (%s)"
                            " to a common type"
                            % (", ".join(str(domain)
                                         for domain in domains)))
            else:
                raise Error("Expected a scalar value")
        # Cast every argument to the common domain.
        cast_arguments = {}
        for slot in self.signature.slots:
            name = slot.name
            value = arguments[name]
            if slot.is_singular:
                if value is not None:
                    value = ImplicitCastBinding(value, domain, value.syntax)
            else:
                value = [ImplicitCastBinding(item, domain, item.syntax)
                         for item in value]
            cast_arguments[name] = value
        if self.codomain is None:
            codomain = domain
        else:
            codomain = coerce(self.codomain)
        # NOTE(review): unlike `BindMonoFunction.correlate`, the
        # signature is passed uninstantiated here (`self.signature`,
        # not `self.signature()`) -- presumably `FormulaBinding`
        # accepts a signature class as well as an instance; confirm.
        return FormulaBinding(self.state.scope,
                              self.signature, codomain, self.syntax,
                              **cast_arguments)
class Correlate(Component):
    """
    Resolves a formula node to a concrete implementation.

    Implementations are dispatched on the formula signature class and
    the domain classes of its leading mandatory singular arguments.

    Class attributes:

    `__signature__`
        The signature (a `Signature` subclass) handled by this
        implementation.
    `__domains__`
        Accepted argument-domain vectors (tuples of domain classes).
    `__arity__`
        Number of leading argument domains used for dispatching.
    """
    __signature__ = None
    __domains__ = []
    __arity__ = 0
    @classmethod
    def __dominates__(component, other):
        # One implementation dominates another if it subclasses it or
        # handles a strictly more specific signature.
        if component.__signature__ is None:
            return False
        if other.__signature__ is None:
            return False
        if issubclass(component, other):
            return True
        if (issubclass(component.__signature__, other.__signature__)
            and component.__signature__ is not other.__signature__):
            return True
        return False
    @classmethod
    def __matches__(component, dispatch_key):
        # `dispatch_key` is `(signature class, tuple of domain classes)`.
        if component.__signature__ is None:
            return False
        key_signature, key_domain_vector = dispatch_key
        if not issubclass(key_signature, component.__signature__):
            return False
        if len(key_domain_vector) < component.__arity__:
            return False
        # Compare only the leading `__arity__` domains.
        key_domain_vector = key_domain_vector[:component.__arity__]
        for domain_vector in component.__domains__:
            if aresubclasses(key_domain_vector, domain_vector):
                return True
        return False
    @classmethod
    def __dispatch__(interface, binding, *args, **kwds):
        # Build the dispatch key from the formula: its signature class
        # and the domain classes of the leading mandatory singular slots.
        assert isinstance(binding, FormulaBinding)
        signature = type(binding.signature)
        domain_vector = []
        for slot in signature.slots:
            if not (slot.is_mandatory and slot.is_singular):
                break
            domain = type(binding.arguments[slot.name].domain)
            domain_vector.append(domain)
        return (signature, tuple(domain_vector))
    def __init__(self, binding, state):
        assert isinstance(binding, FormulaBinding)
        assert isinstance(state, BindingState)
        self.binding = binding
        self.state = state
        self.arguments = binding.arguments
    def __call__(self):
        # Fallback invoked when no implementation matched: raise an
        # error naming the operator/function and the argument types,
        # listing the type combinations that would have been accepted.
        if isinstance(self.binding.syntax, OperatorSyntax):
            name = "operator '%s'" % self.binding.syntax.symbol.encode('utf-8')
        elif isinstance(self.binding.syntax, PrefixSyntax):
            name = "unary operator '%s'" \
                    % self.binding.syntax.symbol.encode('utf-8')
        elif isinstance(self.binding.syntax, ApplySyntax):
            name = "function '%s'" % self.binding.syntax.name.encode('utf-8')
        else:
            name = "'%s'" % self.binding.syntax
        key_signature, domain_vector = self.__dispatch_key__
        # Singular vs. plural wording for the error message.
        if len(domain_vector) > 1:
            types = "types"
            values = "values"
        else:
            types = "type"
            values = "a value"
        families = ", ".join("%s" % domain_class
                             for domain_class in domain_vector)
        if len(domain_vector) > 1:
            families = "(%s)" % families
        # Gather the domain vectors accepted by registered
        # implementations of this signature to suggest alternatives.
        valid_types = []
        for component in self.__interface__.__implementations__():
            if component.__signature__ is None:
                continue
            if not issubclass(key_signature, component.__signature__):
                continue
            for domain_vector in component.__domains__:
                # Skip untyped placeholders; they are not real options.
                if any(issubclass(domain_class, UntypedDomain)
                       for domain_class in domain_vector):
                    continue
                valid_families = ", ".join("%s" % domain_class
                                           for domain_class in domain_vector)
                if len(domain_vector) > 1:
                    valid_families = "(%s)" % valid_families
                if valid_families not in valid_types:
                    valid_types.append(valid_families)
        error = Error("Cannot apply %s to %s of %s %s"
                      % (name, values, types, families))
        if valid_types:
            error.wrap("Valid %s" % types, "\n".join(valid_types))
        raise error
def match(signature, *domain_vectors):
    """
    Registers a `Correlate` implementation for the given signature and
    argument-domain vectors.

    Must be called from within a `Correlate` subclass body: it injects
    `__signature__`, `__domains__` and `__arity__` into the class
    namespace through the caller's frame.
    """
    assert issubclass(signature, Signature)
    # Normalize each entry to a tuple of domain classes.
    domain_vectors = [domain_vector if isinstance(domain_vector, tuple)
                      else (domain_vector,)
                      for domain_vector in domain_vectors]
    assert len(domain_vectors) > 0
    # All vectors must dispatch on the same number of arguments.
    arity = len(domain_vectors[0])
    assert all(len(domain_vector) == arity
               for domain_vector in domain_vectors)
    # Inject the dispatch attributes into the enclosing class body.
    # NOTE: assigning to `f_locals` only takes effect at class/module
    # scope, which is where this helper is meant to be called.
    frame = sys._getframe(1)
    frame.f_locals['__signature__'] = signature
    frame.f_locals['__domains__'] = domain_vectors
    frame.f_locals['__arity__'] = arity
class CorrelateFunction(Correlate):
    """
    Base class for implementations that rewrite the formula with a new
    signature and/or cast its arguments to fixed domains.

    Class attributes:

    `signature`
        Optional signature to clone the formula signature to.
    `domains`
        Domains to cast the leading arguments to.
    `codomain`
        Optional domain of the result; defaults to the formula domain.
    `hint`, `help`
        Optional short and long descriptions.
    """
    signature = None
    domains = []
    codomain = None
    hint = None
    help = None
    def __call__(self):
        signature = self.binding.signature
        if self.signature is not None:
            # Rewrite the polymorphic signature into the concrete one.
            signature = signature.clone_to(self.signature)
        assert self.arguments.admits(Binding, signature)
        # Cast the leading arguments to the declared domains; slots
        # beyond `len(self.domains)` pass through unchanged.
        arguments = {}
        for index, slot in enumerate(signature.slots):
            value = self.arguments[slot.name]
            if index < len(self.domains):
                domain = coerce(self.domains[index])
                if slot.is_singular:
                    if value is not None:
                        value = ImplicitCastBinding(value, domain,
                                                    value.syntax)
                else:
                    value = [ImplicitCastBinding(item, domain, item.syntax)
                             for item in value]
            arguments[slot.name] = value
        domain = self.binding.domain
        if self.codomain is not None:
            domain = coerce(self.codomain)
        return FormulaBinding(self.state.scope,
                              signature, domain, self.binding.syntax,
                              **arguments)
class BindPolyFunction(BindFunction):
    """
    Binds a polymorphic function: builds an untyped formula node and
    delegates to the `Correlate` protocol to pick the implementation
    based on the argument domains.
    """
    signature = None
    codomain = UntypedDomain()
    def correlate(self, **arguments):
        # Wrap the arguments into a preliminary untyped formula, then
        # let `Correlate` dispatch on the argument domains.
        formula = FormulaBinding(self.state.scope,
                                 self.signature(), self.codomain,
                                 self.syntax, **arguments)
        return Correlate.__invoke__(formula, self.state)
class BindNull(BindMacro):
    """Implements the `null()` constructor."""
    call('null', ('null', None))
    signature = NullarySig
    hint = """null() -> NULL"""
    def expand(self):
        # NULL is a literal with no value and no type.
        literal = LiteralBinding(self.state.scope,
                                 None, UntypedDomain(), self.syntax)
        return literal
class BindTrue(BindMacro):
    """Implements the `true()` constructor."""
    call('true', ('true', None))
    signature = NullarySig
    hint = """true() -> TRUE"""
    def expand(self):
        # A Boolean literal holding TRUE.
        literal = LiteralBinding(self.state.scope,
                                 True, coerce(BooleanDomain()), self.syntax)
        return literal
class BindFalse(BindMacro):
    """Implements the `false()` constructor."""
    call('false', ('false', None))
    signature = NullarySig
    hint = """false() -> FALSE"""
    def expand(self):
        # A Boolean literal holding FALSE.
        literal = LiteralBinding(self.state.scope,
                                 False, coerce(BooleanDomain()), self.syntax)
        return literal
class BindRoot(BindMacro):
    """Implements `root()`: yields the root space."""
    call('root')
    signature = NullarySig
    hint = """base.root() -> the root space"""
    def expand(self):
        # Re-wrap the root scope so it carries this node's syntax.
        return WrappingBinding(self.state.root, self.syntax)
class BindThis(BindMacro):
    """Implements `this()`: yields the current base space."""
    call('this')
    signature = NullarySig
    hint = """base.this() -> the current base space"""
    def expand(self):
        # Re-wrap the current scope so it carries this node's syntax.
        return WrappingBinding(self.state.scope, self.syntax)
class BindHome(BindMacro):
    """Implements `home()`: builds a `HomeBinding` over the current scope."""
    call('home')
    signature = NullarySig
    def expand(self):
        return HomeBinding(self.state.scope, self.syntax)
class BindDistinct(BindMacro):
call('distinct')
signature = UnarySig
def expand(self, op):
seed = self.state.bind(op)
recipes = expand(seed, with_syntax=True)
if recipes is None:
with translate_guard(op):
raise Error("Function '%s' | |
is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfExtensionProperty', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_extension_properties.metadata = {'url': '/applications/{application-id}/extensionProperties'} # type: ignore
def create_extension_properties(
    self,
    application_id,  # type: str
    body,  # type: "models.MicrosoftGraphExtensionProperty"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphExtensionProperty"
    """Create new navigation property to extensionProperties for applications.

    Create new navigation property to extensionProperties for applications.

    :param application_id: key: id of application.
    :type application_id: str
    :param body: New navigation property.
    :type body: ~applications.models.MicrosoftGraphExtensionProperty
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphExtensionProperty, or the result of cls(response)
    :rtype: ~applications.models.MicrosoftGraphExtensionProperty
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphExtensionProperty"]
    # Map HTTP error codes to the exception types callers are expected
    # to catch; callers may extend the mapping via `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL: fill the {application-id} path parameter.
    url = self.create_extension_properties.metadata['url']  # type: ignore
    path_format_arguments = {
        'application-id': self._serialize.url("application_id", application_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (none for this operation).
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the navigation property into the request body and POST it.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphExtensionProperty')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 201 Created is the only success status for this operation.
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphExtensionProperty', pipeline_response)

    if cls:
        # Let the caller post-process the raw pipeline response.
        return cls(pipeline_response, deserialized, {})

    return deserialized
create_extension_properties.metadata = {'url': '/applications/{application-id}/extensionProperties'}  # type: ignore
def get_extension_properties(
    self,
    application_id,  # type: str
    extension_property_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum8"]]]
    expand=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphExtensionProperty"
    """Get extensionProperties from applications.

    Get extensionProperties from applications.

    :param application_id: key: id of application.
    :type application_id: str
    :param extension_property_id: key: id of extensionProperty.
    :type extension_property_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~applications.models.Enum8]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphExtensionProperty, or the result of cls(response)
    :rtype: ~applications.models.MicrosoftGraphExtensionProperty
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphExtensionProperty"]
    # Map HTTP error codes to the exception types callers are expected
    # to catch; callers may extend the mapping via `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL: fill both entity-key path parameters.
    url = self.get_extension_properties.metadata['url']  # type: ignore
    path_format_arguments = {
        'application-id': self._serialize.url("application_id", application_id, 'str'),
        'extensionProperty-id': self._serialize.url("extension_property_id", extension_property_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        # OData $select: restrict which properties are returned.
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        # OData $expand: inline related entities in the response.
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 OK is the only success status for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphExtensionProperty', pipeline_response)

    if cls:
        # Let the caller post-process the raw pipeline response.
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_extension_properties.metadata = {'url': '/applications/{application-id}/extensionProperties/{extensionProperty-id}'}  # type: ignore
def update_extension_properties(
    self,
    application_id,  # type: str
    extension_property_id,  # type: str
    body,  # type: "models.MicrosoftGraphExtensionProperty"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property extensionProperties in applications.

    Update the navigation property extensionProperties in applications.

    :param application_id: key: id of application.
    :type application_id: str
    :param extension_property_id: key: id of extensionProperty.
    :type extension_property_id: str
    :param body: New navigation property values.
    :type body: ~applications.models.MicrosoftGraphExtensionProperty
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map HTTP error codes to the exception types callers are expected
    # to catch; callers may extend the mapping via `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL: fill both entity-key path parameters.
    url = self.update_extension_properties.metadata['url']  # type: ignore
    path_format_arguments = {
        'application-id': self._serialize.url("application_id", application_id, 'str'),
        'extensionProperty-id': self._serialize.url("extension_property_id", extension_property_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (none for this operation).
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the new property values and PATCH them.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphExtensionProperty')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 204 No Content is the only success status for this operation.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        # Let the caller post-process the raw pipeline response.
        return cls(pipeline_response, None, {})
update_extension_properties.metadata = {'url': '/applications/{application-id}/extensionProperties/{extensionProperty-id}'}  # type: ignore
def delete_extension_properties(
    self,
    application_id,  # type: str
    extension_property_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property extensionProperties for applications.

    Delete navigation property extensionProperties for applications.

    :param application_id: key: id of application.
    :type application_id: str
    :param extension_property_id: key: id of extensionProperty.
    :type extension_property_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # HTTP status -> exception mapping, extensible by the caller.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Build the request URL from the two entity keys.
    path_format_arguments = {
        'application-id': self._serialize.url("application_id", application_id, 'str'),
        'extensionProperty-id': self._serialize.url("extension_property_id", extension_property_id, 'str'),
    }
    url = self._client.format_url(
        self.delete_extension_properties.metadata['url'],  # type: ignore
        **path_format_arguments)

    # No query parameters for a delete.
    query_parameters = {}  # type: Dict[str, Any]

    # Headers: optional ETag concurrency guard plus the accepted type.
    header_parameters = {}  # type: Dict[str, Any]
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Issue the DELETE through the client pipeline.
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 204 No Content is the only success status for a delete.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        # Let the caller post-process the raw pipeline response.
        return cls(pipeline_response, None, {})
delete_extension_properties.metadata = {'url': '/applications/{application-id}/extensionProperties/{extensionProperty-id}'}  # type: ignore
def list_home_realm_discovery_policies(
self,
application_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum9"]]]
select=None, # type: Optional[List[Union[str, "models.Enum10"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum11"]]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfHomeRealmDiscoveryPolicy"]
"""Get homeRealmDiscoveryPolicies from applications.
Get homeRealmDiscoveryPolicies from applications.
:param application_id: key: id of application.
:type application_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~applications.models.Enum9]
:param select: Select properties to be returned.
:type select: list[str or ~applications.models.Enum10]
:param expand: Expand related entities.
:type expand: list[str or ~applications.models.Enum11]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfHomeRealmDiscoveryPolicy or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~applications.models.CollectionOfHomeRealmDiscoveryPolicy]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfHomeRealmDiscoveryPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_home_realm_discovery_policies.metadata['url'] # type: ignore
path_format_arguments = {
'application-id': self._serialize.url("application_id", application_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, | |
_infoblox_type = 'awsuser'
_fields = ['access_key_id', 'account_id', 'last_used', 'name',
'nios_user_name', 'secret_access_key', 'status']
_search_for_update_fields = ['access_key_id', 'account_id', 'name']
_updateable_search_fields = ['access_key_id', 'account_id', 'name',
'nios_user_name']
_all_searchable_fields = ['access_key_id', 'account_id', 'name',
'nios_user_name', 'status']
_return_fields = ['access_key_id', 'account_id', 'name']
_remap = {}
_shadow_fields = ['_ref']
class Bfdtemplate(InfobloxObject):
    """BFD template (WAPI object ``bfdtemplate``).

    Holds a configuration of advanced Bidirectional Forwarding
    Detection settings such as authentication and timer intervals.

    Fields:
        authentication_key: Key for BFD message-digest authentication.
        authentication_key_id: Key identifier for BFD authentication;
            valid values are 1-255.
        authentication_type: Authentication type for the BFD protocol.
        detection_multiplier: Multiplier (3-50) applied to the
            negotiated transmit interval to derive the detection time
            for the receiving system in asynchronous BFD mode.
        min_rx_interval: Minimum receive time in seconds (50-9999).
        min_tx_interval: Minimum transmission time in seconds (50-9999).
        name: Name of the BFD template object.
    """

    _infoblox_type = 'bfdtemplate'
    _fields = [
        'authentication_key', 'authentication_key_id',
        'authentication_type', 'detection_multiplier',
        'min_rx_interval', 'min_tx_interval', 'name',
    ]
    _search_for_update_fields = ['name']
    _updateable_search_fields = ['name']
    _all_searchable_fields = ['name']
    _return_fields = ['name']
    _remap = {}
    _shadow_fields = ['_ref']
class Bulkhost(InfobloxObject):
    """Bulk host (WAPI object ``bulkhost``).

    A BulkHost lets the appliance add a large number of hosts as a
    group, automatically assigning host names from an IP address range
    and a name format. The appliance reserves the name space
    bulk-xx-xx-xx-xx for bulk hosts, so that name must not be used for
    CNAMEs or host aliases; verify no CNAME or host alias uses it
    before adding a bulk host.

    Fields:
        cloud_info: Cloud API related information.
        comment: Descriptive comment.
        disable: Disable flag of a DNS BulkHost record.
        dns_prefix: Prefix, in punycode format, for the bulk host.
        end_addr: Last IP address in the bulk host address range.
        extattrs: Extensible attributes associated with the object.
        last_queried: Time of the last DNS query in Epoch seconds.
        name_template: Bulk host name template.
        network_view: Network view associated with the bulk host view.
        policy: Hostname policy for records under the bulk host parent
            zone.
        prefix: Name (or series of characters) inserted at the
            beginning of each host name.
        reverse: Reverse flag of the BulkHost record.
        start_addr: First IP address in the bulk host address range.
        template_format: Bulk host name template format.
        ttl: Time to Live (TTL) value.
        use_name_template: Use flag for name_template.
        use_ttl: Use flag for ttl.
        view: View for the bulk host.
        zone: Zone name.
    """

    _infoblox_type = 'bulkhost'
    _fields = [
        'cloud_info', 'comment', 'disable', 'dns_prefix', 'end_addr',
        'extattrs', 'last_queried', 'name_template', 'network_view',
        'policy', 'prefix', 'reverse', 'start_addr', 'template_format',
        'ttl', 'use_name_template', 'use_ttl', 'view', 'zone',
    ]
    _search_for_update_fields = ['prefix']
    _updateable_search_fields = [
        'comment', 'disable', 'end_addr', 'name_template', 'prefix',
        'reverse', 'start_addr', 'ttl', 'use_name_template', 'view',
        'zone',
    ]
    _all_searchable_fields = [
        'comment', 'disable', 'end_addr', 'name_template', 'prefix',
        'reverse', 'start_addr', 'ttl', 'use_name_template', 'view',
        'zone',
    ]
    _return_fields = ['comment', 'extattrs', 'prefix']
    _remap = {}
    _shadow_fields = ['_ref']
class Bulkhostnametemplate(InfobloxObject):
    """Bulk host name template (WAPI object ``bulkhostnametemplate``).

    Manages the DNS bulk host name formats defined at the Grid level.

    Fields:
        is_grid_default: True if this template is the Grid default.
        pre_defined: True for a pre-defined template, False otherwise.
        template_format: Format of the bulk host name template; must
            follow certain rules (see the Administration Guide).
        template_name: Name of the bulk host name template.
    """

    _infoblox_type = 'bulkhostnametemplate'
    _fields = [
        'is_grid_default', 'pre_defined', 'template_format',
        'template_name',
    ]
    _search_for_update_fields = ['template_format', 'template_name']
    _updateable_search_fields = ['template_format', 'template_name']
    _all_searchable_fields = ['template_format', 'template_name']
    _return_fields = ['is_grid_default', 'template_format', 'template_name']
    _remap = {}
    _shadow_fields = ['_ref']
class Cacertificate(InfobloxObject):
    """CA certificate (WAPI object ``cacertificate``).

    Represents a CA certificate description.

    Fields:
        distinguished_name: Certificate subject name.
        issuer: Certificate issuer subject name.
        serial: Certificate serial number in hex format.
        used_by: Information about the CA certificate usage.
        valid_not_after: Date after which the certificate is invalid.
        valid_not_before: Date before which the certificate is not
            valid.
    """

    _infoblox_type = 'cacertificate'
    _fields = [
        'distinguished_name', 'issuer', 'serial', 'used_by',
        'valid_not_after', 'valid_not_before',
    ]
    _search_for_update_fields = ['distinguished_name', 'issuer', 'serial']
    _updateable_search_fields = []
    _all_searchable_fields = ['distinguished_name', 'issuer', 'serial']
    _return_fields = [
        'distinguished_name', 'issuer', 'serial', 'used_by',
        'valid_not_after', 'valid_not_before',
    ]
    _remap = {}
    _shadow_fields = ['_ref']
class Capacityreport(InfobloxObject):
    """ Capacityreport: Grid member capacity report object.
    Corresponds to WAPI object 'capacityreport'
    The capacity report object provides information about the object
    count, interface count, and other memory usage statistics for a Grid
    member.
    Fields:
    hardware_type: Hardware type of a Grid member.
    max_capacity: The maximum amount of capacity available for the Grid
    member.
    name: The Grid member name.
    object_counts: A list of instance counts for object types created on
    the Grid member.
    percent_used: The percentage of the capacity in use by the Grid
    member.
    role: The Grid member role.
    total_objects: The total number of objects created by the Grid
    member.
    """
    # WAPI object type identifier this class maps to (see docstring).
    _infoblox_type = 'capacityreport'
    # The field lists below drive the generic request/search machinery on
    # the InfobloxObject base class.
    _fields = ['hardware_type', 'max_capacity', 'name', 'object_counts',
               'percent_used', 'role', 'total_objects']
    _search_for_update_fields = ['name']
    # This is a read-only report object; no search field is updatable.
    _updateable_search_fields = []
    _all_searchable_fields = ['name']
    # Fields requested back from the WAPI by default.
    _return_fields = ['name', 'percent_used', 'role']
    # No local-name <-> WAPI-name remapping needed for this object.
    _remap = {}
    # '_ref' is kept on the instance but not sent as a regular field.
    _shadow_fields = ['_ref']
    # Raw 'object_counts' sub-object dicts returned by the WAPI are
    # converted into CapacityreportObjectcount instances on load.
    _custom_field_processing = {
        'object_counts': CapacityreportObjectcount.from_dict,
    }
class Captiveportal(InfobloxObject):
""" Captiveportal: Captive portal object.
Corresponds to WAPI object 'captiveportal'
This object represents the captive portal configuration.
Fields:
authn_server_group: The authentication server group assigned to this
captive portal.
company_name: The company name that appears in the guest
registration page.
enable_syslog_auth_failure: Determines if authentication failures
are logged to syslog or not.
enable_syslog_auth_success: Determines if successful authentications
are logged to syslog or not.
enable_user_type: The type of user to be enabled for the captive
portal.
encryption: The encryption the captive portal uses.
files: The list of files associated with the captive portal.
guest_custom_field1_name: The name of the custom field that you are
adding to the guest registration page.
guest_custom_field1_required: Determines if the custom field is
required or not.
guest_custom_field2_name: The name of the custom field that you are
adding to the guest registration page.
guest_custom_field2_required: Determines if the custom field is
required or not.
guest_custom_field3_name: The name of the custom field that you are
adding to the guest registration page.
guest_custom_field3_required: Determines if the custom field is
required or not.
guest_custom_field4_name: The name of the custom field that you are
adding to the guest registration page.
guest_custom_field4_required: Determines if the custom field is
required or not.
guest_email_required: Determines if the email address of the guest
is required or not.
guest_first_name_required: Determines if the first name of the guest
is required or not.
guest_last_name_required: Determines if the last name of the guest
is required or not.
guest_middle_name_required: Determines if the middle name of the
guest is required or not.
guest_phone_required: Determines if the phone number of the guest is
required or not.
helpdesk_message: The helpdesk message that appears in the guest
registration page.
listen_address_ip: Determines the IP address on which the captive
portal listens. Valid if listen address type is 'IP'.
listen_address_type: Determines the type of the IP address on which
the captive portal listens.
name: The hostname of the Grid member that hosts the captive portal.
network_view: The network view of the captive portal.
port: The TCP port used by the Captive Portal service. The port is
required when the Captive Portal service is enabled. Valid
values are between 1 and 63999. Please note that setting the
port number to 80 or 443 might impact performance.
service_enabled: Determines | |
Stator iron fill factor per Grauers
k_fillr : float
Rotor slot fill factor
k_fills : float
Stator Slot fill factor
k_s : float
magnetic saturation factor for iron
m : int
Number of phases
mu_0 : float, [m*kg/s**2/A**2]
permeability of free space
mu_r : float, [m*kg/s**2/A**2]
relative permeability (neodymium)
p : float
number of pole pairs (taken as int within code)
phi : numpy array[90], [rad]
tilt angle (during transportation)
q1 : int
Stator slots per pole per phase
q2 : int
Rotor slots per pole per phase
ratio_mw2pp : float
ratio of magnet width to pole pitch(bm / self.tau_p)
resist_Cu : float, [ohm/m]
Copper resistivity
sigma : float, [Pa]
assumed max shear stress
v : float
poisson ratio
y_tau_p : float
Stator coil span to pole pitch
y_tau_pr : float
Rotor coil span to pole pitch
I_0 : float, [A]
no-load excitation current
T_rated : float, [N*m]
Rated torque
d_r : float, [m]
arm depth d_r
h_m : float, [m]
magnet height
h_0 : float, [m]
Slot height
h_s : float, [m]
Yoke height h_s
len_s : float, [m]
Stator core length
machine_rating : float, [W]
Machine rating
shaft_rpm : numpy array[n_pc], [rpm]
rated speed of input shaft (lss for direct, hss for geared)
n_r : float
number of arms n
rad_ag : float, [m]
airgap radius
t_wr : float, [m]
arm depth thickness
n_s : float
number of stator arms n_s
b_st : float, [m]
arm width b_st
d_s : float, [m]
arm depth d_s
t_ws : float, [m]
arm depth thickness
D_shaft : float, [m]
Shaft diameter
rho_Copper : float, [kg*m**-3]
Copper density
rho_Fe : float, [kg*m**-3]
Magnetic Steel density
rho_Fes : float, [kg*m**-3]
Structural Steel density
rho_PM : float, [kg*m**-3]
Magnet density
Returns
-------
B_rymax : float, [T]
Peak Rotor yoke flux density
B_trmax : float, [T]
maximum tooth flux density in rotor
B_tsmax : float, [T]
maximum tooth flux density in stator
B_g : float, [T]
Peak air gap flux density B_g
B_g1 : float, [T]
air gap flux density fundamental
B_pm1 : float
Fundamental component of peak air gap flux density
N_s : float
Number of turns in the stator winding
b_s : float, [m]
slot width
b_t : float, [m]
tooth width
A_Curcalc : float, [mm**2]
Conductor cross-section mm^2
A_Cuscalc : float, [mm**2]
Stator Conductor cross-section mm^2
b_m : float
magnet width
mass_PM : float, [kg]
Magnet mass
Copper : float, [kg]
Copper Mass
Iron : float, [kg]
Electrical Steel Mass
Structural_mass : float, [kg]
Structural Mass
generator_mass : float, [kg]
Actual mass
f : float
Generator output frequency
I_s : float, [A]
Generator output phase current
R_s : float, [ohm]
Stator resistance
L_s : float
Stator synchronising inductance
J_s : float, [A*m**-2]
Stator winding current density
A_1 : float
Specific current loading
K_rad : float
Stack length ratio
Losses : numpy array[n_pc], [W]
Total loss
generator_efficiency : numpy array[n_pc]
Generator electromagnetic efficiency values (<1)
u_ar : float, [m]
Rotor radial deflection
u_as : float, [m]
Stator radial deflection
u_allow_r : float, [m]
Allowable radial rotor
u_allow_s : float, [m]
Allowable radial stator
y_ar : float, [m]
Rotor axial deflection
y_as : float, [m]
Stator axial deflection
y_allow_r : float, [m]
Allowable axial
y_allow_s : float, [m]
Allowable axial
z_ar : float, [m]
Rotor circumferential deflection
z_as : float, [m]
Stator circumferential deflection
z_allow_r : float, [m]
Allowable circum rotor
z_allow_s : float, [m]
Allowable circum stator
b_allow_r : float, [m]
Allowable arm dimensions
b_allow_s : float, [m]
Allowable arm
TC1 : float, [m**3]
Torque constraint
TC2r : float, [m**3]
Torque constraint-rotor
TC2s : float, [m**3]
Torque constraint-stator
R_out : float, [m]
Outer radius
S : float
Stator slots
Slot_aspect_ratio : float
Slot aspect ratio
Slot_aspect_ratio1 : float
Stator slot aspect ratio
Slot_aspect_ratio2 : float
Rotor slot aspect ratio
D_ratio : float
Stator diameter ratio
J_r : float
Rotor winding Current density
L_sm : float
mutual inductance
Q_r : float
Rotor slots
R_R : float
Rotor resistance
b_r : float
rotor slot width
b_tr : float
rotor tooth width
b_trmin : float
minimum tooth width
"""
    def initialize(self):
        """Declare component options.

        n_pc : int (default 20)
            Number of performance-curve points; it sizes the rpm, current,
            loss and efficiency arrays declared in ``setup``.
        """
        self.options.declare("n_pc", default=20)
    def setup(self):
        """Declare all OpenMDAO inputs and outputs of the generator model.

        Declarations are grouped, in order: physical constants and modeling
        parameters, rated operating point and geometry inputs, structural
        design variables, material densities, and the magnetic / electrical
        / structural performance outputs.  Arrays sized by ``n_pc`` hold one
        value per point of the machine's performance curve.  Field meanings
        and units are documented in the class docstring.
        """
        n_pc = self.options["n_pc"]
        # Constants and parameters
        self.add_input("B_r", val=1.2, units="T")
        self.add_input("E", val=0.0, units="Pa")
        self.add_input("G", val=0.0, units="Pa")
        self.add_input("P_Fe0e", val=1.0, units="W/kg")
        self.add_input("P_Fe0h", val=4.0, units="W/kg")
        self.add_input("S_N", val=-0.002)
        self.add_input("alpha_p", val=0.5 * np.pi * 0.7)
        self.add_input("b_r_tau_r", val=0.45)
        self.add_input("b_ro", val=0.004, units="m")
        self.add_input("b_s_tau_s", val=0.45)
        self.add_input("b_so", val=0.004, units="m")
        self.add_input("cofi", val=0.85)
        self.add_input("freq", val=60, units="Hz")
        self.add_input("h_i", val=0.001, units="m")
        self.add_input("h_sy0", val=0.0)
        self.add_input("h_w", val=0.005, units="m")
        self.add_input("k_fes", val=0.9)
        self.add_input("k_fillr", val=0.7)
        self.add_input("k_fills", val=0.65)
        self.add_input("k_s", val=0.2)
        # Integer-valued quantities (phase/slot counts) are discrete inputs.
        self.add_discrete_input("m", val=3)
        self.add_input("mu_0", val=np.pi * 4e-7, units="m*kg/s**2/A**2")
        self.add_input("mu_r", val=1.06, units="m*kg/s**2/A**2")
        self.add_input("p", val=3.0)
        self.add_input("phi", val=np.deg2rad(90), units="rad")
        self.add_discrete_input("q1", val=6)
        self.add_discrete_input("q2", val=4)
        self.add_input("ratio_mw2pp", val=0.7)
        self.add_input("resist_Cu", val=1.8e-8 * 1.4, units="ohm/m")
        self.add_input("sigma", val=40e3, units="Pa")
        self.add_input("v", val=0.3)
        self.add_input("y_tau_p", val=1.0)
        self.add_input("y_tau_pr", val=10.0 / 12)
        # General inputs
        # self.add_input('r_s', val=0.0, units='m', desc='airgap radius r_s')
        self.add_input("I_0", val=0.0, units="A")
        self.add_input("rated_torque", val=0.0, units="N*m")
        self.add_input("d_r", val=0.0, units="m")
        self.add_input("h_m", val=0.0, units="m")
        self.add_input("h_0", val=0.0, units="m")
        self.add_input("h_s", val=0.0, units="m")
        self.add_input("len_s", val=0.0, units="m")
        self.add_input("machine_rating", val=0.0, units="W")
        self.add_input("shaft_rpm", val=np.zeros(n_pc), units="rpm")
        self.add_input("n_r", val=0.0)
        self.add_input("rad_ag", val=0.0, units="m")
        self.add_input("t_wr", val=0.0, units="m")
        # Structural design variables
        self.add_input("n_s", val=0.0)
        self.add_input("b_st", val=0.0, units="m")
        self.add_input("d_s", val=0.0, units="m")
        self.add_input("t_ws", val=0.0, units="m")
        self.add_input("D_shaft", val=0.0, units="m")
        # Material properties
        self.add_input("rho_Copper", val=8900.0, units="kg*m**-3")
        self.add_input("rho_Fe", val=7700.0, units="kg*m**-3")
        self.add_input("rho_Fes", val=7850.0, units="kg*m**-3")
        self.add_input("rho_PM", val=7450.0, units="kg*m**-3")
        # Magnetic loading
        self.add_output("B_rymax", val=0.0, units="T")
        self.add_output("B_trmax", val=0.0, units="T")
        self.add_output("B_tsmax", val=0.0, units="T")
        self.add_output("B_g", val=0.0, units="T")
        self.add_output("B_g1", val=0.0, units="T")
        self.add_output("B_pm1", val=0.0)
        # Stator design
        self.add_output("N_s", val=0.0)
        self.add_output("b_s", val=0.0, units="m")
        self.add_output("b_t", val=0.0, units="m")
        self.add_output("A_Curcalc", val=0.0, units="mm**2")
        self.add_output("A_Cuscalc", val=0.0, units="mm**2")
        # Rotor magnet dimension
        self.add_output("b_m", val=0.0)
        # Mass Outputs
        self.add_output("mass_PM", val=0.0, units="kg")
        self.add_output("Copper", val=0.0, units="kg")
        self.add_output("Iron", val=0.0, units="kg")
        self.add_output("Structural_mass", val=0.0, units="kg")
        self.add_output("generator_mass", val=0.0, units="kg")
        # Electrical performance (one entry per performance-curve point)
        self.add_output("f", val=np.zeros(n_pc))
        self.add_output("I_s", val=np.zeros(n_pc), units="A")
        self.add_output("R_s", val=np.zeros(n_pc), units="ohm")
        self.add_output("L_s", val=0.0)
        self.add_output("J_s", val=np.zeros(n_pc), units="A*m**-2")
        self.add_output("A_1", val=np.zeros(n_pc))
        # Objective functions
        self.add_output("K_rad", val=0.0)
        self.add_output("Losses", val=np.zeros(n_pc), units="W")
        self.add_output("eandm_efficiency", val=np.zeros(n_pc))
        # Structural performance
        self.add_output("u_ar", val=0.0, units="m")
        self.add_output("u_as", val=0.0, units="m")
        self.add_output("u_allow_r", val=0.0, units="m")
        self.add_output("u_allow_s", val=0.0, units="m")
        self.add_output("y_ar", val=0.0, units="m")
        self.add_output("y_as", val=0.0, units="m")
        self.add_output("y_allow_r", val=0.0, units="m")
        self.add_output("y_allow_s", val=0.0, units="m")
        self.add_output("z_ar", val=0.0, units="m")
        self.add_output("z_as", val=0.0, units="m")
        self.add_output("z_allow_r", val=0.0, units="m")
        self.add_output("z_allow_s", val=0.0, units="m")
        self.add_output("b_allow_r", val=0.0, units="m")
        self.add_output("b_allow_s", val=0.0, units="m")
        self.add_output("TC1", val=0.0, units="m**3")
        self.add_output("TC2r", val=0.0, units="m**3")
        self.add_output("TC2s", val=0.0, units="m**3")
        # Other parameters
        self.add_output("R_out", val=0.0, units="m")
        self.add_output("S", val=0.0)
        self.add_output("Slot_aspect_ratio", val=0.0)
        self.add_output("Slot_aspect_ratio1", val=0.0)
        self.add_output("Slot_aspect_ratio2", val=0.0)
        self.add_output("D_ratio", val=0.0)
        self.add_output("J_r", val=np.zeros(n_pc))
        self.add_output("L_sm", val=0.0)
        self.add_output("Q_r", val=0.0)
        self.add_output("R_R", val=0.0)
        self.add_output("b_r", val=0.0)
        self.add_output("b_tr", val=0.0)
        self.add_output("b_trmin", val=0.0)
# ----------------------------------------------------------------------------------------
class PMSG_Outer(GeneratorBase):
"""
Estimates overall electromagnetic dimensions and Efficiency of PMSG -arms generator.
Parameters
----------
P_mech : float, [W]
Shaft mechanical power
N_c : float
Number of turns per coil
b : float
Slot pole combination
c : float
Slot pole combination
E_p : float, [V]
Stator phase voltage
h_yr : float, [m]
rotor yoke height
h_ys : float, [m]
Yoke height
h_sr : float, [m]
Structural Mass
h_ss : float, [m]
Stator yoke height
t_r : float, [m]
Rotor disc thickness
t_s : float, [m]
Stator disc thickness
y_sh : float, [m]
Shaft deflection
theta_sh : float, [rad]
slope of shaft
D_nose : float, [m]
Nose outer diameter
y_bd : float, [m]
Deflection of the bedplate
theta_bd : float, [rad]
Slope at the bedplate
u_allow_pcent : float
Radial deflection as a percentage of air gap diameter
y_allow_pcent : float
Radial deflection as a percentage of air gap diameter
z_allow_deg : float, [deg]
Allowable torsional twist
B_tmax : float, [T]
Peak Teeth flux density
Returns
-------
B_smax : float, [T]
Peak Stator flux density
B_symax : float, [T]
Peak Stator flux density
tau_p : float, [m]
Pole pitch
q : float, [N/m**2]
Normal stress
len_ag : float, [m]
Air gap length
h_t : float, [m]
tooth height
tau_s : float, [m]
Slot pitch
J_actual : float, [A/m**2]
Current density
T_e : float, [N*m]
Electromagnetic torque
twist_r : float, [deg]
torsional twist
twist_s : float, [deg]
Stator torsional twist
Structural_mass_rotor : float, [kg]
Rotor mass (kg)
Structural_mass_stator : float, [kg]
Stator mass (kg)
| |
return Table([row.map(func) for row in self], default=self.default)
def as_list(self):
"""Convert the table to a list of dictionaries.
Examples
--------
>>> table = Table([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
>>> table.as_list()
[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
Returns
-------
list: list of dictionaries representing the table
"""
return [row.data for row in self.data]
# ----------------------------------------------------------------------
# NFA
# ----------------------------------------------------------------------
class Nfa:
"""
An NFA (non-deterministic finite automaton).
"""
def __init__(self, data):
"""Instantiate an Nfa object from a sequence of transition
rules.
Parameters
----------
data : a sequence of transition rules
Data may be a Table object, a sequence of TableRow objects
or a sequence of dictionaries.
"""
self.table = self._process_data(data)
self._epsilon_closure = \
[self._find_epsilon_closure(n) for n in range(len(self))]
def __len__(self):
"""Get the number of states in the NFA.
Returns
-------
number_of_states : number of states
The number of states corresponds to the length of the
transition table.
"""
return len(self.table)
def __getitem__(self, state):
"""Get all possible transitions for a state.
Parameters
----------
state : a state of the NFA represented as an integer
Returns
-------
possible_transitions : possible transitions for the state
The possible transitions for a state are represented as
one row of a transition table.
"""
return self.table[state]
def __str__(self):
"""Get a string representation of the NFA in form of a look-up
table.
Returns
-------
string : string representation
"""
return str(self.table)
@staticmethod
def _process_data(data):
"""Convert data to a Table object.
Parameters
----------
data : an iterable collection of table rows
Data may be a Table object, a collection of TableRow objects
or a collection of dictionaries.
Returns
-------
table : a Table object
"""
if isinstance(data, Table):
return data
return Table(data, default=set)
def _find_epsilon_closure(self, state):
"""
Find the epsilon closure of a state.
The epsilon clojure of a state is a set of states of the NFA
which include the initial state itself and all those states
which can be reached via epsilon-transitions from that initial
state.
Parameters
----------
state : state of the NFA represented as an integer
Returns
-------
epsilon_clojure : set of states represented as a set of integers
"""
def iter_search(states, update):
if not update:
return states
update = update - states
states = states | update
update = set_union(*[self[u][EPSILON] for u in update])
return iter_search(states, update - states)
return iter_search({state}, self[state][EPSILON])
@staticmethod
def _concat_tables(*tables):
"""
Concatenate several tables which represent NFA transition
states.
The default return value of the resultant table is the default
return value of the first table.
Examples
--------
>>> table_1 = Table([{"a": {1}, "b": {1}}, {}], default=None)
===== ======== ========
a b
===== ======== ========
0 {1} {1}
1 None None
===== ======== ========
>>> table_2 = Table([{"c": {1}}, {}], default=None)
===== ========
c
===== ========
0 {1}
1 None
===== ========
Concatenated tables:
===== ======== ======== ========
a b c
===== ======== ======== ========
0 {1} {1} None
1 None None None
2 None None {3}
3 None None None
===== ======== ======== ========
Parameters
----------
tables : NFA transition tables
The tables should contains sets of integers as values.
Returns
-------
table, offsets : concatenated table and a list of offsets
In the resultant table the transition states are adjusted
to the new row indexing. The offsets are integers which
point at the beginnings of the original tables in the
resultant table.
"""
# Get the default return value of the first table.
default = tables[0].default
# Calculate the lengths of the tables.
lengths = [len(table) for table in tables]
# Calculate the offsets.
offsets = [0] + [sum(lengths[:i + 1]) for i in range(len(tables) - 1)]
# Construct new tables with adjusted transitional states.
shifted = [table.map(lambda x: {elem + i for elem in x})
for table, i in zip(tables, offsets)]
# Construct the resultant table.
result = Table(
[row for table in shifted for row in table], default=default)
return result, offsets
def epsilon_closure(self, states):
"""
Get the epsilon closure of a state or a set of states.
The epsilon clojure is the initial set of states itself plus all
states of the NFA which can be reached by epsilon-transitions
from those initial states.
Parameters
----------
states : a state or a set of states
One single state is represented by an integer, a set of
states is represented by an iterable collection of integers.
Returns
-------
epsilon_closure : set of states
The set of states is represented by a set of integers.
"""
if isinstance(states, int):
return self._epsilon_closure[states]
return set_union(*[self._epsilon_closure[state] for state in states])
@property
def final(self):
"""Get the final state of the NFA.
Returns
-------
state : final state represented by integer
"""
return len(self) - 1
def get_input_characters(self, states):
return set_union(
*[self[state].columns for state in states]) - {EPSILON}
def is_final(self, states):
"""
Does a set of states constitute a final state?
It does constitute a final state if the (single) final state of
the NFA is contained therein.
Parameters
----------
states : a set of states
The set of states is represented as an iterable collection
of integers.
Returns
-------
result : boolean
"""
return self.final in states
def transition(self, states, char):
"""
Transition on character input from the initial set of states to
the resultant set of states.
Parameters
----------
states : an initial set of states
The set of states is represented as an iterable collection
of integers.
char : input character
Returns
-------
resultant_states : set of resultant states
The resultant states are represented as a set of integers.
"""
start = self.epsilon_closure(states)
end = set_union(*[self[state][char] for state in start])
return self.epsilon_closure(end)
def match(self, string):
"""
Match a string.
Parameters
----------
string : an input string
Returns
-------
result : boolean
"""
states = {0}
for char in string:
states = self.transition(states, char)
if not states:
return False
return self.is_final(states)
@classmethod
def epsilon(cls):
"""
Construct an epsilon-NFA.
Returns
-------
epsilon_nfa : an NFA which accepts only epsilon
"""
return cls([{EPSILON: {1}}, {}])
@classmethod
def char(cls, char):
"""
Construct a single character NFA.
Parameters
----------
char : a character
Returns
-------
char_nfa : an NFA which accepts only a single character
"""
return cls([{char: {1}}, {}])
@classmethod
def concat(cls, *nfas):
"""
Construct a concatenation of NFAs.
Parameters
----------
nfas : a variable number of Nfa objects
Returns
-------
concat_nfa : an NFA which is a concatenation of the initial NFAs
"""
# Concatenate tables.
table, offsets = cls._concat_tables(*[nfa.table for nfa in nfas])
# Connect the original tables by linking the last row of a
# preceding table with the first row of the following table by
# an epsilon-transition.
for offset in offsets[1:]:
table[offset - 1][EPSILON] = {offset}
return cls(table)
@classmethod
def union(cls, *nfas):
"""
Construct a union of NFAs.
Parameters
----------
nfas : a variable number of Nfa objects
Returns
-------
union_nfa : an NFA which is the union of the initial NFAs
"""
# Create two table with an empty row, one for the initial state
# of the NFA and one for the final state.
tables = [Table.with_empty_row(default=set)] +\
[nfa.table for nfa in nfas] +\
[Table.with_empty_row(default=set)]
# Concatenate tables.
table, offsets = cls._concat_tables(*tables)
# Link the initial state of the NFA with the first rows of the
# original tables by epsilon transitions.
table[0][EPSILON] = set(offsets[1:-1])
# Link the last rows of the original tables with the final state
# of the NFA by epsilon transitions.
for offset in offsets[2:]:
table[offset - 1][EPSILON] = {table.final}
return cls(table)
@classmethod
def star(cls, nfa):
"""
Construct a Kleene-star NFA.
Parameters
----------
nfa : an NFA object
Returns
-------
star_nfa : an NFA which is a Kleene-star of the initial NFA
"""
# Create two table with an empty row, one for the initial state
# of the NFA and one for the final state.
tables = [Table.with_empty_row(default=set),
nfa.table,
Table.with_empty_row(default=set)]
# # | |
# security_monkey/common/utils/PolicyDiff.py
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# ^^ required by pep-0263 for "¿?"
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.common.utils.PolicyDiff
:platform: Unix
:synopsis: Takes two JSON or dict objects and finds their differences. Returns color-coded HTML.
Needs to be refactored completely.
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>> @monkeysecurity
"""
import json
import sys
import collections
def i(indentlevel):
    """Return *indentlevel* levels of HTML indentation.

    The indent character prevents a user from copying the output from a
    browser window into anything that requires valid JSON: instead of
    copying as a tab or space character, it copies as an invalid
    whitespace character (HEX E28083).
    """
    # String repetition replaces the original accumulation loop, whose
    # index variable shadowed this function's own name ('i').
    return ' ' * indentlevel
# ADDED
# CHANGED
# Type Change
# Regular Change
# DELETED
def processSubDict(key, sda, sdb, indentlevel):
if type(sda) is not type(sdb):
raise ValueError("processSubDict requires that both items have the same type.")
# BUG: What if going from None to 'vpc-1de23c'
retstr = ''
brackets = charfortype(sda)
if type(sda) is str or type(sdb) is unicode:
if sda == sdb:
retstr += same("{4}\"{0}\": {2}{1}{3},".format(key, sda, brackets[0], brackets[1], i(indentlevel)))
else:
retstr += deleted("{4}\"{0}\": {2}{1}{3},".format(key, sdb, brackets[0], brackets[1], i(indentlevel)))
retstr += added("{4}\"{0}\": {2}{1}{3},".format(key, sda, brackets[0], brackets[1], i(indentlevel)))
elif type(sda) is type(None) or type(sda) is bool or type(sda) is int:
if sda == sdb:
retstr += same("{2}\"{0}\": {1},".format(key, json.dumps(sda), i(indentlevel)))
else:
retstr += deleted("{2}\"{0}\": {1},".format(key, json.dumps(sda), i(indentlevel)))
retstr += added("{2}\"{0}\": {1},".format(key, json.dumps(sda), i(indentlevel)))
elif type(sda) is dict:
retstr += same("{4}\"{0}\": {2}<br/>\n{1}{4}{3},".format(key, diffdict(sda, sdb, indentlevel+1), brackets[0], brackets[1],i(indentlevel)))
elif type(sda) is list:
retstr += same("{4}\"{0}\": {2}<br/>\n{1}{4}{3},".format(key, difflist(sda, sdb, indentlevel+1), brackets[0], brackets[1],i(indentlevel)))
else:
print "processSubDict - Unexpected diffdict type {}".format(type(sda))
return retstr
def formbrack(value, indentlevel):
    """Return the opening/closing bracket strings for *value*'s type.

    Strings get double quotes; dicts and lists get brace/bracket pairs
    whose closing token is indented to *indentlevel*.  All other types
    (int, bool, None, ...) take no surrounding brackets.
    """
    open_tok = ''
    close_tok = ''
    if type(value) is str or type(value) is unicode:
        open_tok, close_tok = '"', '"'
    elif type(value) is dict:
        open_tok, close_tok = '{<br/>\n', i(indentlevel) + '}'
    elif type(value) is list:
        open_tok, close_tok = '[<br/>\n', i(indentlevel) + ']'
    return {'open': open_tok, 'close': close_tok}
def printlist(structure, action, indentlevel):
retstr = ''
for value in structure:
brackets = formbrack(value, indentlevel)
new_value = ""
if type(value) is str or type(value) is unicode:
new_value = value
elif type(value) is dict:
new_value = printdict(value, action, indentlevel+1)
elif type(value) is list:
new_value = printlist(value, action, indentlevel+1)
else:
print "printlist - Unexpected diffdict type {}".format(type(value))
content = "{3}{1}{0}{2},".format(new_value, brackets['open'], brackets['close'],i(indentlevel))
if action is 'same':
retstr += same(content)
elif action is 'deleted':
retstr += deleted(content)
elif action is 'added':
retstr += added(content)
return removeLastComma(retstr)
def printdict(structure, action, indentlevel):
retstr = ''
for key in structure.keys():
value = structure[key]
brackets = formbrack(value, indentlevel)
new_value = ''
if type(value) is str or type(value) is unicode or type(value) is int:
new_value = value
elif type(value) is bool or type(value) is type(None):
new_value = json.dumps(value)
elif type(value) is dict:
new_value = printdict(value, action, indentlevel+1)
elif type(value) is list:
new_value = printlist(value, action, indentlevel+1)
else:
print "printdict - Unexpected diffdict type {}".format(type(value))
content = "{4}\"{0}\": {2}{1}{3},".format(key, new_value, brackets['open'], brackets['close'],i(indentlevel))
if action is 'same':
retstr += same(content)
elif action is 'deleted':
retstr += deleted(content)
elif action is 'added':
retstr += added(content)
return removeLastComma(retstr)
def printsomething(value, action, indentlevel):
if type(value) is str or type(value) is unicode or type(value) is int:
return value
elif type(value) is bool or type(value) is type(None):
new_value = json.dumps(value)
elif type(value) is dict:
return printdict(value, action, indentlevel)
elif type(value) is list:
return printlist(value, action, indentlevel)
else:
print "printsomething - Unexpected diffdict type {}".format(type(value))
return ''
def diffdict(dicta, dictb, indentlevel):
    """Build an HTML diff of two dictionaries.

    diffdict and difflist are mutually recursive (via processSubDict) so
    that only the actual change inside a nested structure is marked up.
    *dicta* is treated as the new object (its exclusive keys render via
    added()) and *dictb* as the old object (its exclusive keys render via
    deleted()); keys present in both are delegated to processSubDict.

    TODO: diffdict does not add commas
    NOTE(review): dict.has_key() exists only in Python 2.
    NOTE(review): values of scalar type (int/bool/None) under keys that
    exist in only one dict match neither of the type checks below and are
    silently omitted from the output -- confirm whether that is intended.
    """
    retstr = ''
    # Keys in dicta: either added (missing from dictb) or possibly changed.
    for keya in dicta.keys():
        if not dictb.has_key(keya):
            brackets = charfortype(dicta[keya])
            if type(dicta[keya]) is str or type(dicta[keya]) is unicode:
                retstr += added("{4}\"{0}\": {2}{1}{3},".format(keya, printsomething(dicta[keya], 'added', indentlevel+1), brackets[0], brackets[1], i(indentlevel)))
            if type(dicta[keya]) is list or type(dicta[keya]) is dict:
                retstr += added("{4}\"{0}\": {2}<br/>\n{1}{4}{3},".format(keya, printsomething(dicta[keya], 'added', indentlevel+1), brackets[0], brackets[1], i(indentlevel)))
        else:
            if not type(dicta[keya]) is type(dictb[keya]):
                # Type changed: show the whole old value then the new one.
                brackets = charfortype(dictb[keya])
                retstr += deleted("{4}\"{0}\": {2}{1}{3},".format(keya, dictb[keya], brackets[0], brackets[1], i(indentlevel)))
                brackets = charfortype(dicta[keya])
                retstr += added("{4}\"{0}\": {2}{1}{3},".format(keya, dicta[keya], brackets[0], brackets[1],i(indentlevel)))
            else:
                retstr += processSubDict(keya, dicta[keya], dictb[keya], indentlevel)
    # Keys present only in dictb: deleted content.
    for keyb in dictb.keys():
        if not dicta.has_key(keyb):
            brackets = charfortype(dictb[keyb])
            if type(dictb[keyb]) is str or type(dictb[keyb]) is unicode:
                retstr += deleted("{4}\"{0}\": {2}{1}{3},".format(keyb, printsomething(dictb[keyb], 'deleted', indentlevel+1), brackets[0], brackets[1],i(indentlevel)))
            if type(dictb[keyb]) is list or type(dictb[keyb]) is dict:
                retstr += deleted("{4}\"{0}\": {2}<br/>\n{1}{4}{3},".format(keyb, printsomething(dictb[keyb], 'deleted', indentlevel+1), brackets[0], brackets[1],i(indentlevel)))
    return removeLastComma(retstr)
def removeLastComma(str):
    """Return *str* with its last comma removed.

    Fixed: the original assumed a comma was always present.  When rfind()
    returned -1, the slice pair str[:-1] + str[0:] duplicated all but the
    last character of a comma-less string (e.g. 'ab' -> 'aab').

    NOTE: the parameter name shadows the builtin ``str``; it is kept for
    backward compatibility with existing callers.
    """
    position = str.rfind(',')
    if position == -1:
        # No comma: nothing to remove.
        return str
    return str[:position] + str[position + 1:]
def difflist(lista, listb, indentlevel):
    """
    diffdict and difflist are recursive methods which build an HTML representation of the differences between two objects.
    TODO: difflist adds commas after every entry, even the last entry.
    When multiple items in a list have been modified, the levenshtein distance is used to find which items most likely
    were modified from one object to the next. Although this may not be necessary when the list items are primatives
    like strings, it is very useful when the two list items are multi-level dicts. This allows us to only color-code the
    actual change and not the entire sub structure.
    [{name: 'Patrick', colleagues: ['Sam', 'Jason']}]
    [{name: 'Patrick', colleagues: ['Sam', 'Jason', 'Ben']}]
    * By Using levenshtein, we can color green just the string 'Ben'. (Preferred)
        BLACK - [{name: 'Patrick', colleagues: ['Sam', 'Jason',
        GREEN -     'Ben'
        BLACK - ]}]
    * Without levenshtein, we would end up coloring the entire dict twice:
        RED   - {name: 'Patrick', colleagues: ['Sam', 'Jason']}
        GREEN - {name: 'Patrick', colleagues: ['Sam', 'Jason', 'Ben']}
    """
    retstr = ''
    # Items in lista but not listb (candidates for "added").
    addedlist = []
    # Items in listb but not lista (candidates for "deleted").
    deletedlist = []
    # Pass 1: items common to both lists render unchanged; the rest are
    # collected for pairing below.
    for item in lista:
        if item in listb:
            brackets = charfortype(item)
            if type(item) is str or type(item) is unicode:
                retstr += same("{3}{1}{0}{2},".format(item, brackets[0], brackets[1],i(indentlevel)))
            else:
                # Handle lists and dicts here:
                diffstr = ''
                if type(item) is list or type(item) is dict:
                    diffstr = printsomething(item, 'same', indentlevel+1)
                retstr += same("{3}{1}<br/>\n{0}{3}{2},".format(diffstr, brackets[0], brackets[1],i(indentlevel)))
        else:
            addedlist.append(item)
    for item in listb:
        if not item in lista:
            deletedlist.append(item)
    # Pass 2: pair each added item with its most similar deleted item (by
    # levenshtein distance on the string form) so modifications render as a
    # fine-grained diff rather than a full delete + full add.
    for item in addedlist:
        bestmatch = findmostsimilar(item, deletedlist)
        brackets = charfortype(item)
        if None is bestmatch:
            # No plausible counterpart: the item is genuinely new.
            if type(item) is str or type(item) is unicode:
                retstr += added("{3}{1}{0}{2},".format(item, brackets[0], brackets[1],i(indentlevel)))
            else:
                # Handle lists and dicts here:
                diffstr = ''
                if type(item) is list or type(item) is dict:
                    diffstr = printsomething(item, 'added', indentlevel+1)
                retstr += added("{3}{1}<br/>\n{0}{3}{2},".format(diffstr, brackets[0], brackets[1],i(indentlevel)))
        else:
            if type(item) is str or type(item) is unicode:
                # Strings: show the old value as deleted, the new as added.
                retstr += deleted("{3}{1}{0}{2},".format(bestmatch, brackets[0], brackets[1],i(indentlevel)))
                retstr += added("{3}{1}{0}{2},".format(item, brackets[0], brackets[1],i(indentlevel)))
            else:
                # Handle lists and dicts here:
                # Containers: recurse so only the changed parts are colored.
                diffstr = ''
                if type(item) is list:
                    diffstr = difflist(item, bestmatch, indentlevel+1)
                elif type(item) is dict:
                    diffstr = diffdict(item, bestmatch, indentlevel+1)
                retstr += same("{3}{1}<br/>\n{0}{3}{2},".format(diffstr, brackets[0], brackets[1],i(indentlevel)))
            # A deleted item may only be consumed by one added item.
            deletedlist.remove(bestmatch)
    # Pass 3: whatever remains unpaired in deletedlist was truly removed.
    for item in deletedlist:
        brackets = charfortype(item)
        if type(item) is str or type(item) is unicode:
            retstr += deleted("{3}{1}{0}{2},".format(item, brackets[0], brackets[1],i(indentlevel)))
        else:
            # Handle lists and dicts here:
            diffstr = ''
            if type(item) is list or type(item) is dict:
                diffstr = printsomething(item, 'deleted', indentlevel+1)
            retstr += deleted("{3}{1}<br/>\n{0}{3}{2},".format(diffstr, brackets[0], brackets[1],i(indentlevel)))
    return removeLastComma(retstr)
# levenshtein - http://hetland.org/coding/python/levenshtein.py
def strdistance(a, b):
    """Calculate the Levenshtein (edit) distance between a and b.

    Classic two-row dynamic-programming formulation: only the previous and
    current rows are kept, so memory use is O(min(len(a), len(b))).

    :param a: first string
    :param b: second string
    :return: minimum number of single-character insertions, deletions and
        substitutions required to turn a into b
    """
    n, m = len(a), len(b)
    if n > m:
        # Make sure n <= m, to use O(min(n,m)) space
        a, b = b, a
        n, m = m, n
    # BUG FIX: list() is required on Python 3, where range() is a lazy
    # sequence that does not support item assignment; on Python 2 this is
    # simply an extra copy.
    current = list(range(n + 1))
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1]
            if a[j - 1] != b[i - 1]:
                change = change + 1
            current[j] = min(add, delete, change)
    return current[n]
def findmostsimilar(item, list):
stritem = str(item)
mindistance = sys.maxint
bestmatch = None
| |
of the features to consider when looking for the best split at each node.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The class labels.
max_features_ : int
The inferred value of max_features.
n_samples_ : int
The number of samples when `fit` is performed.
n_features_ : int
The number of features when `fit` is performed.
root_ : Node
The root node of the tree after fitting.
compiled_root_ : CompiledTree
The compiled root node of the tree after fitting.
"""
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.robust_weight = robust_weight
self.attack_model = attack_model
self.one_adversarial_class = one_adversarial_class
self.chen_heuristic = chen_heuristic
self.compile = compile
self.random_state = random_state
def _check_target(self, y):
target_type = type_of_target(y)
if target_type != "binary":
raise ValueError(
f"Unknown label type: classifier only supports binary labels but found {target_type}"
)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def _score(self, y):
return gini_impurity(np.sum(y == 0), np.sum(y == 1))
def _create_leaf(self, y):
"""
Create a leaf object that predicts according to the ratio of benign
and malicious labels in the array y.
"""
# Count the number of points that fall into this leaf including
# adversarially moved points
label_counts = np.bincount(y, minlength=2)
# Set the leaf's prediction value to the weighted average of the
# prediction with and without moving points
value = label_counts / np.sum(label_counts)
return Node(_TREE_UNDEFINED, _TREE_LEAF, _TREE_LEAF, value)
def _scan_feature(self, X, y, feature, constraints):
"""
Scan feature to find the locally optimal split.
"""
samples = X[:, feature]
attack_mode = self.attack_model_[feature]
constraint = constraints[feature]
# If possible, use the faster scan implementation
if self.robust_weight == 1:
return _scan_numerical_feature_fast(
samples,
y,
*attack_mode,
*constraint,
self.chen_heuristic,
self.one_adversarial_class,
)
else:
return self.__scan_feature_numerical(samples, y, attack_mode, *constraint)
    def __initialize_scan(self, samples, y, attack_mode):
        """Build the event queue and initial label counts for a split scan.

        Each queue entry is a ``(position, label, move_from, move_to)``
        tuple: when the sweep in __scan_feature_numerical passes
        ``position``, one sample with ``label`` is moved from the
        ``move_from`` counts bucket to the ``move_to`` bucket. ``counts``
        holds one ``[n_label_0, n_label_1]`` row per bucket, indexed by the
        RIGHT / RIGHT_INTERSECT / LEFT_INTERSECT / LEFT constants.
        """
        queue = []
        # One [label-0 count, label-1 count] row per region bucket.
        counts = np.array(
            [[0, 0], [0, 0], [0, 0], [0, 0]],
            dtype=np.int64,
        )
        if attack_mode == "":
            # No perturbation on this feature: every sample starts fully on
            # the right and crosses directly to the left at its own value.
            counts[RIGHT] = np.bincount(y)
            for sample, label in zip(samples, y):
                queue.append((sample, label, RIGHT, LEFT))
        elif attack_mode == ">":
            counts[RIGHT] = np.bincount(y)
            for sample, label in zip(samples, y):
                if label == 0 and self.one_adversarial_class:
                    # Label-0 samples are non-adversarial in this setting,
                    # so they move directly from RIGHT to LEFT.
                    queue.append((sample, label, RIGHT, LEFT))
                else:
                    # Perturbable samples end in the left intersection
                    # region instead of fully on the left.
                    queue.append((sample, label, RIGHT, LEFT_INTERSECT))
        elif attack_mode == "<":
            if self.one_adversarial_class:
                # Only label-1 samples are perturbable: they start in the
                # right intersection, label-0 starts fully right.
                counts[RIGHT][0] = np.sum(y == 0)
                counts[RIGHT_INTERSECT][1] = np.sum(y == 1)
            else:
                counts[RIGHT_INTERSECT] = np.bincount(y)
            for sample, label in zip(samples, y):
                if label == 0 and self.one_adversarial_class:
                    queue.append((sample, label, RIGHT, LEFT))
                else:
                    queue.append((sample, label, RIGHT_INTERSECT, LEFT))
        elif attack_mode == "<>":
            if self.one_adversarial_class:
                counts[RIGHT][0] = np.sum(y == 0)
                counts[RIGHT_INTERSECT][1] = np.sum(y == 1)
            else:
                counts[RIGHT_INTERSECT] = np.bincount(y)
            for sample, label in zip(samples, y):
                if label == 0 and self.one_adversarial_class:
                    queue.append((sample, label, RIGHT, LEFT))
                else:
                    # Perturbable in both directions: the sample starts and
                    # ends inside the intersection regions.
                    queue.append((sample, label, RIGHT_INTERSECT, LEFT_INTERSECT))
        elif isinstance(attack_mode, numbers.Number):
            # Symmetric numeric perturbation: each perturbable sample emits
            # three events, entering/leaving the intersection regions at
            # sample -/+ attack_mode.
            counts[RIGHT] = np.bincount(y)
            for sample, label in zip(samples, y):
                if label == 0 and self.one_adversarial_class:
                    queue.append((sample, label, RIGHT, LEFT))
                else:
                    queue.append(
                        (sample - attack_mode, label, RIGHT, RIGHT_INTERSECT),
                    )
                    queue.append(
                        (sample, label, RIGHT_INTERSECT, LEFT_INTERSECT),
                    )
                    queue.append(
                        (sample + attack_mode, label, LEFT_INTERSECT, LEFT),
                    )
        elif isinstance(attack_mode, tuple):
            # Asymmetric perturbation given as (decrease, increase) bounds.
            counts[RIGHT] = np.bincount(y)
            for sample, label in zip(samples, y):
                if label == 0 and self.one_adversarial_class:
                    queue.append((sample, label, RIGHT, LEFT))
                else:
                    queue.append(
                        (sample - attack_mode[0], label, RIGHT, RIGHT_INTERSECT),
                    )
                    queue.append(
                        (sample, label, RIGHT_INTERSECT, LEFT_INTERSECT),
                    )
                    queue.append(
                        (sample + attack_mode[1], label, LEFT_INTERSECT, LEFT),
                    )
        # Sort queue in reverse order since popping from end is faster
        queue.sort(reverse=True)
        return queue, counts
    def __scan_feature_numerical(
        self,
        samples,
        y,
        attack_mode,
        left_bound,
        right_bound,
    ):
        """
        Scan a numerical feature for the optimal split by identifying every
        potential split, sorting these and iterating through them.

        While iterating from the left to the right, remember counts for the
        right / right_intersect / left_intersect / left positions and label.

        Returns ``(best_score, best_split)``: the lowest adversarial Gini
        found (the 10e9 sentinel if no candidate existed) and the matching
        split threshold (or None).
        """
        best_score = 10e9  # sentinel larger than any real Gini value
        best_split = None

        queue, counts = self.__initialize_scan(samples, y, attack_mode)

        # Consume events that occur before the left constraint bound; they
        # update the counts but cannot produce a candidate split.
        while len(queue) > 0 and queue[-1][0] < left_bound:
            point, label, move_from, move_to = queue.pop()
            counts[move_from][label] -= 1
            counts[move_to][label] += 1

        adv_gini = None
        while queue:
            # Apply the next event: move one sample between region buckets.
            point, label, move_from, move_to = queue.pop()
            counts[move_from][label] -= 1
            counts[move_to][label] += 1

            if point >= right_bound:
                break

            # Only score between two distinct event positions: all events at
            # the same position must be applied before evaluating a split.
            if len(queue) > 0 and queue[-1][0] != point:
                # Compute the adversarial Gini gain
                if self.one_adversarial_class:
                    adv_gini = _counts_to_one_class_adv_gini(
                        counts, self.robust_weight, self.chen_heuristic
                    )
                else:
                    adv_gini = _counts_to_two_class_adv_gini(
                        counts, self.robust_weight, self.chen_heuristic
                    )

                # Maximize the margin of the split
                split = (point + queue[-1][0]) * 0.5

                if (
                    adv_gini is not None
                    and adv_gini < best_score
                    and split < right_bound
                ):
                    best_score = adv_gini
                    best_split = split

            if len(queue) == 0:
                break

        return best_score, best_split
def _split_left_right(self, X, y, rule, feature):
"""
Split the dataset (X, y) into a left and right dataset according to the
optimal split determined by rule and feature.
Only a ratio 'robust_weight' of moving (malicious) points are actually
transfered to the other side.
"""
# Get perturbation range for this feature
dec, inc = self.attack_model_[feature]
if self.one_adversarial_class:
# Determine the indices of samples on each side of the split
label_0 = y == 0
label_1 = np.invert(label_0)
i_left = np.where(
(label_0 & (X[:, feature] <= rule))
| (label_1 & (X[:, feature] + inc <= rule))
)[0]
i_left_intersection = np.where(
label_1 & (X[:, feature] + inc > rule) & (X[:, feature] <= rule)
)[0]
i_right_intersection = np.where(
label_1 & (X[:, feature] > rule) & (X[:, feature] - dec <= rule)
)[0]
i_right = np.where(
(label_0 & (X[:, feature] > rule))
| (label_1 & (X[:, feature] - dec > rule))
)[0]
else:
# Determine the indices of samples on each side of the split
i_left = np.where(X[:, feature] + inc <= rule)[0]
i_left_intersection = np.where(
(X[:, feature] + inc > rule) & (X[:, feature] <= rule)
)[0]
i_right_intersection = np.where(
(X[:, feature] > rule) & (X[:, feature] - dec <= rule)
)[0]
i_right = np.where(X[:, feature] - dec > rule)[0]
# Count samples with labels 0 and 1 left and right
l_0, l_1 = np.bincount(y[i_left], minlength=2)
r_0, r_1 = np.bincount(y[i_right], minlength=2)
# Determine labels on the left and right intersection
y_left_intersection = y[i_left_intersection]
y_right_intersection = y[i_right_intersection]
i_left_intersection_0 = np.where(y_left_intersection == 0)[0]
i_left_intersection_1 = np.where(y_left_intersection == 1)[0]
i_right_intersection_0 = np.where(y_right_intersection == 0)[0]
i_right_intersection_1 = np.where(y_right_intersection == 1)[0]
li_0 = len(i_left_intersection_0)
li_1 = len(i_left_intersection_1)
ri_0 = len(i_right_intersection_0)
ri_1 = len(i_right_intersection_1)
# Compute optimal movement
if self.one_adversarial_class:
# Compute numbers of samples after applying rho
assert li_0 == 0
assert ri_0 == 0
l_1 = l_1 + round((1.0 - self.robust_weight) * li_1)
r_1 = r_1 + round((1.0 - self.robust_weight) * ri_1)
li_1 = round(self.robust_weight * li_1)
ri_1 = round(self.robust_weight * ri_1)
i_1 = li_1 + ri_1
# Determine optimal movement
if self.chen_heuristic:
_, m1 = chen_adversarial_gini_gain_one_class(
l_0, l_1, r_0, r_1, li_1, ri_1
)
else:
_, m1 = adversarial_gini_gain_one_class(l_0, l_1, r_0, r_1, i_1)
m0 = None
else:
# Compute numbers of samples after applying rho
l_0 = l_0 + round((1.0 - self.robust_weight) * li_0)
l_1 = l_1 + round((1.0 - self.robust_weight) * li_1)
r_0 = r_0 + round((1.0 - self.robust_weight) * ri_0)
r_1 = r_1 + round((1.0 - self.robust_weight) * ri_1)
li_0 = round(self.robust_weight * li_0)
li_1 = round(self.robust_weight * li_1)
ri_0 = round(self.robust_weight * ri_0)
ri_1 = round(self.robust_weight * ri_1)
# Determine optimal movement
if self.chen_heuristic:
_, m1, m0 = chen_adversarial_gini_gain_two_class(
l_0, l_1, li_0, li_1, ri_0, ri_1, r_0, r_1
)
else:
_, m1, m0 = adversarial_gini_gain_two_class(
l_0, l_1, li_0, li_1, ri_0, ri_1, r_0, r_1
)
# Move label 1 samples according to m1
if m1 > li_1:
n_move_left = m1 - li_1
i_left_intersection_1 = np.concatenate(
(i_left_intersection_1, i_right_intersection_1[:n_move_left])
)
i_right_intersection_1 = i_right_intersection_1[n_move_left:]
elif m1 < li_1:
n_move_right = li_1 - m1
i_left_intersection_1 = i_left_intersection_1[n_move_right:]
i_right_intersection_1 = np.concatenate(
(i_left_intersection_1[:n_move_right], i_right_intersection_1)
)
# Move label 0 samples according to m0 (not used if one_adversarial_class=True)
if m0:
if m0 > li_0:
n_move_left = m0 - li_0
i_left_intersection_0 = np.concatenate(
(i_left_intersection_0, i_right_intersection_0[:n_move_left])
)
i_right_intersection_0 = i_right_intersection_0[n_move_left:]
elif m0 < li_0:
n_move_right = li_0 - m0
i_left_intersection_0 = i_left_intersection_0[n_move_right:]
i_right_intersection_0 = np.concatenate(
(i_left_intersection_0[:n_move_right], i_right_intersection_0)
)
i_left = np.concatenate(
(
i_left,
i_left_intersection_0,
i_left_intersection_1,
)
)
i_right = np.concatenate(
(
| |
import csv
import obspy
import numpy as np
import warnings
from copy import deepcopy
from obspy import taup
from obspy.geodetics import gps2dist_azimuth
from os.path import basename, exists
from mtuq.util import AttribDict, warn
from mtuq.util.cap import WeightParser, taper
from mtuq.util.signal import cut, get_arrival, m_to_deg
class ProcessData(object):
""" An attempt at a one-size-fits-all data processing class
.. rubric :: Usage
Processing data is a two-step procedure. First, the user supplies parameters
(see available choices below) to create a data processing function:
.. code::
function = ProcessData(**parameters)
Second, an ObsPy stream is given as input to the data processing function
and a processed stream returned as output:
.. code::
processed_stream = function(stream)
Data processing can also be applied to an entire ``Dataset`` at once:
.. code::
processed_dataset = dataset.map(function)
See `mtuq/examples/` for further illustration.
.. rubric :: Parameters
``filter_type`` (`str`)
- ``'bandpass'``
Butterworth-Bandpass (uses `obspy.signal.filter.bandpass`)
- ``'lowpass'``
Butterworth-Lowpass (uses `obspy.signal.filter.lowpass`)
- ``'highpass'``
Butterworth-Highpass (uses `obspy.signal.filter.highpass`)
- ``None``
no filter will be applied
``pick_type`` (`str`)
- ``'taup'``
calculates P, S arrival times from Tau-P model
(uses `obspy.taup.TauPyModel.get_arrival_times`)
- ``'FK_metadata'``
reads P, S arrival times from FK metadata
- ``'SAC_metadata'``
reads P, S arrival times from SAC metadata fields `t5`, `t6`
- ``'user_supplied'``
reads P, S arrival times from columns 8, 10 of `capuaf_file`
``window_type`` (`str`)
- ``'body_wave'``
chooses window starttime before P arrival
- ``'surface_wave'``
chooses window starttime after S arrival
- ``None``
no windows will be applied
``apply_statics`` (`bool`)
whether or not to apply static time shifts from columns 11-13 of `capuaf_file`
``apply_weights`` (`bool`)
whether or not to apply objective function weights from columns 3-8 of `capuaf_file`
``apply_scaling`` (`bool`)
whether or not to apply distance-dependent amplitude scaling
.. rubric:: Other input arguments that may be required, depending on the above
``freq_min`` (`float`)
Required for `filter_type=bandpass`
``freq_max`` (`float`)
Required for `filter_type=bandpass`
``freq`` (`float`)
Required for `filter_type=lowpass` or `filter_type=highpass`
``window_length`` (`float`)
window length in seconds
``padding`` (`list`)
amount by which Green's functions will be padded relative to data
``taup_model`` (`str`)
Name of built-in ObsPy TauP model or path to custom ObsPy Taup model,
required for `pick_type=taup`
``FK_database`` (`str`)
Path to FK database, required for `pick_type=FK_metadata`
``capuaf_file`` (`str`)
Path to `CAPUAF`-style text file, required for `pick_type=user_supplied`
"""
def __init__(self,
filter_type=None,
window_type=None,
pick_type=None,
window_length=None,
padding=None,
taup_model=None,
FK_database=None,
FK_model=None,
apply_statics=False,
apply_weights=True,
apply_scaling=True,
scaling_power=None,
scaling_coefficient=None,
capuaf_file=None,
**parameters):
if not filter_type:
warn("No filter will be applied")
if not window_type:
warn("No windows will be applied")
if window_type and not pick_type:
raise Exception("Undefined parameter: pick_type")
if filter_type:
filter_type = filter_type.lower()
if window_type:
filter_type = filter_type.lower()
self.filter_type = filter_type
self.window_type = window_type
self.pick_type = pick_type
self.window_length = window_length
self.padding = padding
self.taup_model = taup_model
self.FK_database = FK_database
self.FK_model = FK_model
self.apply_weights = apply_weights
self.apply_statics = apply_statics
self.apply_scaling = apply_scaling
self.scaling_power = scaling_power
self.scaling_coefficient = scaling_coefficient
self.capuaf_file = capuaf_file
#
# check filter parameters
#
if not self.filter_type:
# nothing to check
pass
elif self.filter_type == 'bandpass':
# allow filter corners to be specified in terms of either period [s]
# or frequency [Hz]
if 'period_min' in parameters and 'period_max' in parameters:
assert 'freq_min' not in parameters
assert 'freq_max' not in parameters
parameters['freq_min'] = parameters['period_max']**-1
parameters['freq_max'] = parameters['period_min']**-1
if 'freq_min' not in parameters: raise ValueError
if 'freq_max' not in parameters: raise ValueError
assert 0 < parameters['freq_min']
assert parameters['freq_min'] < parameters['freq_max']
assert parameters['freq_max'] < np.inf
self.freq_min = parameters['freq_min']
self.freq_max = parameters['freq_max']
elif self.filter_type == 'lowpass':
if 'period' in parameters:
assert 'freq' not in parameters
parameters['freq'] = parameters['period']**-1
if 'freq' not in parameters: raise ValueError
assert 0 < parameters['freq']
assert parameters['freq'] < np.inf
self.freq = parameters['freq']
elif self.filter_type == 'highpass':
if 'period' in parameters:
assert 'freq' not in parameters
parameters['freq'] = parameters['period']**-1
if 'freq' not in parameters: raise ValueError
assert 0 <= parameters['freq'] < np.inf
self.freq = parameters['freq']
else:
raise ValueError('Bad parameter: filter_type')
#
# check window parameters
#
#
if not self.window_type:
# nothing to check now
pass
elif self.window_type == 'body_wave':
# nothing to check now
pass
elif self.window_type == 'surface_wave':
# nothing to check now
pass
else:
raise ValueError('Bad parameter: window_type')
if self.window_type:
if self.window_length is None:
raise ValueError('Must be defined: window_length')
assert self.window_length > 0
if self.padding:
assert self.window_type is not None
if self.padding is None:
self.padding = (0., 0.)
#
# check phase pick parameters
#
if not self.pick_type:
# nothing to check now
pass
elif self.pick_type == 'taup':
assert self.taup_model is not None
self._taup = taup.TauPyModel(self.taup_model)
elif self.pick_type == 'FK_metadata':
assert self.FK_database is not None
assert exists(self.FK_database)
if self.FK_model is None:
self.FK_model = basename(self.FK_database)
elif self.pick_type == 'SAC_metadata':
pass
elif self.pick_type == 'user_supplied':
pass
else:
raise ValueError('Bad parameter: pick_type, %s' % self.pick_type)
#
# check weight parameters
#
if apply_scaling:
if self.window_type == 'body_wave':
if self.scaling_power is None:
self.scaling_power = 1.
if self.scaling_coefficient is None:
self.scaling_coefficient = 1.e5
elif self.window_type == 'surface_wave':
if self.scaling_power is None:
self.scaling_power = 0.5
if self.scaling_coefficient is None:
self.scaling_coefficient = 1.e5
#
# parse text files
#
if self.apply_statics or\
self.apply_weights or\
self.pick_type == 'user_supplied':
assert capuaf_file is not None
if self.capuaf_file:
assert exists(capuaf_file)
parser = WeightParser(self.capuaf_file)
if self.apply_statics:
self.statics = parser.parse_statics()
if self.apply_weights:
self.weights = parser.parse_weights()
if self.pick_type == 'user_supplied':
self.picks = parser.parse_picks()
def __call__(self, traces, station=None, origin=None, overwrite=False):
'''
Carries out data processing operations on obspy streams
MTUQ GreensTensors
input traces: all availables traces for a given station
type traces: obspy Stream or MTUQ GreensTensor
'''
if station is None:
station = getattr(traces, 'station', None)
if origin is None:
origin = getattr(traces, 'origin', None)
# overwrite existing data?
if overwrite:
traces = traces
else:
traces = deepcopy(traces)
if not hasattr(traces, 'id'):
raise Exception('Missing station identifier')
id = traces.id
# collect location information
distance_in_m, azimuth, _ = gps2dist_azimuth(
origin.latitude,
origin.longitude,
station.latitude,
station.longitude)
# collect time sampling information
nt, dt = traces[0].stats.npts, traces[0].stats.delta
# Tags can be added through dataset.add_tag to keep track of custom
# metadata or support other customized uses. Here we use tags to
# distinguish data from Green's functions and displacement time series
# from velcoity time series
if not hasattr(traces, 'tags'):
raise Exception('Missing tags attribute')
tags = traces.tags
if 'units:m' in tags:
# nothing to do
pass
elif 'units:cm' in tags:
# convert to meters
for trace in traces:
trace.data *= 1.e-2
index = tags.index('units:cm')
tags[index] = 'units:m'
else:
warn('Units not specified.')
for trace in traces:
trace.attrs = AttribDict()
#
# part 1: filter traces
#
if self.filter_type == 'bandpass':
for trace in traces:
trace.detrend('demean')
trace.detrend('linear')
trace.taper(0.05, type='hann')
trace.filter('bandpass', zerophase=False,
freqmin=self.freq_min,
freqmax=self.freq_max)
elif self.filter_type == 'lowpass':
for trace in traces:
trace.detrend('demean')
trace.detrend('linear')
trace.taper(0.05, type='hann')
trace.filter('lowpass', zerophase=False,
freq=self.freq)
elif self.filter_type == 'highpass':
for trace in traces:
trace.detrend('demean')
trace.detrend('linear')
trace.taper(0.05, type='hann')
trace.filter('highpass', zerophase=False,
freq=self.freq)
if 'type:velocity' in tags:
# convert to displacement
for trace in traces:
trace.data = np.cumsum(trace.data)*dt
index = tags.index('type:velocity')
tags[index] = 'type:displacement'
#
# part 2a: apply distance scaling
#
if self.apply_scaling:
for trace in traces:
trace.data *=\
(distance_in_m/self.scaling_coefficient)**self.scaling_power
#
# part 2b: apply user-supplied data weights
#
if 'type:greens' in tags:
pass
elif self.apply_weights:
for trace in traces:
try:
component = trace.stats.channel[-1].upper()
weight = self.weights[id][self.window_type+'_'+component]
except:
weight = None
if weight:
trace.attrs.weight = weight
else:
traces.remove(trace)
#
# part 3: determine phase picks
#
if self.pick_type == 'user_supplied':
picks = self.picks[id]
else:
picks = dict()
if self.pick_type=='taup':
with warnings.catch_warnings():
# supress obspy warning that gets raised even when taup is
# used correctly (someone should submit an obspy fix)
warnings.filterwarnings('ignore')
arrivals = self._taup.get_travel_times(
origin.depth_in_m/1000.,
m_to_deg(distance_in_m),
phase_list=['p', 's', 'P', 'S'])
try:
picks['P'] = get_arrival(arrivals, 'p')
except:
picks['P'] = get_arrival(arrivals, 'P')
try:
picks['S'] = get_arrival(arrivals, 's')
except:
picks['S'] = get_arrival(arrivals, 'S')
elif self.pick_type=='FK_metadata':
sac_headers = obspy.read('%s/%s_%s/%s.grn.0' %
(self.FK_database,
self.FK_model,
str(int(np.ceil(origin.depth_in_m/1000.))),
str(int(np.ceil(distance_in_m/1000.)))),
format='sac')[0].stats.sac
picks['P'] = sac_headers.t1
picks['S'] = sac_headers.t2
elif self.pick_type=='SAC_metadata':
sac_headers = traces[0].sac
picks['P'] = sac_headers.t5
picks['S'] = sac_headers.t6
for trace in traces:
#
# part 4a: determine window start and end times
#
if self.window_type == 'body_wave':
# reproduces CAPUAF body | |
none or a string of all 0's, so we check for both
return not (cg_id is None or utils.NULL_REF == cg_id)
def _create_snapshot_volume(self, image):
"""Creates snapshot volume for given group with snapshot_id."""
group = self._get_snapshot_group(image['pitGroupRef'])
LOG.debug("Creating snap vol for group %s", group['label'])
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
if self._is_cgsnapshot(image):
return self._client.create_cg_snapshot_view(
image['consistencyGroupId'], label, image['id'])
else:
return self._client.create_snapshot_volume(
image['pitRef'], label, image['baseVol'])
def _create_snapshot_group(self, label, volume, percentage_capacity=20.0):
"""Define a new snapshot group for a volume
:param label: the label for the snapshot group
:param volume: an E-Series volume
:param percentage_capacity: an optional repository percentage
:return: a new snapshot group
"""
# Newer versions of the REST API are capable of automatically finding
# the best pool candidate
if not self._client.features.REST_1_3_RELEASE:
vol_size_gb = int(volume['totalSizeInBytes']) / units.Gi
pools = self._get_sorted_available_storage_pools(vol_size_gb)
volume_pool = next(pool for pool in pools if volume[
'volumeGroupRef'] == pool['id'])
# A disk pool can only utilize a candidate from its own pool
if volume_pool.get('raidLevel') == 'raidDiskPool':
pool_id_to_use = volume_pool['volumeGroupRef']
# Otherwise, choose the best available pool
else:
pool_id_to_use = pools[0]['volumeGroupRef']
group = self._client.create_snapshot_group(
label, volume['volumeRef'], pool_id_to_use,
repo_percent=percentage_capacity)
else:
group = self._client.create_snapshot_group(
label, volume['volumeRef'], repo_percent=percentage_capacity)
return group
def _get_snapshot_groups_for_volume(self, vol):
"""Find all snapshot groups associated with an E-Series volume
:param vol: An E-Series volume object
:return: A list of snapshot groups
:raise NetAppDriverException: if the list of snapshot groups cannot be
retrieved
"""
return [grp for grp in self._client.list_snapshot_groups()
if grp['baseVolume'] == vol['id']]
    def _get_available_snapshot_group(self, vol):
        """Find a snapshot group that has remaining capacity for snapshots.

        In order to minimize repository usage, we prioritize the snapshot
        group with remaining snapshot capacity that has most recently had a
        snapshot defined on it.

        :param vol: An E-Series volume object
        :return: A valid snapshot group that has available snapshot capacity,
        or None
        :raise NetAppDriverException: if the list of snapshot groups cannot be
        retrieved
        """
        groups_for_v = self._get_snapshot_groups_for_volume(vol)
        # Filter out reserved snapshot groups
        groups = [g for g in groups_for_v
                  if self.SNAPSHOT_VOL_COPY_SUFFIX not in g['label']]
        # Filter out groups that are part of a consistency group
        groups = [g for g in groups if not g['consistencyGroup']]
        # Find all groups with free snapshot capacity
        # NOTE(review): g.get('snapshotCount') is None when the key is
        # missing, and None < int raises on Python 3 -- presumably the
        # backend always reports this field; confirm before relying on it.
        groups = [group for group in groups if group.get('snapshotCount') <
                  self.MAX_SNAPSHOT_COUNT]
        # Order by the last defined snapshot on the group
        if len(groups) > 1:
            group_by_id = {g['id']: g for g in groups}
            snap_imgs = list()
            for group in groups:
                try:
                    snap_imgs.append(
                        self._get_latest_image_in_snapshot_group(group['id']))
                except exception.NotFound:
                    # A group with no images yet contributes no "latest"
                    # image; it can still be returned as a fallback below.
                    pass
            snap_imgs = sorted(snap_imgs, key=lambda x: x['pitSequenceNumber'])
            if snap_imgs:
                # The newest image
                img = snap_imgs[-1]
                return group_by_id[img['pitGroupRef']]
            else:
                return groups[0] if groups else None
        # Skip the snapshot image checks if there is only one snapshot group
        elif groups:
            return groups[0]
        else:
            return None
def _create_es_snapshot_for_clone(self, vol):
group_name = (utils.convert_uuid_to_es_fmt(uuid.uuid4()) +
self.SNAPSHOT_VOL_COPY_SUFFIX)
return self._create_es_snapshot(vol, group_name)
    def _create_es_snapshot(self, vol, group_name=None):
        """Create an E-Series snapshot image of the given volume.

        Finds a snapshot group with free capacity (or creates one within the
        backend's group limits) and defines a new snapshot image on it.

        :param vol: an E-Series volume object
        :param group_name: optional label for a newly created group; labels
            containing SNAPSHOT_VOL_COPY_SUFFIX denote reserved groups used
            for volume clones
        :return: the newly created E-Series snapshot image
        :raise SnapshotLimitExceeded: if no group can hold another snapshot
        """
        snap_grp, snap_image = None, None
        try:
            snap_grp = self._get_available_snapshot_group(vol)
            # If a snapshot group is not available, create one if possible
            if snap_grp is None:
                snap_groups_for_vol = self._get_snapshot_groups_for_volume(
                    vol)
                # We need a reserved snapshot group
                if (group_name is not None and
                        (self.SNAPSHOT_VOL_COPY_SUFFIX in group_name)):
                    # First we search for an existing reserved group
                    for grp in snap_groups_for_vol:
                        if grp['label'].endswith(
                                self.SNAPSHOT_VOL_COPY_SUFFIX):
                            snap_grp = grp
                            break
                    # No reserved group exists, so we create it
                    if (snap_grp is None and
                            (len(snap_groups_for_vol) <
                             self.MAX_SNAPSHOT_GROUP_COUNT)):
                        snap_grp = self._create_snapshot_group(group_name,
                                                               vol)
                # Ensure we don't exceed the snapshot group limit
                elif (len(snap_groups_for_vol) <
                      (self.MAX_SNAPSHOT_GROUP_COUNT -
                       self.RESERVED_SNAPSHOT_GROUP_COUNT)):
                    label = group_name if group_name is not None else (
                        utils.convert_uuid_to_es_fmt(uuid.uuid4()))
                    snap_grp = self._create_snapshot_group(label, vol)
                    LOG.info("Created snap grp with label %s.", label)
                # We couldn't retrieve or create a snapshot group
                if snap_grp is None:
                    raise exception.SnapshotLimitExceeded(
                        allowed=(self.MAX_SNAPSHOT_COUNT *
                                 (self.MAX_SNAPSHOT_GROUP_COUNT -
                                  self.RESERVED_SNAPSHOT_GROUP_COUNT)))
            return self._client.create_snapshot_image(
                snap_grp['id'])
        except exception.NetAppDriverException:
            with excutils.save_and_reraise_exception():
                # NOTE(review): snap_image is never assigned above, so this
                # condition holds whenever a group was obtained -- presumably
                # intended to clean up a group left without an image; verify
                # before changing.
                if snap_image is None and snap_grp:
                    self._delete_snapshot_group(snap_grp['id'])
    def create_snapshot(self, snapshot):
        """Creates a snapshot.

        :param snapshot: The Cinder snapshot
        :returns: A model update dict carrying the new E-Series snapshot
            image id as ``provider_id``
        """
        os_vol = snapshot['volume']
        vol = self._get_volume(os_vol['name_id'])
        # Serialize snapshot creation per base volume so concurrent requests
        # do not race on the same snapshot groups.
        snap_image = cinder_utils.synchronized(vol['id'])(
            self._create_es_snapshot)(vol)
        model_update = {
            'provider_id': snap_image['id']
        }
        return model_update
    def _delete_es_snapshot(self, es_snapshot):
        """Perform a soft-delete on an E-Series snapshot.

        Mark the snapshot image as no longer needed, so that it can be
        purged from the backend when no other snapshots are dependent upon it.

        :param es_snapshot: an E-Series snapshot image
        :return: None
        """
        index = self._get_soft_delete_map()
        snapgroup_ref = es_snapshot['pitGroupRef']
        if snapgroup_ref in index:
            # Restore this group's bitset of already soft-deleted images
            # from the persisted index.
            bitset = na_utils.BitSet(int((index[snapgroup_ref])))
        else:
            bitset = na_utils.BitSet(0)
        images = [img for img in self._client.list_snapshot_images() if
                  img['pitGroupRef'] == snapgroup_ref]
        # Mark this snapshot's bit; bit position i is the image's rank in
        # pitSequenceNumber order within its group.
        for i, image in enumerate(sorted(images, key=lambda x: x[
                'pitSequenceNumber'])):
            if(image['pitSequenceNumber'] == es_snapshot[
                    'pitSequenceNumber']):
                bitset.set(i)
                break
        index_update, keys_to_del = (
            self._cleanup_snapshot_images(images, bitset))
        # Persist the updated bitset / removed keys back to the backend.
        self._merge_soft_delete_changes(index_update, keys_to_del)
def delete_snapshot(self, snapshot):
"""Delete a snapshot."""
try:
es_snapshot = self._get_snapshot(snapshot)
except exception.NotFound:
LOG.warning("Snapshot %s already deleted.", snapshot['id'])
else:
os_vol = snapshot['volume']
vol = self._get_volume(os_vol['name_id'])
cinder_utils.synchronized(vol['id'])(self._delete_es_snapshot)(
es_snapshot)
def _get_soft_delete_map(self):
"""Retrieve the snapshot index from the storage backend"""
return self._client.list_backend_store(
self.SNAPSHOT_PERSISTENT_STORE_KEY)
    @cinder_utils.synchronized(SNAPSHOT_PERSISTENT_STORE_LOCK)
    def _merge_soft_delete_changes(self, index_update, keys_to_del):
        """Merge changes to the snapshot index and save it on the backend.

        This method merges provided changes into the index, locking, to ensure
        that concurrent changes that don't overlap are not overwritten. No
        update will occur if neither an update or keys to delete are provided.

        :param index_update: a dict of keys/value pairs to update in the index
        :param keys_to_del: a list of keys to purge from the index
        """
        if index_update or keys_to_del:
            # Re-read under the lock so concurrent non-overlapping merges
            # are preserved (read-modify-write must be atomic).
            index = self._get_soft_delete_map()
            if index_update:
                index.update(index_update)
            if keys_to_del:
                for key in keys_to_del:
                    if key in index:
                        del index[key]
            self._client.save_backend_store(
                self.SNAPSHOT_PERSISTENT_STORE_KEY, index)
    def _cleanup_snapshot_images(self, images, bitset):
        """Delete snapshot images that are marked for removal from the backend.

        This method will iterate over all snapshots (beginning with the
        oldest), that are defined on the same snapshot group as the provided
        snapshot image. If the snapshot is marked for deletion, it will be
        purged from the backend. Otherwise, the method will return because
        no further snapshots can be purged.
        The bitset will be updated based on the return from this method.
        Any updates to the index will be provided as a dict, and any keys
        to be removed from the index should be returned as (dict, list).

        :param images: a list of E-Series snapshot images
        :param bitset: a bitset representing the snapshot images that are
        no longer needed on the backend (and may be deleted when possible)
        :return (dict, list): a tuple containing a dict of updates for the
        index and a list of keys to remove from the index
        """
        snap_grp_ref = images[0]['pitGroupRef']
        # All images are marked as deleted, we can delete the snapshot group
        # NOTE(review): relies on BitSet supporting == against a plain int
        # (full mask 2**n - 1) — confirm in na_utils.BitSet.
        if bitset == 2 ** len(images) - 1:
            try:
                self._delete_snapshot_group(snap_grp_ref)
            except exception.NetAppDriverException as e:
                LOG.warning("Unable to remove snapshot group - %s.", e.msg)
            # Group is gone (or failed best-effort): drop its index entry.
            return None, [snap_grp_ref]
        else:
            # Order by their sequence number, from oldest to newest
            snapshots = sorted(images,
                               key=lambda x: x['pitSequenceNumber'])
            deleted = 0
            for i, snapshot in enumerate(snapshots):
                if bitset.is_set(i):
                    self._delete_snapshot_image(snapshot)
                    deleted += 1
                else:
                    # Snapshots must be deleted in order, so if the current
                    # snapshot is not pending deletion, we don't want to
                    # process any more
                    break
            if deleted:
                # Update the bitset based on the deleted snapshots
                bitset >>= deleted
                LOG.debug('Deleted %(count)s snapshot images from snapshot '
                          'group: %(grp)s.', {'count': deleted,
                                              'grp': snap_grp_ref})
                if deleted >= len(images):
                    try:
                        self._delete_snapshot_group(snap_grp_ref)
                    except exception.NetAppDriverException as e:
                        LOG.warning("Unable to remove snapshot group - %s.",
                                    e.msg)
                    return None, [snap_grp_ref]
            # Persist the shifted bitset; repr(bitset) is assumed to parse
            # back via int() in _delete_es_snapshot — TODO confirm.
            return {snap_grp_ref: repr(bitset)}, None
    def _delete_snapshot_group(self, group_id):
        """Delete a snapshot group, translating backend web-service errors
        into the driver's exception type.

        :param group_id: id of the E-Series snapshot group to remove
        :raises exception.NetAppDriverException: on a backend failure
        """
        try:
            self._client.delete_snapshot_group(group_id)
        except eseries_exc.WebServiceException as e:
            raise exception.NetAppDriverException(e.msg)
    def _delete_snapshot_image(self, es_snapshot):
        """Remove a snapshot image from the storage backend.

        If a snapshot group has no remaining snapshot images associated with
        it, it will be deleted as well. When the snapshot is deleted,
        any snapshot volumes that are associated with it will be orphaned,
        so they are also deleted.

        :param es_snapshot: An E-Series snapshot image
        """
        self._client.delete_snapshot_image(es_snapshot['id'])
def ensure_export(self, context, volume):
"""Synchronously recreates an export | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
from common.python.common.consts import MemberRole
from common.python.federation import roles_to_parties
from common.python.utils import log_utils
from kernel.components.evaluation.param import EvaluateParam
from kernel.transfer.variables.transfer_class.common_transfer_variable import CommonTransferVariable
from kernel.utils import consts
from kernel.utils.component_properties import ComponentProperties
from kernel.utils.data_util import header_alignment
from kernel.utils.param_extract import ParamExtract
LOGGER = log_utils.get_logger()
class ModelBase(object):
    def __init__(self):
        """Initialize the shared state every federated component carries.

        Concrete components populate most of these fields later via
        _init_runtime_parameters / the various setters; everything starts
        in a neutral "not configured" state here.
        """
        # Outputs produced by fit/predict, collected by the framework.
        self.model_output = None
        self.mode = None
        # Federation identity (filled from component properties at runtime).
        self.role = None
        self.member_id = None
        self.mix_promoter_member_id = None
        self.data_output = None
        self.model_param = None
        self.transfer_variable = None
        self.flowid = ''
        self.taskid = ''
        self.need_one_vs_rest = False
        self.tracker = None
        self.cv_fold = 0
        self.validation_freqs = None
        self.component_properties = ComponentProperties()
        # whether save out data to data set
        self.save_dataset = False
        # Does the model result need to be additionally saved to the storage system (such as CK)?
        # Note: By default, it has been saved uniformly to the mysql `model output` table
        self.model_save_to_storage = False
        self.is_serving_model = False
        # The component with the data set output is used to display the name
        self.show_name = None
        self.source_type = None
        self.federated_learning_mode = None
        self.federated_learning_type = None
        self._summary = dict()
        self._align_cache = dict()
        # Provider-side topology info (master/inner ids) for mix federation.
        self.provider_master = False
        self.provider_other_inner_id = None
        self.provider_master_inner_id = None
        self.provider_inner_id = None
        # common variable transfer
        self.common_transfer_variable = CommonTransferVariable()
    def set_show_name(self, name):
        """Set the display name used for this component's data-set output."""
        self.show_name = name
    def _init_runtime_parameters(self, component_parameters):
        """Parse, validate and apply the component's runtime configuration.

        Extracts this component's parameters from the raw config, runs the
        parameter self-check, copies the federation identity fields onto the
        model, then hands the validated params to the subclass hook
        ``_init_model``.

        :param component_parameters: raw configuration dict from the job DSL
        :returns: the validated parameter object
        """
        param_extracter = ParamExtract()
        param = param_extracter.parse_param_from_config(self.model_param, component_parameters)
        # check() raises when the configuration is invalid.
        param.check()
        LOGGER.debug("final parameters====>{}".format(param.__dict__))
        componentProperties = self.component_properties.parse_component_param(component_parameters, param)
        # Mirror the parsed federation identity onto this model instance.
        self.role = componentProperties.role
        self.member_id = componentProperties.local_member_id
        self.mix_promoter_member_id = componentProperties.mix_promoter_member_id
        self.provider_master = componentProperties.provider_master
        self.provider_other_inner_id = componentProperties.provider_other_inner_id
        self.provider_master_inner_id = componentProperties.provider_master_inner_id
        self.provider_inner_id = componentProperties.provider_inner_id
        self.federated_learning_mode = componentProperties.federated_learning_mode
        self.federated_learning_type = componentProperties.federated_learning_type
        self._init_model(param)
        self.set_save_dataset_flag(param)
        return param
def set_save_dataset_flag(self, param):
if hasattr(param, 'save_dataset'):
self.save_dataset = param.save_dataset
    @property
    def need_cv(self):
        """Whether this run is a cross-validation run (from component props)."""
        return self.component_properties.need_cv
    @property
    def need_run(self):
        """Whether this component should actually execute (from component props)."""
        return self.component_properties.need_run
    @need_run.setter
    def need_run(self, value: bool):
        # Delegates storage to component_properties so the flag has a
        # single source of truth.
        self.component_properties.need_run = value
    def _init_model(self, model):
        """Hook: apply validated parameters to the model; overridden by subclasses."""
        pass
    def load_model(self, model_dict):
        """Hook: restore model state from a persisted model dict; overridden by subclasses."""
        pass
def _parse_need_run(self, model_dict, model_meta_name):
meta_obj = list(model_dict.get('model').values())[0].get(model_meta_name)
need_run = meta_obj.need_run
# self.need_run = need_run
self.component_properties.need_run = need_run
    def run(self, component_parameters=None, args=None):
        """Main entry point: configure the component, then execute the
        sequence of functions extracted from the running rules.

        Each rule is a tuple ``(func, params, save_result, use_previews)``;
        when ``use_previews`` is set, the previously saved results are fed
        into ``func`` (optionally alongside ``params``) and the buffer is
        cleared. A single remaining saved result becomes ``data_output``.

        :param component_parameters: raw configuration dict for this component
        :param args: DSL arguments (input data/models)
        """
        LOGGER.debug("component_parameters====>{}".format(component_parameters))
        LOGGER.debug("args====>{}".format(args))
        self._init_runtime_parameters(component_parameters)
        self.component_properties.parse_dsl_args(args)
        running_funcs = self.component_properties.extract_running_rules(args, self)
        saved_result = []
        for func, params, save_result, use_previews in running_funcs:
            # for func, params in zip(todo_func_list, todo_func_params):
            if use_previews:
                # Feed accumulated results into this step; [saved, params]
                # when extra params exist, otherwise the results alone.
                if params:
                    real_param = [saved_result, params]
                else:
                    real_param = saved_result
                LOGGER.debug("func: {}".format(func))
                this_data_output = func(*real_param)
                # Consumed: start a fresh accumulation buffer.
                saved_result = []
            else:
                this_data_output = func(*params)
            LOGGER.debug("save_result:{}".format(saved_result))
            if save_result:
                saved_result.append(this_data_output)
        # Only an unambiguous single result is exposed as data_output;
        # zero or multiple leftovers are intentionally not assigned.
        if len(saved_result) == 1:
            self.data_output = saved_result[0]
            # LOGGER.debug("One data: {}".format(self.data_output.first()[1].features))
        LOGGER.debug("saved_result is : {}, data_output: {}".format(saved_result, self.data_output))
    def get_metrics_param(self):
        """Default evaluation configuration: binary task with positive label 1."""
        return EvaluateParam(eval_type="binary",
                             pos_label=1)
    def predict(self, data_inst):
        """Hook: produce predictions for *data_inst*; overridden by concrete models."""
        pass
    def fit(self, *args):
        """Hook: train the model; overridden by concrete models."""
        pass
    def transform(self, data_inst):
        """Hook: transform *data_inst*; overridden by concrete models."""
        pass
    def cross_validation(self, data_inst):
        """Hook: run cross validation over *data_inst*; overridden by concrete models."""
        pass
    def one_vs_rest_fit(self, train_data=None):
        """Hook: one-vs-rest training; overridden by concrete models."""
        pass
    def one_vs_rest_predict(self, train_data):
        """Hook: one-vs-rest prediction; overridden by concrete models."""
        pass
    def init_validation_strategy(self, train_data=None, validate_data=None):
        """Hook: set up in-training validation; overridden by concrete models."""
        pass
    def output_data(self):
        """Return the data produced by the last run (may be None)."""
        return self.data_output
    def export_model(self):
        """Return the serializable model output (may be None)."""
        return self.model_output
def data_is_empty(self, data_instances):
count = data_instances.count()
if count < 1:
raise ValueError("data instances is empty")
return count
    def set_flowid(self, flowid):
        """Set the flow id and propagate it to the transfer variable (if any)."""
        # self.flowid = '.'.join([self.taskid, str(flowid)])
        self.flowid = flowid
        self.set_transfer_variable()
    def set_transfer_variable(self):
        """Push the current flow id into the transfer variable, when one is set."""
        if self.transfer_variable is not None:
            LOGGER.debug("set flowid to transfer_variable, flowid: {}".format(self.flowid))
            self.transfer_variable.set_flowid(self.flowid)
    def set_taskid(self, taskid):
        """ taskid: jobid + component_name, reserved variable """
        self.taskid = taskid
def get_metric_name(self, name_prefix):
if not self.need_cv:
return name_prefix
return '_'.join(map(str, [name_prefix, self.flowid]))
    def set_tracker(self, tracker):
        """Attach the metric/result tracker used by callback_metric."""
        self.tracker = tracker
def set_predict_data_schema(self, predict_datas, schemas):
if predict_datas is None:
return predict_datas
if isinstance(predict_datas, list):
predict_data = predict_datas[0]
schema = schemas[0]
else:
predict_data = predict_datas
schema = schemas
if predict_data is not None:
predict_data.schema = {"header": ["label", "predict_result", "predict_score", "predict_detail", "type"],
"sid_name": schema.get('sid_name')}
return predict_data
    def callback_metric(self, metric_name, metric_namespace, metric_meta, metric_data):
        """Persist one metric through the tracker, disambiguating names per
        cross-validation fold.

        In CV mode the fold index is appended to the metric name and the
        curve name is derived from the flow id's sub-components so each
        fold's curve is distinct.
        """
        if self.need_cv:
            metric_name = '.'.join([metric_name, str(self.cv_fold)])
            flow_id_list = self.flowid.split('.')
            LOGGER.debug("Need cv, change callback_metric, flow_id_list: {}".format(flow_id_list))
            if len(flow_id_list) > 1:
                curve_name = '.'.join(flow_id_list[1:])
                metric_meta['curve_name'] = curve_name
        else:
            metric_meta['curve_name'] = metric_name
        self.tracker.saveSingleMetricData(metric_name, metric_namespace, metric_meta, metric_data)
    def set_cv_fold(self, cv_fold):
        """Record the current cross-validation fold index (used in metric names)."""
        self.cv_fold = cv_fold
def data_instance_to_str(self, data_instances, with_label):
if data_instances is None:
return data_instances
schema = data_instances.schema
new_data_instances = data_instances.mapValues(lambda v: v.to_csv())
data_instances.schema = schema
header = ''
if schema.get('header') is not None:
header = ",".join(schema.get('header'))
if with_label:
header = schema.get('label_name') + ',' + header
schema['header'] = header
return new_data_instances
    def summary(self):
        """Return a deep copy of the model summary so callers cannot mutate
        the model's internal state."""
        return copy.deepcopy(self._summary)
def set_summary(self, new_summary):
"""
Model summary setter
Parameters
----------
new_summary: dict, summary to replace the original one
Returns
-------
"""
if not isinstance(new_summary, dict):
raise ValueError(f"summary should be of dict type, received {type(new_summary)} instead.")
self._summary = copy.deepcopy(new_summary)
def _whether_with_arbiter(self, task_config):
"""
Determine whether arbiter is involved
Parameters
----------
task_config
Returns
-------
"""
with_arbiter = False
if task_config and 'task' in task_config:
task_info = task_config['task']
if 'members' in task_info:
for member in task_info['members']:
if member['member_role'] == MemberRole.ARBITER:
with_arbiter = True
break
return with_arbiter
    def _common_status_sync(self, with_arbiter):
        """Barrier for the standard federation topology: everyone reports
        completion to the promoter, then the promoter acknowledges back.

        The remote/get call order is significant — each ``remote`` must be
        matched by the peer's ``get`` in the same phase, so do not reorder.

        :param with_arbiter: whether an arbiter participates in this task
        """
        # first step :promoter receive other member complete status
        if self.role == MemberRole.PROMOTER:
            self.common_transfer_variable.provider2promoter_complete_status.get()
            if with_arbiter:
                self.common_transfer_variable.arbiter2promoter_complete_status.get()
        elif self.role == MemberRole.PROVIDER:
            self.common_transfer_variable.provider2promoter_complete_status.remote("completed")
        elif with_arbiter:
            # Any remaining role here is the arbiter itself.
            self.common_transfer_variable.arbiter2promoter_complete_status.remote("completed")
        # second step: other member receive promoter complete status
        if self.role == MemberRole.PROMOTER:
            self.common_transfer_variable.promoter2provider_complete_status.remote("completed")
            if with_arbiter:
                self.common_transfer_variable.promoter2arbiter_complete_status.remote("completed")
        elif self.role == MemberRole.PROVIDER:
            self.common_transfer_variable.promoter2provider_complete_status.get()
        elif with_arbiter:
            self.common_transfer_variable.promoter2arbiter_complete_status.get()
    def _mix_status_sync(self, with_arbiter):
        """Barrier for the 'mix' federation topology: providers sync with
        their own promoter, then the promoters (and optionally the arbiter)
        sync among themselves.

        The remote/get pairing per phase is order-sensitive; do not reorder.

        :param with_arbiter: whether an arbiter participates in this task
        """
        # first, each promoter get each provider sub task success status
        complete_flag = "completed"
        if self.role == MemberRole.PROMOTER:
            self.common_transfer_variable.provider2promoter_complete_status.get()
        elif self.role == MemberRole.PROVIDER:
            self.common_transfer_variable.provider2promoter_complete_status.remote(
                complete_flag,
                member_id_list=[self.mix_promoter_member_id]
            )
        # second,each sub task get promoter success status
        if self.role == MemberRole.PROMOTER:
            self.common_transfer_variable.promoter2provider_complete_status.remote(
                complete_flag
            )
        elif self.role == MemberRole.PROVIDER:
            self.common_transfer_variable.promoter2provider_complete_status.get(
                member_id_list=[self.mix_promoter_member_id]
            )
        # third,each promoter and arbiter change success status
        promoter_members = roles_to_parties([consts.PROMOTER])
        if self.role == MemberRole.PROMOTER:
            other_promoter_member_ids = [item_promoter.member_id for item_promoter in promoter_members
                                         if item_promoter.member_id != self.member_id]
            # remote complete status to other promoter
            self.common_transfer_variable.promoter2promoter_complete_status.remote(
                complete_flag,
                member_id_list=other_promoter_member_ids
            )
            if with_arbiter:
                self.common_transfer_variable.promoter2arbiter_complete_status.remote(
                    complete_flag
                )
            # get complete status from other promoter
            self.common_transfer_variable.promoter2promoter_complete_status.get(
                member_id_list=other_promoter_member_ids
            )
        elif self.role == MemberRole.ARBITER:
            self.common_transfer_variable.promoter2arbiter_complete_status.get()
    def status_sync(self, task_config):
        """Synchronize task completion status across all federation members.

        Skips the barrier entirely for single-member tasks or when no
        transfer variable is configured; otherwise dispatches to the
        topology-specific sync (mix vs. common).

        :param task_config: dict describing the task, including its members
        """
        if not self.transfer_variable:
            LOGGER.debug('without transfer variable, do not sync status')
            return
        # member count
        member_count = len(task_config.get("task", {}).get("members", []))
        if member_count <= 1:
            LOGGER.debug(f'member count:{member_count}, do not sync status')
            return
        with_arbiter = self._whether_with_arbiter(task_config)
        if self.federated_learning_type == consts.MIX:
            self._mix_status_sync(with_arbiter)
        else:
            self._common_status_sync(with_arbiter)
        LOGGER.debug(f'sync status complete,role:{self.role}')
@staticmethod
def extract_data(data: dict):
LOGGER.debug("In extract_data, data input: {}".format(data))
if len(data) == 0:
return data
if len(data) == 1:
return list(data.values())[0]
return data
@staticmethod
def predict_score_to_output(data_instances, predict_score, classes=None, threshold=0.5):
"""
Get predict result output
Parameters
----------
data_instances: table, data used for prediction
predict_score: table, probability scores
classes: list or None, all classes/label names
threshold: float, predict threshold, used for binary label
Returns
-------
Table, predict result
"""
# regression
if classes is None:
predict_result = data_instances.join(predict_score, lambda d, pred: [d.label, pred,
pred, {"label": pred}])
# binary
elif isinstance(classes, list) and len(classes) == 2:
class_neg, class_pos = classes[0], classes[1]
pred_label = predict_score.mapValues(lambda x: class_pos if x > threshold else class_neg)
predict_result = data_instances.mapValues(lambda x: x.label)
predict_result = predict_result.join(predict_score, lambda x, y: (x, y))
class_neg_name, class_pos_name = str(class_neg), str(class_pos)
predict_result = predict_result.join(pred_label, lambda x, y: [x[0], y, x[1],
{class_neg_name: (1 - x[1]),
class_pos_name: x[1]}])
# multi-label: input = array of predicted score of all labels
elif isinstance(classes, list) and len(classes) > 2:
# pred_label = predict_score.mapValues(lambda x: classes[x.index(max(x))])
classes = [str(val) for val in classes]
predict_result = data_instances.mapValues(lambda x: x.label)
predict_result = predict_result.join(predict_score, | |
from rlcard.utils import *
from rlcard.games.whist.utils import cards2list
import os
class Env(object):
'''
The base Env class. For all the environments in RLCard,
we should base on this class and implement as many functions
as we can.
'''
    def __init__(self, config):
        ''' Initialize the environment

        Args:
            config (dict): A config dictionary. All the fields are
                optional. Currently, the dictionary includes:
                'seed' (int) - An environment-local random seed.
                'env_num' (int) - If env_num>1, the environment will be run
                with multiple processes. Note the implementation is
                in `vec_env.py`.
                'allow_step_back' (boolean) - True if allowing
                step_back.
                'allow_raw_data' (boolean) - True if allow
                raw obs in state['raw_obs'] and raw legal actions in
                state['raw_legal_actions'].
                'single_agent_mode' (boolean) - True if single agent mode,
                i.e., the other players are pretrained models.
                'active_player' (int) - If 'single_agent_mode' is True,
                'active_player' specifies the player that does not use
                pretrained models.

            There can be some game specific configurations, e.g., the
            number of players in the game. These fields should start with
            'game_', e.g., 'game_player_num' we specify the number of
            players in the game. Since these configurations may be game-specific,
            the default settings should be put in the Env subclass. For example,
            the default game configurations for Blackjack should be in
            'rlcard/envs/blackjack.py'
            TODO: Support more game configurations in the future.

        NOTE(review): this base __init__ reads self.game, self.name and
        self.default_game_config, which must already have been set by the
        concrete subclass before calling super().__init__ — confirm in
        the per-game env implementations.
        '''
        self.allow_step_back = self.game.allow_step_back = config['allow_step_back']
        self.allow_raw_data = config['allow_raw_data']
        self.record_action = config['record_action']
        if self.record_action:
            self.action_recorder = []
        # Game specific configurations
        # Currently only support blackjack
        # TODO support game configurations for all the games
        supported_envs = ['blackjack']
        if self.name in supported_envs:
            _game_config = self.default_game_config.copy()
            for key in config:
                if key in _game_config:
                    _game_config[key] = config[key]
            self.game.configure(_game_config)
        # Get the number of players/actions in this game
        self.player_num = self.game.get_player_num()
        self.action_num = self.game.get_action_num()
        # A counter for the timesteps
        self.timestep = 0
        # Modes
        self.single_agent_mode = config['single_agent_mode']
        self.active_player = config['active_player']
        # Load pre-trained models if single_agent_mode=True
        if self.single_agent_mode:
            self.model = self._load_model()
            # If at least one pre-trained agent needs raw data, we set self.allow_raw_data = True
            for agent in self.model.agents:
                if agent.use_raw:
                    self.allow_raw_data = True
                    break
        # Set random seed, default is None
        self._seed(config['seed'])
    def reset(self):
        '''
        Reset environment in single-agent mode
        Call `_init_game` if not in single agent mode

        In single-agent mode, pretrained agents play until it is the
        active player's turn, then that player's extracted state is
        returned. If the game finishes before the active player acts,
        the game is re-initialized and the fast-forward is retried.
        '''
        if not self.single_agent_mode:
            return self._init_game()
        while True:
            state, player_id = self.game.init_game()
            # Fast-forward: let the pretrained agents act until the active
            # player is to move.
            while not player_id == self.active_player:
                self.timestep += 1
                action, _ = self.model.agents[player_id].eval_step(
                    self._extract_state(state))
                if not self.model.agents[player_id].use_raw:
                    action = self._decode_action(action)
                state, player_id = self.game.step(action)
            if not self.game.is_over():
                break
        return self._extract_state(state)
    def step(self, action, raw_action=False):
        ''' Step forward

        Args:
            action (int): The action taken by the current player
            raw_action (boolean): True if the action is a raw action

        Returns:
            (tuple): Tuple containing:
                (dict): The next state
                (int): The ID of the next player
        '''
        if not raw_action:
            action = self._decode_action(action)
        if self.single_agent_mode:
            # Single-agent mode delegates the whole opponent loop.
            return self._single_agent_step(action)
        self.timestep += 1
        # Record the action for human interface
        if self.record_action:
            self.action_recorder.append([self.get_player_id(), action])
        next_state, player_id = self.game.step(action)
        return self._extract_state(next_state), player_id
    def step_back(self):
        ''' Take one step backward.

        Returns:
            (tuple): Tuple containing:
                (dict): The previous state
                (int): The ID of the previous player
            Returns False when the game refuses to step back (e.g. at the
            root node of the game tree).

        Note: Error will be raised if step back is disabled.
        '''
        if not self.allow_step_back:
            raise Exception(
                'Step back is off. To use step_back, please set allow_step_back=True in rlcard.make')
        if not self.game.step_back():
            return False
        player_id = self.get_player_id()
        state = self.get_state(player_id)
        return state, player_id
def set_agents(self, agents):
'''
Set the agents that will interact with the environment.
This function must be called before `run`.
Args:
agents (list): List of Agent classes
'''
if self.single_agent_mode:
raise ValueError(
'Setting agent in single agent mode or human mode is not allowed.')
self.agents = agents
# If at least one agent needs raw data, we set self.allow_raw_data = True
for agent in self.agents:
if agent.use_raw:
self.allow_raw_data = True
break
    def run(self, is_training=False):
        '''
        Run a complete game, either for evaluation or training RL agent.

        Args:
            is_training (boolean): True if for training purpose.

        Returns:
            (tuple) Tuple containing:
                (list): A list of trajectories generated from the environment.
                (list): A list payoffs. Each entry corresponds to one player.
                (int): A heuristic game difficulty (0 when training).

        Note: The trajectories are 3-dimension list. The first dimension is for different players.
            The second dimension is for different transitions. The third dimension is for the contents of each transiton

        NOTE(review): in the evaluation branch below, the scoring loop
        reuses and overwrites ``player_id`` (the player returned by
        reset()). After that loop ``player_id`` is always the last player
        index, so the initial state is appended to the LAST player's
        trajectory and the first action is taken by that agent — likely a
        variable-shadowing bug; confirm intent before changing.
        '''
        if self.single_agent_mode:
            raise ValueError('Run in single agent not allowed.')
        trajectories = [[] for _ in range(self.player_num)]
        state, player_id = self.reset()
        player_hand = [0, 0, 0, 0]
        difficulty = 0
        #print(self.game.trump_suit)
        if not is_training:
            # Heuristic hand strength: trump cards are worth their base
            # value plus 13; A/K/Q/J/T map to 14/13/12/11/10.
            for player_id in range(self.player_num):
                #print(cards2list(self.game.players[player_id].hand))
                for card in self.game.players[player_id].hand:
                    if card.suit == self.game.trump_suit:
                        if card.rank == 'A':
                            player_hand[player_id] += 27
                        elif card.rank == 'K':
                            player_hand[player_id] += 26
                        elif card.rank == 'Q':
                            player_hand[player_id] += 25
                        elif card.rank == 'J':
                            player_hand[player_id] += 24
                        elif card.rank == 'T':
                            player_hand[player_id] += 23
                        else:
                            player_hand[player_id] += (int(card.rank) + 13)
                    else:
                        if card.rank == 'A':
                            player_hand[player_id] += 14
                        elif card.rank == 'K':
                            player_hand[player_id] += 13
                        elif card.rank == 'Q':
                            player_hand[player_id] += 12
                        elif card.rank == 'J':
                            player_hand[player_id] += 11
                        elif card.rank == 'T':
                            player_hand[player_id] += 10
                        else:
                            player_hand[player_id] += int(card.rank)
            #print(player_hand)
            # Difficulty = best hand of team {0,2} minus best hand of
            # team {1,3} — presumably partners sit opposite; confirm.
            score_1 = max(player_hand[0], player_hand[2])
            score_2 = max(player_hand[1], player_hand[3])
            difficulty = score_1 - score_2
        # Loop to play the game
        trajectories[player_id].append(state)
        while not self.is_over():
            # Agent plays
            if not is_training:
                action, _=self.agents[player_id].eval_step(state)
            else:
                action=self.agents[player_id].step(state)
            # Environment steps
            next_state, next_player_id=self.step(
                action, self.agents[player_id].use_raw)
            # Save action
            trajectories[player_id].append(action)
            # Set the state and player
            state=next_state
            player_id=next_player_id
            # Save state.
            if not self.game.is_over():
                trajectories[player_id].append(state)
        # Add a final state to all the players
        for player_id in range(self.player_num):
            state=self.get_state(player_id)
            trajectories[player_id].append(state)
        # Payoffs
        payoffs=self.get_payoffs()
        # print("start")
        # print(trajectories)
        # print()
        # Reorganize the trajectories
        trajectories=reorganize(trajectories, payoffs)
        return trajectories, payoffs, difficulty
def run_example(self, log_location, is_training=False):
'''
Run a complete game, either for evaluation or training RL agent.
Args:
is_training (boolean): True if for training purpose.
Returns:
(tuple) Tuple containing:
(list): A list of trajectories generated from the environment.
(list): A list payoffs. Each entry corresponds to one player.
Note: The trajectories are 3-dimension list. The first dimension is for different players.
The second dimension is for different transitions. The third dimension is for the contents of each transiton
'''
if self.single_agent_mode:
raise ValueError('Run in single agent not allowed.')
trajectories=[[] for _ in range(self.player_num)]
state, player_id=self.reset()
player_hand = [0, 0, 0, 0]
if not is_training:
for player_id in range(self.player_num):
#print(cards2list(self.game.players[player_id].hand))
for card in self.game.players[player_id].hand:
if card.suit == self.game.trump_suit:
if card.rank == 'A':
player_hand[player_id] += 27
elif card.rank == 'K':
player_hand[player_id] += 26
elif card.rank == 'Q':
player_hand[player_id] += 25
elif card.rank == 'J':
player_hand[player_id] += 24
elif card.rank == 'T':
player_hand[player_id] += 23
else:
player_hand[player_id] += (int(card.rank) + 13)
else:
if card.rank == 'A':
player_hand[player_id] += 14
elif card.rank == 'K':
player_hand[player_id] += 13
elif card.rank == 'Q':
player_hand[player_id] += 12
elif card.rank == 'J':
player_hand[player_id] += 11
elif card.rank == 'T':
player_hand[player_id] += 10
else:
player_hand[player_id] += int(card.rank)
#print(player_hand)
score_1 = max(player_hand[0], player_hand[2])
score_2 = max(player_hand[1], player_hand[3])
difficulty = score_1 - score_2
# Loop to play the game
trajectories[player_id].append(state)
i=1
while not self.is_over():
# Agent plays
if not is_training:
action, _=self.agents[player_id].eval_step(state)
else:
action=self.agents[player_id].step(state)
# Environment steps
next_state, next_player_id=self.step(
action, self.agents[player_id].use_raw)
# Save action
trajectories[player_id].append(action)
if i % 4 == 0:
# print("")
# print("Player 0 hand:", cards2list(self.game.players[0].hand))
# print("Player 1 hand:", cards2list(self.game.players[1].hand))
# print("Player 2 hand:", cards2list(self.game.players[2].hand))
# print("Player 3 hand:", cards2list(self.game.players[3].hand))
# print("Lead player:", self.game.round.lead_player)
# print("Trump Suit:", self.game.trump_suit)
# print("Playing Card:", self.game.round.played_card)
# print("Played Cards:", cards2list(self.game.round.played_cards))
# print("Winner:", self.game.round.round_winner, "Winning card:", self.game.round.played_cards[self.game.round.winning_card])
# print("Score:", self.game.players[0].tricks, self.game.players[1].tricks, self.game.players[2].tricks, self.game.players[3].tricks)
with open(log_location, "a") as file_object:
file_object.write("\n")
file_object.write(
"Difficulty: " + str(difficulty) + "\n")
file_object.write(
"Player 0 hand: " + str(cards2list(self.game.players[0].hand)) + "\n")
file_object.write(
"Player 1 hand: " + str(cards2list(self.game.players[1].hand)) + "\n")
file_object.write(
"Player 2 hand: " + str(cards2list(self.game.players[2].hand)) + "\n")
file_object.write(
"Player 3 hand: " + str(cards2list(self.game.players[3].hand)) + "\n")
file_object.write("Lead player: " + \
str(self.game.round.last_lead) + "\n")
file_object.write(
"Trump Suit: " + self.game.trump_suit + "\n")
# file_object.write("Playing Card: " + self.game.round.played_card.__str__() + "\n")
| |
# repository: dbis-ilm/grizzly (10-100 stars)
from grizzly.expression import ExpressionException
import unittest
import sqlite3
import re
import grizzly
from grizzly.aggregates import AggregateType
from grizzly.sqlgenerator import SQLGenerator
from grizzly.relationaldbexecutor import RelationalExecutor
class CodeMatcher(unittest.TestCase):
    """TestCase mixin for fuzzy SQL comparison: ``$tN`` placeholders in a
    template are unified with the generator's ``_tN`` aliases in the actual
    snippet, and the texts are compared ignoring whitespace and case."""

    def matchSnipped(self, snipped, template, removeLinebreaks: bool = False):
        """Assert that *snipped* matches *template* up to alias renaming."""
        res, mapping, reason = CodeMatcher.doMatchSnipped(
            snipped.strip(), template.strip(), removeLinebreaks)
        if res:
            return
        mapstr = "with mapping:\n" + "".join(
            f"\t{templ} -> {tVar}\n" for templ, tVar in mapping.items())
        self.fail(f"Mismatch\nFound: {snipped}\nExpected: {template}\nReason:\t{reason}\n{mapstr}")

    @staticmethod
    def doMatchSnipped(snipped, template, removeLinebreaks):
        """Return ``(matched, mapping, reason)`` for snippet vs template.

        NOTE(review): *removeLinebreaks* is accepted but currently unused —
        linebreaks and spaces are always stripped before comparison.
        """
        placeholder_re = re.compile(r"\$t[0-9]+")
        alias_re = re.compile("_t[0-9]+")
        placeholders = placeholder_re.findall(template)
        occurences = alias_re.findall(snipped)
        mapping = {}
        for tpl_name, alias in zip(placeholders, occurences):
            bound = mapping.setdefault(tpl_name, alias)
            if bound != alias:
                return False, mapping, f"Mapping error: {tpl_name} -> {mapping[tpl_name]} exists, but {tpl_name} -> {alias} found"
        # Placeholder and alias counts must agree exactly.
        if len(placeholders) != len(occurences):
            return False, mapping, f"number of placeholders {len(placeholders)} does not match occurences {len(occurences)}"
        for tpl_name, alias in mapping.items():
            template = template.replace(tpl_name, alias)
        def normalize(text):
            return text.replace("\n", "").replace(" ", "").lower()
        if normalize(snipped) == normalize(template):
            return True, mapping, ""
        return False, mapping, "Snipped does not match template"
class DataFrameTest(CodeMatcher):
    def setUp(self):
        """Wire grizzly to a local SQLite database before each test."""
        c = sqlite3.connect("grizzly.db")
        gen = SQLGenerator("sqlite")
        executor = RelationalExecutor(c, gen)
        grizzly.use(executor)
    def tearDown(self):
        """Release the grizzly executor/connection after each test."""
        grizzly.close()
    def test_groupby(self):
        """Group-by with a MEAN aggregate generates the expected nested query."""
        df = grizzly.read_table("events")
        g = df.groupby(["theyear","actor1name"])
        a = g.agg(col="actor2name", aggType=AggregateType.MEAN)
        # expected = "select $t0.theyear, $t0.actor1name, avg($t0.actor2name) from events $t0 group by $t0.theyear, $t0.actor1name"
        expected = "select $t1.theyear, $t1.actor1name, avg($t1.actor2name) from (select * from events $t0) $t1 group by $t1.theyear, $t1.actor1name"
        actual = a.generateQuery()
        self.matchSnipped(actual, expected)
    def test_Having(self):
        """Filtering on an aggregate alias generates a HAVING clause."""
        df = grizzly.read_table("events")
        g = df.groupby(["theyear","actor1name"])
        a = g.agg(col="actor2name", aggType=AggregateType.COUNT,alias="cnt_actor")
        f = a.filter(a["cnt_actor"] > 2)
        expected = "select $t1.theyear, $t1.actor1name, count($t1.actor2name) as cnt_actor from (select * from events $t0) $t1 group by $t1.theyear, $t1.actor1name having cnt_actor > 2"
        actual = f.generateQuery()
        self.matchSnipped(actual, expected)
def test_HavingExec(self):
df = grizzly.read_table("events")
g = df.groupby(["actor1name"])
a = g.agg(col="actor2name", aggType=AggregateType.COUNT,alias="cnt_actor")
f = a.filter(a["cnt_actor"] > 2)
actual = f.collect()
self.assertEqual(len(actual), 872)
failedTuples = []
for (actor1name,cnt_actor) in actual:
if cnt_actor > 2:
failedTuples.append( (actor1name, cnt_actor) )
if len(failedTuples) <= 0:
msg = ",".join(failedTuples)
self.fail(f"tuples not matching having clause: {msg}")
def test_groupByTableAggComputedCol(self):
df = grizzly.read_table("events")
g = df.groupby(["theyear","actor1name"])
g["cnt_actor"] = g.count("actor2name") # FIXME: should not trigger execution but add the function to projection
g["min_actor"] = g.min(g.actor2name)
expected = "select $t1.theyear, $t1.actor1name, count($t1.actor2name) as cnt_actor, min($t1.actor2name) as min_actor from (select * from events $t0) $t1 group by $t1.theyear, $t1.actor1name"
actual = g.generateQuery()
self.matchSnipped(actual, expected)
def test_HavingTwice(self):
df = grizzly.read_table("events")
g = df.groupby(["theyear","actor1name"])
a = g.agg(col="actor2name", aggType=AggregateType.COUNT,alias="cnt_actor")
a = a.agg(col="actor2name", aggType=AggregateType.MIN,alias="min_actor")
f = a.filter(a["cnt_actor"] > 2)
f = f.filter(a["min_actor"] > 10)
expected = "select $t1.theyear, $t1.actor1name, count($t1.actor2name) as cnt_actor, min($t1.actor2name) as min_actor from (select * from events $t0) $t1 group by $t1.theyear, $t1.actor1name having cnt_actor > 2 and min_actor > 10"
actual = f.generateQuery()
self.matchSnipped(actual, expected)
def test_HavingTwiceExpr(self):
df = grizzly.read_table("events")
g = df.groupby(["theyear","actor1name"])
a = g.agg(col="actor2name", aggType=AggregateType.COUNT,alias="cnt_actor")
a = a.agg(col="actor2name", aggType=AggregateType.MIN,alias="min_actor")
f = a.filter((a["cnt_actor"] > 2) & (a["min_actor"] > 10))
expected = "select $t1.theyear, $t1.actor1name, count($t1.actor2name) as cnt_actor, min($t1.actor2name) as min_actor from (select * from events $t0) $t1 group by $t1.theyear, $t1.actor1name having cnt_actor > 2 and min_actor > 10"
actual = f.generateQuery()
self.matchSnipped(actual, expected)
def test_ComputedExpr(self):
df = grizzly.read_table("events")
df = df[df.globaleventid == 476829606]
df["newcol"] = df.theyear + df.monthyear
df = df[[df.newcol, df.theyear, df.monthyear]]
res = df.collect()
self.assertEqual(len(res), 1)
self.assertEqual(len(res[0]), 3)
theYear = 2015
monthYear = 201510
self.assertEqual(res[0][1], theYear)
self.assertEqual(res[0][2], monthYear)
self.assertEqual(res[0][0], theYear + monthYear)
def test_New(self):
df = grizzly.read_table("events")
df = df["a"]
df = df[df["a"] == 2]
actual = df.generateQuery()
expected = "select * from (select $t1.a from (select * from events $t0) $t1) $t2 where $t2.a = 2"
self.matchSnipped(actual, expected)
def test_selectStar(self):
df = grizzly.read_table("events")
actual = df.generateQuery()
expected = "select * from events $t0"
self.matchSnipped(actual, expected)
def test_selectCountStar(self):
df = grizzly.read_table("events")
actual = df.count()
self.assertEqual(actual, 30354)
def test_selectStarFilter(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == 468189636]
actual = df.generateQuery()
expected = "select * from (select * from events $t0) $t1 where $t1.globaleventid = 468189636"
self.matchSnipped(actual, expected)
def test_selectStarFilterString(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == 'abc']
actual = df.generateQuery()
expected = "select * from (select * from events $t0) $t1 where $t1.globaleventid = 'abc'"
self.matchSnipped(actual, expected)
def test_selectColumnWithFilter(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == 468189636]
df = df['goldsteinscale']
actual = df.generateQuery()
# expected = "select $t0.goldsteinscale from events $t0 where $t0.globaleventid = 468189636"
expected = "select $t2.goldsteinscale from (select * from (select * from events $t0) $t1 where $t1.globaleventid = 468189636) $t2"
self.matchSnipped(actual, expected)
def test_selectCountCol(self):
df = grizzly.read_table("events")
cnt = df.count('actor2name')
self.assertGreater(cnt, 0)
def test_selectStarGroupBy(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == '468189636']
g = df.groupby(["theyear","monthyear"])
actual = g.generateQuery()
expected = "select $t2.theyear, $t2.monthyear from (select * from (select * from events $t0) $t1 where $t1.globaleventid = '468189636') $t2 group by $t2.theyear, $t2.monthyear"
self.matchSnipped(actual, expected)
def test_groupByComputedCol(self):
from grizzly.generator import GrizzlyGenerator
oldGen = GrizzlyGenerator._backend.queryGenerator
newGen = SQLGenerator("postgresql")
GrizzlyGenerator._backend.queryGenerator = newGen
def mymod(s: str) -> int:
return len(s) % 2
df = grizzly.read_table("nation")
df["computed"] = df[df.n_name].map(mymod)
df = df.groupby("computed")
df = df.agg(col = "*", aggType = AggregateType.COUNT)
actual = df.generateQuery()
sql = "select computed, count(*) from (select *,mymod($t0.n_name) as computed from nation $t0) $t1 group by computed"
expected = f"""create or replace function mymod(s varchar(1024)) returns int language plpython3u as 'return len(s) % 2' parallel safe;{sql}"""
GrizzlyGenerator._backend.queryGenerator = oldGen
self.matchSnipped(actual, expected)
def test_groupByWithAggTwice(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == 476829606]
g = df.groupby(["theyear","monthyear"])
agged = g.agg(col="actor2geo_type", aggType=AggregateType.COUNT)
aggActual = agged.generateQuery()
aggExpected = "select $t2.theyear, $t2.monthyear, count($t2.actor2geo_type) from (select * from (select * from events $t0) $t1 where $t1.globaleventid = 476829606) $t2 group by $t2.theyear, $t2.monthyear"
self.matchSnipped(aggActual, aggExpected)
def test_groupByAggGroupCol(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == 476829606]
g = df.groupby(["theyear","monthyear"])
cnt = g.count("monthyear", "cnt")
# expected = "select count($t2.monthyear) as cnt from (select $t1.theyear, $t1.monthyear from (select * from (select * from events $t3) $t0 where $t0.globaleventid = 476829606) $t1 group by $t1.theyear, $t1.monthyear) $t2"
# self.matchSnipped(actual, expected)
self.assertEqual(cnt, 1)
def test_groupByAggGroupColCode(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == 476829606]
g = df.groupby(["theyear","monthyear"])
actual = g.agg(col="monthyear", aggType=AggregateType.COUNT, alias="cnt").generateQuery()
expected = "select count($t2.monthyear) as cnt from (select $t1.theyear, $t1.monthyear from (select * from (select * from events $t3) $t0 where $t0.globaleventid = 476829606) $t1 group by $t1.theyear, $t1.monthyear) $t2"
self.matchSnipped(actual, expected)
def test_groupByAgg(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == 476829606]
g = df.groupby(["theyear","monthyear"])
a = g.count("actor1name", "cnt")
# print(f"cnt: {a}")
self.assertEquals(len(a.collect()),1)
def test_groupByAggLimit(self):
df = grizzly.read_table("events")
df1 = df[(df.globaleventid < 470259271) & (df.actor1name != None)]
df1 = df1.groupby(df1.actor1name)
df1 = df1.count(df1.actor2name, alias="cnt_actor2")
df1 = df1[:2]
actual = df1.generateQuery()
expected = "select $t3.* from (select $t2.actor1name, count($t2.actor2name) as cnt_actor2 from (select * from (select * from events $t0) $t1 where $t1.globaleventid < 470259271 and $t1.actor1name is not null) $t2 group by $t2.actor1name) $t3 LIMIT 2"
self.matchSnipped(actual, expected)
def test_groupByCountGroups(self):
df = grizzly.read_table("events")
g = df.groupby("theyear")
a = g.count("theyear")
self.assertEqual(a, 3)
def test_joinTest(self):
df = grizzly.read_table("events")
df = df[df['globaleventid'] == 470259271]
df2 = grizzly.read_table("events")
joined = df.join(other = df2, on=["globaleventid", "globaleventid"], how = "inner")
actual = joined.generateQuery()
# expected = "SELECT * FROM events $t1 inner join events $t2 ON $t1.globaleventid = $t2.globaleventid where $t1.globaleventid = 470259271"
expected = "select * from (select * from (select * from events $t0) $t1 where $t1.globaleventid = 470259271) $t4 inner join (select * from events $t2) $t5 on $t4.globaleventid = $t5.globaleventid"
self.matchSnipped(actual, expected)
# self.assertGreater(joined.count(), 0)
def test_complexJoin(self):
df1 = grizzly.read_table("t1")
df2 = grizzly.read_table("t2")
j = df1.join(df2, on = (df1['a'] == df2['b']) & (df1['c'] <= df2['d']) , how="left outer")
# expected = "SELECT * FROM t1 $t0 LEFT OUTER JOIN t2 $t2 ON $t0.a = $t2.b AND $t0.c <= $t2.d".lower()
expected = "select * from (select * from t1 $t1) $t3 left outer join (select * from t2 $t2) $t4 on $t3.a = $t4.b and $t3.c <= $t4.d"
| |
<filename>tools/idl_parser/idl_parser.py
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parser for Web IDL."""
#
# IDL Parser
#
# The parser uses the PLY yacc library to build a set of parsing rules based
# on Web IDL.
#
# Web IDL, and Web IDL grammar can be found at:
# http://heycam.github.io/webidl/
# PLY can be found at:
# http://www.dabeaz.com/ply/
#
# The parser generates a tree by recursively matching sets of items against
# defined patterns. When a match is made, that set of items is reduced
# to a new item. The new item can provide a match for parent patterns.
# In this way an AST is built (reduced) depth first.
#
#
# Disable check for line length and Member as Function due to how grammar rules
# are defined with PLY
#
# pylint: disable=R0201
# pylint: disable=C0301
from __future__ import print_function
import os.path
import sys
import time
# Can't use relative imports if we don't have a parent package.
if __package__:
from .idl_lexer import IDLLexer
from .idl_node import IDLAttribute, IDLNode
else:
from idl_lexer import IDLLexer
from idl_node import IDLAttribute, IDLNode
SRC_DIR = os.path.abspath(os.path.dirname(__file__))
# Preserve sys.path[0] as is.
# https://docs.python.org/3/library/sys.html?highlight=path[0]#sys.path
sys.path.insert(1, os.path.join(SRC_DIR, os.pardir, os.pardir, 'third_party'))
from ply import lex
from ply import yacc
#
# ERROR_REMAP
#
# Maps the standard error formula into a more friendly error message.
#
# Maps PLY's mechanical "Unexpected X after Y" messages to friendlier
# diagnostics surfaced to IDL authors.
ERROR_REMAP = {
  'Unexpected ")" after "(".' : 'Empty argument list.',
  'Unexpected ")" after ",".' : 'Missing argument.',
  'Unexpected "}" after ",".' : 'Trailing comma in block.',
  'Unexpected "}" after "{".' : 'Unexpected empty block.',
  'Unexpected comment after "}".' : 'Unexpected trailing comment.',
  'Unexpected "{" after keyword "enum".' : 'Enum missing name.',
  'Unexpected "{" after keyword "struct".' : 'Struct missing name.',
  'Unexpected "{" after keyword "interface".' : 'Interface missing name.',
}

# Extended attributes that may be applied to types (as opposed to members);
# see https://heycam.github.io/webidl/#extended-attributes-applicable-to-types
_EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = [
    'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs']
def Boolean(val):
  """Convert *val* to a strict bool (True for truthy, False for falsy).

  Simplified: bool() performs exactly the truthiness test the original
  if/return pair spelled out by hand.
  """
  return bool(val)
def ListFromConcat(*items):
  """Generate a flat list by concatenating inputs.

  Each item that is a list is extended into the result; any other non-None
  item is appended; None items are skipped.

  Idiom fix: replaced `type(item) is not type([])` with isinstance(), the
  standard (and subclass-friendly) type test.
  """
  itemsout = []
  for item in items:
    if item is None:
      continue
    if isinstance(item, list):
      itemsout.extend(item)
    else:
      itemsout.append(item)
  return itemsout
def ExpandProduction(p):
if type(p) == list:
return '[' + ', '.join([ExpandProduction(x) for x in p]) + ']'
if type(p) == IDLNode:
return 'Node:' + str(p)
if type(p) == IDLAttribute:
return 'Attr:' + str(p)
if type(p) == str:
return 'str:' + p
return '%s:%s' % (p.__class__.__name__, str(p))
# TokenTypeName
#
# Generate a string which has the type and value of the token.
#
def TokenTypeName(t):
  """Generate a human-readable string naming the type and value of token *t*."""
  kind = t.type
  if kind == 'SYMBOL':
    return 'symbol %s' % t.value
  elif kind in ('HEX', 'INT', 'OCT', 'FLOAT'):
    return 'value %s' % t.value
  elif kind == 'string':
    return 'string "%s"' % t.value
  elif kind == 'SPECIAL_COMMENT':
    return 'comment'
  elif kind == t.value:
    # Punctuation tokens are their own type (e.g. ';', '{').
    return '"%s"' % t.value
  elif kind == ',':
    return 'Comma'
  elif kind == 'identifier':
    return 'identifier "%s"' % t.value
  # Everything else is a language keyword.
  return 'keyword "%s"' % t.value
# TODO(bashi): Consider moving this out of idl_parser.
# TODO(bashi): Consider moving this out of idl_parser.
def ExtractSpecialComment(comment):
  """Strip the /** ... */ markers and per-line '*' prefixes from *comment*.

  Raises ValueError when the text is not a well-formed special comment.
  """
  if not comment.startswith('/**'):
    raise ValueError('Special comment must start with /**')
  if not comment.endswith('*/'):
    raise ValueError('Special comment must end with */')
  body = comment[2:-2]
  stripped = []
  for raw_line in body.split('\n'):
    marker = raw_line.find('*')
    if marker < 0:
      # TODO(bashi): We may want to keep |raw_line| as is.
      stripped.append('')
    else:
      # Drop everything through the '*' marker, plus trailing whitespace.
      stripped.append(raw_line[marker + 1:].rstrip())
  return '\n'.join(stripped)
# There are two groups of ExtendedAttributes.
# One group can apply to types (It is said "applicable to types"),
# but the other cannot apply to types.
# This function is intended to divide ExtendedAttributes into those 2 groups.
# For more details at
# https://heycam.github.io/webidl/#extended-attributes-applicable-to-types
# There are two groups of ExtendedAttributes.
# One group can apply to types (It is said "applicable to types"),
# but the other cannot apply to types.
# This function is intended to divide ExtendedAttributes into those 2 groups.
# For more details at
# https://heycam.github.io/webidl/#extended-attributes-applicable-to-types
def DivideExtAttrsIntoApplicableAndNonApplicable(extended_attribute_list):
  """Split an extended-attribute list node into [applicable, non-applicable]."""
  if not extended_attribute_list:
    return [[], []]
  applicable = []
  non_applicable = []
  for ext_attribute in extended_attribute_list.GetChildren():
    bucket = (applicable
              if ext_attribute.GetName() in
              _EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES
              else non_applicable)
    bucket.append(ext_attribute)
  return [applicable, non_applicable]
#
# IDL Parser
#
# The Parser inherits from the Lexer to provide PLY with the tokenizing
# definitions. Parsing patterns are encoded as functions where p_<name>
# is called any time a pattern matching the function documentation is found.
# Patterns are expressed in the form of:
# """ <new item> : <item> ....
# | <item> ...."""
#
# Where new item is the result of a match against one or more sets of items
# separated by the "|".
#
# The function is called with an object 'p' where p[0] is the output object
# and p[n] is the set of inputs for positive values of 'n'. Len(p) can be
# used to distinguish between multiple item sets in the pattern.
#
# The rules can look cryptic at first, but there are a few standard
# transforms from the CST to AST. With these in mind, the actions should
# be reasonably legible.
#
# * Ignore production
# Discard this branch. Primarily used when one alternative is empty.
#
# Sample code:
# if len(p) > 1:
# p[0] = ...
# # Note no assignment if len(p) == 1
#
# * Eliminate singleton production
# Discard this node in the CST, pass the next level down up the tree.
# Used to ignore productions only necessary for parsing, but not needed
# in the AST.
#
# Sample code:
# p[0] = p[1]
#
# * Build node
# The key type of rule. In this parser, produces object of class IDLNode.
# There are several helper functions:
# * BuildProduction: actually builds an IDLNode, based on a production.
# * BuildAttribute: builds an IDLAttribute, which is a temporary
# object to hold a name-value pair, which is then
# set as a Property of the IDLNode when the IDLNode
# is built.
# * BuildNamed: Same as BuildProduction, and sets the 'NAME' property.
# * BuildTrue: BuildAttribute with value True, for flags.
#
# Sample code:
# # Build node of type NodeType, with value p[1], and children.
# p[0] = self.BuildProduction('NodeType', p, 1, children)
#
# # Build named node of type NodeType, with name and value p[1].
# # (children optional)
# p[0] = self.BuildNamed('NodeType', p, 1)
#
# # Make a list
# # Used if one node has several children.
# children = ListFromConcat(p[2], p[3])
# p[0] = self.BuildProduction('NodeType', p, 1, children)
#
# # Also used to collapse the right-associative tree
# # produced by parsing a list back into a single list.
# """Foos : Foo Foos
# |"""
# if len(p) > 1:
# p[0] = ListFromConcat(p[1], p[2])
#
# # Add children.
# # Primarily used to add attributes, produced via BuildTrue.
# # p_StaticAttribute
# """StaticAttribute : STATIC Attribute"""
# p[2].AddChildren(self.BuildTrue('STATIC'))
# p[0] = p[2]
#
# For more details on parsing refer to the PLY documentation at
# http://www.dabeaz.com/ply/
#
# The parser is based on the Web IDL standard. See:
# http://heycam.github.io/webidl/#idl-grammar
#
# Productions with a fractional component in the comment denote additions to
# the Web IDL spec, such as allowing string list in extended attributes.
class IDLParser(object):
  def p_Definitions(self, p):
    """Definitions : SpecialComments ExtendedAttributeList Definition Definitions
                   | ExtendedAttributeList Definition Definitions
                   | """
    # NOTE: the docstring above IS the PLY grammar production; edit with care.
    if len(p) > 4:
      # First alternative: attach special comments + extended attributes
      # to the Definition node, then prepend it to the remaining list.
      special_comments_and_attribs = ListFromConcat(p[1], p[2])
      p[3].AddChildren(special_comments_and_attribs)
      p[0] = ListFromConcat(p[3], p[4])
    elif len(p) > 1:
      # Second alternative: only an ExtendedAttributeList precedes the
      # Definition.  The empty alternative intentionally assigns nothing.
      p[2].AddChildren(p[1])
      p[0] = ListFromConcat(p[2], p[3])
  def p_Definition(self, p):
    """Definition : CallbackOrInterfaceOrMixin
                  | Namespace
                  | Partial
                  | Dictionary
                  | Enum
                  | Typedef
                  | IncludesStatement"""
    # Singleton elimination: pass the matched child node up unchanged.
    p[0] = p[1]
# Error recovery for definition
  # Error recovery for definition
  def p_DefinitionError(self, p):
    """Definition : error ';'"""
    # Resynchronize at the next ';' and emit an error node in the AST.
    p[0] = self.BuildError(p, 'Definition')
  def p_ArgumentNameKeyword(self, p):
    """ArgumentNameKeyword : ASYNC
                           | ATTRIBUTE
                           | CALLBACK
                           | CONST
                           | CONSTRUCTOR
                           | DELETER
                           | DICTIONARY
                           | ENUM
                           | GETTER
                           | INCLUDES
                           | INHERIT
                           | INTERFACE
                           | ITERABLE
                           | MAPLIKE
                           | NAMESPACE
                           | PARTIAL
                           | REQUIRED
                           | SETLIKE
                           | SETTER
                           | STATIC
                           | STRINGIFIER
                           | TYPEDEF
                           | UNRESTRICTED"""
    # Keywords that may also be used as argument names; pass the text up.
    p[0] = p[1]
  def p_CallbackOrInterfaceOrMixin(self, p):
    """CallbackOrInterfaceOrMixin : CALLBACK CallbackRestOrInterface
                                  | INTERFACE InterfaceOrMixin"""
    # Drop the leading keyword token; the rest-node already carries the kind.
    p[0] = p[2]
  def p_InterfaceOrMixin(self, p):
    """InterfaceOrMixin : InterfaceRest
                        | MixinRest"""
    # Singleton elimination: pass the matched child node up unchanged.
    p[0] = p[1]
  def p_InterfaceRest(self, p):
    """InterfaceRest : identifier Inheritance '{' InterfaceMembers '}' ';'"""
    # Build a named Interface node with the inheritance clause and members
    # as its children.
    p[0] = self.BuildNamed('Interface', p, 1, ListFromConcat(p[2], p[4]))
# Error recovery for interface.
  # Error recovery for interface.
  def p_InterfaceRestError(self, p):
    """InterfaceRest : identifier Inheritance '{' error"""
    # A malformed interface body becomes an error node in the AST.
    p[0] = self.BuildError(p, 'Interface')
  def p_Partial(self, p):
    """Partial : PARTIAL PartialDefinition"""
    # Tag the underlying definition node with a PARTIAL flag attribute.
    p[2].AddChildren(self.BuildTrue('PARTIAL'))
    p[0] = p[2]
# Error recovery for Partial
  # Error recovery for Partial
  def p_PartialError(self, p):
    """Partial : PARTIAL error"""
    # A malformed partial definition becomes an error node in the AST.
    p[0] = self.BuildError(p, 'Partial')
def p_PartialDefinition(self, p):
"""PartialDefinition : INTERFACE PartialInterfaceOrPartialMixin
| PartialDictionary
| Namespace"""
if len(p) > 2:
p[0] = | |
'Right Arm': 46,
'Left Leg': 46,
'Right Leg': 46,
'Chest': 46,
'Abdomen': 46
}
# Default per-body-site warm (non-painful) thermode temperatures, used when
# no calibration file is loaded.  Values are in degrees C -- TODO confirm.
participant_settingsWarm = {
'Left Face': 40,
'Right Face': 40,
'Left Arm': 40,
'Right Arm': 40,
'Left Leg': 40,
'Right Leg': 40,
'Chest': 40,
'Abdomen': 40
}
# Non-debug path: load per-participant calibrated temperatures from a
# *_task-Calibration_participants.tsv file chosen by the experimenter.
else:
dlg1 = gui.fileOpenDlg(tryFilePath="", tryFileName="", prompt="Select participant calibration file (*_task-Calibration_participants.tsv)", allowed="Calibration files (*.tsv)")
if dlg1!=None:
if "_task-Calibration_participants.tsv" in dlg1[0]:
# Read in participant info csv and convert to a python dictionary
a = pd.read_csv(dlg1[0], delimiter='\t', index_col=0, header=0, squeeze=True)
# Calibration files are expected to hold exactly 1 row x 39 columns.
if a.shape == (1,39):
participant_settingsHeat = {}
participant_settingsWarm = {}
p_info = [dict(zip(a.iloc[i].index.values, a.iloc[i].values)) for i in range(len(a))][0]
expInfo['subject number'] = p_info['participant_id']
expInfo['gender'] = p_info['gender']
expInfo['handedness'] = p_info['handedness']
# For session 1
bodySites = p_info['calibration_order'][0:4]
# For session 2
# bodySites = p_info['calibration_order'][4:9]
# Heat Settings
# '*_ht' columns are the per-site painful-heat temperatures.
participant_settingsHeat['Left Face'] = p_info['leftface_ht']
participant_settingsHeat['Right Face'] = p_info['rightface_ht']
participant_settingsHeat['Left Arm'] = p_info['leftarm_ht']
participant_settingsHeat['Right Arm'] = p_info['rightarm_ht']
participant_settingsHeat['Left Leg'] = p_info['leftleg_ht']
participant_settingsHeat['Right Leg'] = p_info['rightleg_ht']
participant_settingsHeat['Chest'] = p_info['chest_ht']
participant_settingsHeat['Abdomen'] = p_info['abdomen_ht']
# Warm Settings
# '*_st' columns are sensation thresholds; +1 degree gives a clearly
# perceptible but non-painful warm stimulus -- TODO confirm.
participant_settingsWarm['Left Face'] = p_info['leftface_st']+1
participant_settingsWarm['Right Face'] = p_info['rightface_st']+1
participant_settingsWarm['Left Arm'] = p_info['leftarm_st']+1
participant_settingsWarm['Right Arm'] = p_info['rightarm_st']+1
participant_settingsWarm['Left Leg'] = p_info['leftleg_st']+1
participant_settingsWarm['Right Leg'] = p_info['rightleg_st']+1
participant_settingsWarm['Chest'] = p_info['chest_st']+1
participant_settingsWarm['Abdomen'] = p_info['abdomen_st']+1
# count number of existing sessions and set the session number
bodymap_num = 1
ses_num = 1
expInfo2 = {
'bodymap first- or second-half (1 or 2)': bodymap_num,
'session': ses_num,
'scanner': ''
}
dlg2 = gui.DlgFromDict(title="WASABI Body-Site Scan", dictionary=expInfo2, sortKeys=False)
expInfo['session'] = expInfo2['session']
expInfo['scanner'] = expInfo2['scanner']
if dlg2.OK == False:
core.quit() # user pressed cancel
else:
errorDlg1 = gui.Dlg(title="Error - invalid file")
errorDlg1.addText("Selected file is not a valid calibration file. Data is incorrectly formatted. (Wrong dimensions)")
errorDlg1.show()
dlg1=None
else:
errorDlg2 = gui.Dlg(title="Error - invalid file")
errorDlg2.addText("Selected file is not a valid calibration file. Name is not formatted sub-XXX_task-Calibration_participant.tsv")
errorDlg2.show()
dlg1=None
# Fallback: no (valid) calibration file -- collect temperatures manually.
if dlg1==None:
dlg2 = gui.DlgFromDict(title="WASABI Body-Site Scan", dictionary=expInfo, sortKeys=False)
if dlg2.OK == False:
core.quit() # user pressed cancel
pphDlg = gui.DlgFromDict(participant_settingsHeat,
title='Participant Heat Parameters')
if pphDlg.OK == False:
core.quit()
ppwDlg = gui.DlgFromDict(participant_settingsWarm,
title='Participant Warmth Parameters')
if ppwDlg.OK == False:
core.quit()
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# NOTE(review): these compare against the string '1'/'2', but the dialog
# default above is the int 1 -- verify DlgFromDict returns strings here.
if expInfo['bodymap first- or second-half (1 or 2)'] == '1':
expName = 'bodymap1'
if expInfo['bodymap first- or second-half (1 or 2)'] == '2':
expName = 'bodymap2'
"""
3. Setup the Window
DBIC uses a Panasonic DW750 Projector with a native resolution of 1920x1200 (16:10), but it is configured at 1920x1080 (16:9) at DBIC
Configure a black window with a 16:9 aspect ratio during development (1280x720) and production (1920x1080)
fullscr = False for testing, True for running participants
"""
if debug == 1:
win = visual.Window(
size=[1280, 720], fullscr=False,
screen=0, # Change this to the appropriate display
winType='pyglet', allowGUI=True, allowStencil=True,
monitor='testMonitor', color=[-1.000,-1.000,-1.000], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
else:
win = visual.Window(
size=[1920, 1080], fullscr=True,
screen=4, # Change this to the appropriate fMRI projector
winType='pyglet', allowGUI=True, allowStencil=True,
monitor='testMonitor', color=[-1.000,-1.000,-1.000], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
"""
4. Prepare Experimental Dictionaries for Body-Site Cues and Medoc Temperature Programs
"""
## Check gender for Chest cue
Chest_imgPath = os.sep.join([stimuli_dir,"cue","ChestF.png"])
if expInfo['gender'] in {"M", "m", "Male", "male"}:
Chest_imgPath = os.sep.join([stimuli_dir,"cue","ChestM.png"])
elif expInfo['gender'] in {"F", "f", "Female", "female"}:
Chest_imgPath = os.sep.join([stimuli_dir,"cue","ChestF.png"])
bodysite_word2img = {"Left Face": os.sep.join([stimuli_dir,"cue","LeftFace.png"]),
"Right Face": os.sep.join([stimuli_dir,"cue","RightFace.png"]),
"Left Arm": os.sep.join([stimuli_dir,"cue","LeftArm.png"]),
"Right Arm": os.sep.join([stimuli_dir,"cue","RightArm.png"]),
"Left Leg": os.sep.join([stimuli_dir,"cue","LeftLeg.png"]),
"Right Leg": os.sep.join([stimuli_dir,"cue","RightLeg.png"]),
"Chest": Chest_imgPath,
"Abdomen": os.sep.join([stimuli_dir,"cue","Abdomen.png"])
}
bodysite_word2heatcode = {"Left Face": leftface_heat,
"Right Face": rightface_heat,
"Left Arm": leftarm_heat,
"Right Arm": rightarm_heat,
"Left Leg": leftleg_heat,
"Right Leg": rightleg_heat,
"Chest": chest_heat,
"Abdomen": abdomen_heat
}
bodysite_word2warmcode = {"Left Face": leftface_warm,
"Right Face": rightface_warm,
"Left Arm": leftarm_warm,
"Right Arm": rightarm_warm,
"Left Leg": leftleg_warm,
"Right Leg": rightleg_warm,
"Chest": chest_warm,
"Abdomen": abdomen_warm
}
bodysite_word2imaginecode = {"Left Face": leftface_imagine,
"Right Face": rightface_imagine,
"Left Arm": leftarm_imagine,
"Right Arm": rightarm_imagine,
"Left Leg": leftleg_imagine,
"Right Leg": rightleg_imagine,
"Chest": chest_imagine,
"Abdomen": abdomen_imagine
}
# Set up a dictionary for all the configured Medoc programs for the main thermode
thermode1_temp2program = {}
with open("thermode1_programs.txt") as f:
for line in f:
(key, val) = line.split()
thermode1_temp2program[float(key)] = int(val)
"""
5. Create Body-Site Pairs for each run for this participant
"""
# EAFP: Easier to ask forgiveness than permission style
# See if bodysite order was generated by the calibration file, otherwise make a new one.
try:
bodySites
except NameError:
bodySites_exists = False
else:
bodySites_exists = True
if bodySites_exists == False:
# a. Initialize 4 runs worth of body-site arrays
bodySites = ["Left Face", "Right Face", "Left Arm", "Right Arm", "Left Leg", "Right Leg", "Chest", "Abdomen"]
bodySites = bodySites[0:4]
random.shuffle(bodySites)
expInfo['body_site_order'] = str(bodySites)
"""
6. Prepare files to write
"""
psypy_filename = _thisDir + os.sep + u'data/%03d_%s_%s' % (int(expInfo['subject number']), expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='1.1.0',
extraInfo=expInfo, runtimeInfo=None,
savePickle=True, saveWideText=True,
dataFileName=psypy_filename)
# save a log file for detail verbose info
logFile = logging.LogFile(psypy_filename+'.log', level=logging.ERROR)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
"""
7. Initialize Trial-level Components
"""
# General Instructional Text
start_msg = 'Please wait. \nThe scan will begin shortly. \n Experimenter press [s] to continue.'
in_between_run_msg = 'Thank you.\n Please wait for the next run to start. \n Experimenter press [e] to continue.'
end_msg = 'This is the end of the experiment. \nPlease wait for instructions from the experimenter'
stimtrialTime = 13 # This becomes very unreliable with the use of poll_for_change().
poststimTime = 5 # Ensure that nonstimtrialTime - poststimTime is at least 5 or 6 seconds.
nonstimtrialTime = 13 # trial time in seconds (ISI)
#############
# Body Mapping Components
#############
# Initialize components for Routine "Introduction"
IntroductionClock = core.Clock()
Begin = visual.TextStim(win=win, name='Begin',
text='Thank you. \nPlease wait for the experimenter to press [Space].',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0,
anchorHoriz='center')
BeginTask = keyboard.Keyboard()
# Initialize components for Routine "BodySiteInstruction"
# Shown to the experimenter while the thermode is repositioned.
BodySiteInstructionClock = core.Clock()
BodySiteInstructionRead = keyboard.Keyboard()
BodySiteInstructionText = visual.TextStim(win, name='BodySiteInstructionText',
text="Experimenter: Please place thermodes on the designated body-site.",
font = 'Arial',
pos=(0, -.2), height=0.05, wrapWidth=1.6, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0,
anchorHoriz='center')
# Image of the current body site, drawn above the instruction text.
BodySiteImg = visual.ImageStim(
win=win,
name='BodySiteImg',
mask=None,
ori=0, pos=(0, 0), size=(.40,.40),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=512, interpolate=True, depth=0.0)
# Initialize components for Routine "ImaginationInstruction"
ImaginationInstructionClock = core.Clock()
ImaginationInstructionRead = keyboard.Keyboard()
# Create experimenter instructions
ImagineInstructionText = visual.TextStim(win, name='ImageInstructionText',
text="During this scan, you will occasionally see picture cues of the body part where the thermode is attached. When you see this, try to imagine as hard as you can that the thermal stimulations are more painful than they are. Try to focus on how unpleasant the pain is, for instance, how strongly you would like to remove yourself from it. Pay attention to the burning, stinging and shooting sensations. You can use your mind to turn up the dial of the pain, much like turning up the volume dial on a stereo. As you feel the pain rise in intensity, imagine it rising faster and faster and going higher and higher. Picture your skin being held up against a glowing hot metal or fire. Think of how disturbing it is to be burned, and visualize your skin sizzling, melting, and bubbling as a result of the intense heat.",
font = 'Arial',
pos=(0, 0), height=0.06, wrapWidth=1.5, ori=0, # Edit wrapWidth for the 1920 full screen
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0,
anchorHoriz='center')
ImagineInstructionText.size=(1,1)
# Initialize components for Routine "StimTrial"
# Four Conditions: Hot, Warm, Rest
StimTrialClock = core.Clock()
fix_cross = visual.TextStim(win = win, text = '+', color = [1,1,1], height = 0.3, anchorHoriz='center')
# Initialize components for Routine "NonStimTrial"
# 2 Conditions: Imagine
NonStimTrialClock = core.Clock()
# fix_cross = visual.TextStim(win = win, text = '+', color = [1,1,1], height = 0.3)
image_shortinstr = "Your skin is being held up against a glowing hot metal or fire. \nVisualize your skin sizzling, melting and bubbling | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: 1.4.0
@file: GSP_main.py
@time: 2021/1/26 10:50
@functions: graph signal processing main script
@update: support Yeo-ICN definition
@update: support ICN-level brain activity and connectivity strength saving
"""
import numpy as np
import glob
import os
import time
import matplotlib.pyplot as plt
from pygsp import graphs, filters, plotting
from GSP_utilities import surrogate_BOLD_create, save_variable, load_variable
import pandas as pd
from dppd import dppd
dp, X = dppd()
# 1. path locations and parameters
start = time.time()  # wall-clock start time for run-time reporting
deriv_path = '/home/amax/data/cye/MScohort_BIDS_clean/derivatives'  # BIDS derivatives root (site-specific absolute path)
connectome_path = os.path.join(deriv_path, 'mrtrix')  # structural connectomes (MRtrix output)
xcpengine_path = os.path.join(deriv_path, 'xcpengine')  # functional preprocessing output (xcpEngine)
network_assign_path = 'CAB-NP_v1.1_Labels-ReorderedbyNetworks_Yeo.csv'  # parcel-to-network lookup table
num_BOLD_timepoints = 180  # expected number of fMRI volumes per subject
num_rand = 100 # number of surrogates
functional_type = 'BOLD'
tract_type = 'meanlength' # one of the following: invlength, invnodevol, level-participant_connectome, meanlength
ICN_type = 'Yeo' # one of the following: 'Yeo', 'Cole'
normalize_type = 'both' # 'W': normalize W; 'L': normalize Laplacian (Preti method); 'both': normalize both W and Laplacian
# 2. read network assignment for hcpmmp
network_assign_csv = pd.read_csv(network_assign_path)
# Overwrite the generic NETWORK/NETWORKKEY columns with the Yeo-specific columns,
# so the rest of the script is agnostic to the chosen ICN definition.
network_assign_csv = dp(network_assign_csv).mutate(NETWORK=X.Yeo_NETWORK).pd
network_assign_csv = dp(network_assign_csv).mutate(NETWORKKEY=X.Yeo_NETWORKKEY).pd
num_network_df = dp(network_assign_csv).summarise((X.NETWORKKEY, np.max, 'hp_max')).pd
num_network = num_network_df.iloc[0,0]  # number of networks = largest network key
network_rowindex_ls = []  # per-network list of parcel row indices (0-based dataframe rows)
for network_i in range(1,num_network+1):  # network keys are 1-based
    df_network = dp(network_assign_csv).filter_by(X.NETWORKKEY == network_i).pd
    network_rowindex_ls.append(df_network.index.values)
# One row per network, sorted by key, with the undefined ICN removed.
network_unique_df = dp(network_assign_csv).distinct('NETWORKKEY').pd
network_unique_df = network_unique_df.sort_values(by='NETWORKKEY',ascending = True)
network_unique_df = dp(network_unique_df).filter_by(-X.NETWORK.isin(['Undefine'])).pd # remove undefined ICN
network_unique_df = network_unique_df.reset_index()
# 3. define group of interests
# Subject-ID prefixes for the four cohorts (presumably ms = multiple sclerosis,
# nc = normal control, nmo = neuromyelitis optica, cis = clinically isolated
# syndrome -- confirm with the study protocol).
cohort1 = 'ms'
cohort2 = 'nc'
cohort3 = 'nmo'
cohort4 = 'cis'
cohort1_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort1 + '*'))
cohort2_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort2 + '*'))
cohort3_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort3 + '*'))
cohort4_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort4 + '*'))
cohort_connectome_ls = cohort1_connectome_ls + cohort2_connectome_ls + cohort3_connectome_ls + cohort4_connectome_ls
cohort_connectome_ls.sort()
cohort1_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort1 + '*'))
cohort2_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort2 + '*'))
cohort3_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort3 + '*'))
cohort4_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort4 + '*'))
cohort_fmri_ls = cohort1_fmri_ls + cohort2_fmri_ls + cohort3_fmri_ls + cohort4_fmri_ls
cohort_name_ls = [os.path.basename(item) for item in cohort_connectome_ls]
remove_name_ls = ['sub-nc011','sub-nc039', 'sub-nmo002', 'sub-nmo019', 'sub-cis002','sub-cis015', 'sub-ms015'] # problematic cases
cohort_name_ls = list(set(cohort_name_ls) - set(remove_name_ls)) # remove problematic cases
for i in remove_name_ls: # remove problematic cases
    cohort_connectome_ls = [x for x in cohort_connectome_ls if i not in x]
    cohort_fmri_ls = [x for x in cohort_fmri_ls if i not in x]
# Re-sort all three lists so they stay index-aligned (subname <-> path rows below).
cohort_name_ls.sort()
cohort_connectome_ls.sort()
cohort_fmri_ls.sort()
# Sanity check only -- a mismatch is reported but does not stop the script.
if len(cohort_connectome_ls) != len(cohort_fmri_ls):
    print('Number of connectome and xcpengine results not matched')
# 4. create a dataframe to store individual filepath
path_dict = {'subname':cohort_name_ls, 'mrtrix_path': cohort_connectome_ls, 'xcp_path':cohort_fmri_ls}
path_df = pd.DataFrame(path_dict, columns=['subname','mrtrix_path','xcp_path'])
# Derive per-subject file locations: connectome csv, parcel-wise BOLD series,
# and the roiquant parametric map.
path_df = dp(path_df).mutate(connectome_path=X.mrtrix_path + '/connectome/' + X.subname +'_parc-hcpmmp1_' + tract_type + '.csv').pd
path_df = dp(path_df).mutate(BOLD_series_path=X.xcp_path + '/fcon/hcpmmp/hcpmmp.1D').pd
path_df = dp(path_df).mutate(fmri_map_path=X.xcp_path + '/roiquant/hcpmmp/' + X.subname +'_hcpmmp_mean.csv').pd
print('finished step 4')
# 5. load individual connectome as ndarray
# Dimensions and NC (control) bookkeeping used throughout the rest of the script.
num_parcels = len(network_assign_csv)
num_sub = len(path_df)
path_df_nc = dp(path_df).filter_by(X.subname.str.contains('nc')).pd
num_nc = len(path_df_nc)
nc_idx = path_df_nc.index
# Stack every subject's (parcel x parcel) connectome into one 3-D array.
connectome_array = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for subject_i in range(num_sub):
    connectome_array[:, :, subject_i] = np.genfromtxt(path_df.loc[subject_i, 'connectome_path'], delimiter=',')
# 6. load individual BOLD series and fill missing part according to /fcon/hcpmmp/missing.txt
# Parcels dropped by xcpengine are imputed with the mean signal of the other
# parcels belonging to the same network (assignment from step 2).
BOLD_series_3D = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
    BOLD_series = np.genfromtxt(path_df.loc[sub_idx, 'BOLD_series_path'])
    BOLD_series = BOLD_series.T  # transpose to (parcel, timepoint)
    missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
    if os.path.exists(missing_path):
        missing_parcel_id = np.genfromtxt(missing_path, dtype=int)  # 1-based parcel id(s)
        if missing_parcel_id.size == 1: # only one parcel missing
            # Sanity check: the row of a genuinely missing parcel should be all zeros.
            if BOLD_series[missing_parcel_id-1,:].sum() != 0:
                print("missing parcel not match for subject {}".format(sub_idx))
            network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']  # ids are 1-based, rows 0-based
            network_parcel_idx = network_rowindex_ls[network_key-1]
            # NOTE(review): np.mean over the whole (parcel, time) sub-array yields one scalar,
            # so the imputed row is a flat time series; if a per-timepoint mean was intended
            # this should be np.mean(..., axis=0) -- confirm.
            BOLD_series[missing_parcel_id-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
        else: # multiple parcels missing
            for missing_idx in missing_parcel_id:
                network_key = network_assign_csv.loc[missing_idx-1,'NETWORKKEY']
                network_parcel_idx = network_rowindex_ls[network_key-1]
                BOLD_series[missing_idx-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
    BOLD_series_3D[:,:,sub_idx] = BOLD_series
print('finished loading individual BOLD series and filling missing part')
# 7. load fmri parametric map and fill missing part according to /fcon/hcpmmp/missing.txt
fmri_paramap = np.zeros(shape=(num_parcels, num_sub))
paramap_str = 'mean_alffZ'  # roiquant column to extract
for sub_idx in range(len(path_df)):
    fmri_map = pd.read_csv(path_df.loc[sub_idx, 'fmri_map_path'],index_col=0)
    fmri_map = fmri_map.loc[:,paramap_str]  # pandas Series indexed by the csv's first column
    missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
    if os.path.exists(missing_path):
        missing_parcel_id = np.genfromtxt(missing_path, dtype=int)  # 1-based parcel id(s)
        if missing_parcel_id.size == 1: # only one parcel missing
            # Sanity check: a genuinely missing parcel should be NaN in the roiquant csv.
            # NOTE(review): the Series is indexed here with the raw parcel id (label-based),
            # while step 6 used missing_parcel_id-1 (positional); this is consistent only if
            # the csv index equals the 1-based parcel id -- confirm.
            if not np.isnan(fmri_map[missing_parcel_id]):
                print("missing parcel not match for subject {}".format(sub_idx))
            network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
            network_parcel_idx = network_rowindex_ls[network_key-1]
            fmri_map[int(missing_parcel_id)] = np.mean(fmri_map[network_parcel_idx])
            fmri_map = fmri_map.to_numpy()
        else: # multiple parcels missing
            network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
            # NOTE(review): this rebinds the module-level network_rowindex_ls to an object
            # ndarray from inside the loop -- a side effect that persists for later sections
            # and iterations; confirm it is intended.
            network_rowindex_ls = np.array(network_rowindex_ls, dtype=object)
            network_parcel_idx = network_rowindex_ls[network_key-1]
            for parcel_i in range(missing_parcel_id.size):
                fmri_map[int(missing_parcel_id[parcel_i])] = np.mean(fmri_map[network_parcel_idx[parcel_i]])
            fmri_map = fmri_map.to_numpy()
    # Subjects without a missing.txt keep fmri_map as a pandas Series; the ndarray
    # column assignment below accepts either form.
    fmri_paramap[:,sub_idx] = fmri_map
print('finished loading fmri parametric map and fill missing part')
# 8. load connectome and functional signal and do GSP
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
    func_sig = BOLD_series_3D
    s_head_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))  # GFT coefficients per subject
    s_rand_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub, num_rand))  # surrogate GFT coefficients
else:
    raise ValueError('undefined functional signal')
G_U_cohort = np.zeros(shape=(num_parcels, num_parcels, num_sub))  # per-subject eigenvector (Fourier) bases
for sub_idx in range(len(path_df)):
    W = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
    # Symmetric Normalization of adjacency matrix
    D = np.diag(np.sum(W,1)) #degree
    # Element-wise power: the off-diagonal zeros become inf (0**-0.5) and are zeroed
    # below, which also guards isolated nodes with zero degree.
    D_power = np.power(D, (-1/2))
    D_power[np.isinf(D_power)] = 0
    Wsymm = D_power @ W @ D_power  # D^(-1/2) W D^(-1/2)
    #The eigenvector matrix G.U is used to define the Graph Fourier Transform of the graph signal S
    if normalize_type == 'W':
        G = graphs.Graph(Wsymm)
        G.compute_fourier_basis()
        G_U_cohort[:,:,sub_idx] = G.U
        U = G.U
    elif normalize_type == 'L':
        G = graphs.Graph(W, lap_type = 'normalized')
        G.compute_fourier_basis()
        G_U_cohort[:,:,sub_idx] = G.U
        U = G.U
    elif normalize_type == 'both':
        # Rebuild from the upper triangle to remove floating-point asymmetry, since
        # pygsp expects an exactly symmetric weight matrix.
        Wsymm = np.triu(Wsymm) + np.triu(Wsymm).T - np.diag(np.triu(Wsymm).diagonal()) # force symmetric
        G = graphs.Graph(Wsymm, lap_type = 'normalized')
        G.compute_fourier_basis()
        G_U_cohort[:,:,sub_idx] = G.U
        U = G.U
    # NOTE(review): for any other normalize_type, U is stale from the previous iteration
    # (or undefined on the first) -- consider raising for unknown values.
    # L = np.eye(len(Wsymm)) - Wsymm
    # lamda, U = np.linalg.eig(L)
    # U = U[:, np.argsort(lamda)]
    if functional_type == 'BOLD': # func_sig is BOLD_series_3D
        s_head = U.T @ func_sig[:,:,sub_idx]  # graph Fourier transform of the BOLD series
        s_head_cohort[:,:,sub_idx] = s_head
    # calcualte surrogate for individual
    s_rand_cohort[:,:,sub_idx,:] = surrogate_BOLD_create(U, func_sig[:,:,sub_idx], num_rand)
print('finished Graph Fourier Transform')
# Optional caching of the expensive GFT results: uncomment the save_* lines to
# persist once, then the load_* lines to reload on later runs instead of recomputing.
# save_variable(G_U_cohort, 'G_U_cohort.pkl')
# save_variable(s_head_cohort, 's_head_cohort.pkl')
# save_variable(s_rand_cohort, 's_rand_cohort.pkl')
# G_U_cohort = load_variable('G_U_cohort.pkl')
# s_head_cohort = load_variable('s_head_cohort.pkl')
# s_rand_cohort = load_variable('s_rand_cohort.pkl')
# 8.5(optional). plot Sihag2020 plot
# take nc001 as example
nc001_idx = path_df.subname[path_df.subname == 'sub-nc001'].index.tolist()[0]
# Reconstruct the signal from the 4 lowest-frequency graph modes and from 4
# high-frequency modes (columns -55:-51), then dump both to csv for plotting.
s_low = G_U_cohort[:,0:4, nc001_idx] @ s_head_cohort[0:4,:,nc001_idx]
s_high = G_U_cohort[:,-55:-51, nc001_idx] @ s_head_cohort[-55:-51,:,nc001_idx]
np.savetxt("nc001_s_low_both.csv", s_low, delimiter=",")
np.savetxt("nc001_s_high_both.csv", s_high, delimiter=",")
# 9. calculate the median-split threshold
# Find the eigenmode index that splits the NC group's mean spectral power into two
# halves of equal area under the curve (median split of the graph spectrum).
NC_index = [cohort_name_ls.index(x) for x in cohort_name_ls if 'nc' in x]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
    s_head_NC = s_head_cohort[:,:,NC_index]
    s_head_NC_square = np.power(s_head_NC, 2)  # spectral power
    #s_head_NC_square = np.power(s_head_NC_square, 1/2)
    s_head_NC_square_mean = np.mean(s_head_NC_square, (1,2)) # average for each timepoint and each subject
    s_head_NC_AUCTOT = np.trapz(s_head_NC_square_mean)  # total area under the power curve
    i=0
    AUC=0
    # Grow the integration range one mode at a time until half of the total area is
    # reached (trapz is recomputed from scratch each pass: O(n^2), but n is small).
    while AUC < s_head_NC_AUCTOT/2:
        AUC = np.trapz(s_head_NC_square_mean[:i])
        i = i + 1
    cutoff = i-1  # smallest number of low-frequency modes reaching half the total AUC
    print('finished calculating the median-split threshold')
    print('cutoff = {}'.format(cutoff))
# 10. calculate decoupling index for empirical data
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
    s_aligned_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))  # low-frequency reconstruction
    s_liberal_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))  # high-frequency reconstruction
    for sub_idx in range(len(path_df)):
        s_aligned_cohort[:,:,sub_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_head_cohort[0:cutoff,:,sub_idx]
        # NOTE(review): the high band uses columns cutoff-1:-1, which overlaps the low band
        # at index cutoff-1 and drops the very last mode; cutoff: (through the end) may have
        # been intended -- confirm.
        s_liberal_cohort[:,:,sub_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_head_cohort[cutoff-1:-1,:,sub_idx]
    # L2 norm across time gives per-parcel low/high-frequency activity strength.
    s_aligned_individual = np.linalg.norm(s_aligned_cohort, ord=2, axis=1)
    s_liberal_individual = np.linalg.norm(s_liberal_cohort, ord=2, axis=1)
    s_deCoupIdx_individual = s_liberal_individual / s_aligned_individual  # per-subject decoupling index
    s_aligned = np.mean(s_aligned_individual[:,nc_idx], axis=1)
    s_liberal = np.mean(s_liberal_individual[:,nc_idx], axis=1)
    s_deCoupIdx_node = s_liberal/s_aligned # only for NC
    print('finished calculating decoupling index for empirical data')
# 11. calculate decoupling index for surrogate data only for NC
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
    s_aligned_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
    s_liberal_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
    # Same low/high split as step 10, applied to every surrogate of every NC subject
    # (including the same cutoff-1:-1 slicing for the high band).
    for i, sub_idx in enumerate(nc_idx):
        for rand_idx in range(num_rand):
            s_aligned_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_rand_cohort[0:cutoff,:,sub_idx,rand_idx]
            s_liberal_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_rand_cohort[cutoff-1:-1,:,sub_idx,rand_idx]
    # norm for BOLD timepoints
    s_aligned_norm_rand = np.linalg.norm(s_aligned_cohort_rand, ord=2, axis=1)
    s_liberal_norm_rand = np.linalg.norm(s_liberal_cohort_rand, ord=2, axis=1)
    # average for cohorts
    s_aligned_rand = np.mean(s_aligned_norm_rand, axis=1)
    s_liberal_rand = np.mean(s_liberal_norm_rand, axis=1)
    # decoupling index
    s_deCoupIdx_node_rand = s_liberal_rand/s_aligned_rand  # (num_parcels, num_rand) null distribution
    print('finished calculating decoupling index for surrogate data')
# 12. network-level harmonics for emperical and surrogate data
# Aggregate parcel-level aligned/liberal strengths into ICN (network) means.
s_aligned_network = np.zeros(shape=(num_network))
s_liberal_network = np.zeros(shape=(num_network))
s_aligned_network_individual = np.zeros(shape=(num_network, num_sub))
s_liberal_network_individual = np.zeros(shape=(num_network, num_sub))
s_aligned_network_rand = np.zeros(shape=(num_network, num_rand))
s_liberal_network_rand = np.zeros(shape=(num_network, num_rand))
for i in range(num_network):
    # network_rowindex_ls[i] holds the parcel row indices belonging to network i+1.
    s_aligned_network[i] = np.mean(s_aligned[network_rowindex_ls[i]])
    s_liberal_network[i] = np.mean(s_liberal[network_rowindex_ls[i]])
    s_aligned_network_individual[i,:] = np.mean(s_aligned_individual[network_rowindex_ls[i],:], axis=0)
    s_liberal_network_individual[i,:] = np.mean(s_liberal_individual[network_rowindex_ls[i],:], axis=0)
    s_aligned_network_rand[i,:] = np.mean(s_aligned_rand[network_rowindex_ls[i],:], axis=0)
    s_liberal_network_rand[i,:] = np.mean(s_liberal_rand[network_rowindex_ls[i],:], axis=0)
# Network-level decoupling = ratio of the network means (not the mean of ratios).
s_deCoupIdx_network = s_liberal_network/s_aligned_network
s_deCoupIdx_network_individual = s_liberal_network_individual/s_aligned_network_individual
s_deCoupIdx_network_rand = s_liberal_network_rand/s_aligned_network_rand
# 13. brain-level harmonics for emperical and surrogate data
# Collapse the parcel dimension to obtain one decoupling value per brain.
s_aligned_brain = np.mean(s_aligned)
s_liberal_brain = np.mean(s_liberal)
s_deCoupIdx_brain = s_liberal_brain/s_aligned_brain  # NC group-level value
s_aligned_brain_individual = np.mean(s_aligned_individual, axis=0)
s_liberal_brain_individual = np.mean(s_liberal_individual, axis=0)
s_deCoupIdx_brain_individual = s_liberal_brain_individual/s_aligned_brain_individual  # one value per subject
s_aligned_brain_rand = np.mean(s_aligned_rand, axis=0)
s_liberal_brain_rand = np.mean(s_liberal_rand, axis=0)
s_deCoupIdx_brain_rand = s_liberal_brain_rand/s_aligned_brain_rand  # one value per surrogate
print('s_deCoupIdx_brain = {}'.format(s_deCoupIdx_brain))
# 14. significance of surrogate for plot
# A region/network/brain value is flagged significant when the empirical decoupling
# index falls outside the central 90% (5th-95th percentile) of its surrogate null
# distribution (two-sided test at alpha = 0.10).
# node-level
s_deCoupIdx_node_significance = np.logical_or((np.percentile(s_deCoupIdx_node_rand, 5, axis=1) >= s_deCoupIdx_node), (np.percentile(s_deCoupIdx_node_rand, 95, axis=1) <= s_deCoupIdx_node))
# Cast the boolean mask to 0/1 with the builtin int: the np.int alias was deprecated
# in NumPy 1.20 and removed in 1.24, so astype(np.int) crashes on current NumPy.
s_deCoupIdx_node_significance = s_deCoupIdx_node_significance.astype(int)
# network-level
s_deCoupIdx_network_significance = np.logical_or((np.percentile(s_deCoupIdx_network_rand, 5, axis=1) >= s_deCoupIdx_network), (np.percentile(s_deCoupIdx_network_rand, 95, axis=1) <= s_deCoupIdx_network))
s_deCoupIdx_network_significance = s_deCoupIdx_network_significance.astype(int)
# brain-level
s_deCoupIdx_brain_significance = np.logical_or((np.percentile(s_deCoupIdx_brain_rand, 5, axis=0) >= s_deCoupIdx_brain), (np.percentile(s_deCoupIdx_brain_rand, 95, axis=0) <= s_deCoupIdx_brain))
# 15. save results to csv
# Map the normalization mode to the suffix used in output folder names.
if normalize_type == 'W':
    normalize_str = '_W'
elif normalize_type == 'L':
    normalize_str = '_L'
elif normalize_type == 'both':
    normalize_str = '_both'
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
    # NOTE(review): normalize_str already starts with '_', so this yields a double
    # underscore (e.g. 'BOLD_4D_meanlength__both') -- confirm this naming is intended.
    csv_folder = 'BOLD_4D_' + tract_type + '_' + normalize_str
if not os.path.exists(os.path.abspath(csv_folder)):
    os.mkdir(os.path.abspath(csv_folder))
# save surrogate (ndarray with num_rand × num_region)
s_deCoupIdx_node_rand_df = pd.DataFrame(data = s_deCoupIdx_node_rand.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_rand_df = pd.DataFrame(data = s_deCoupIdx_network_rand.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_rand_df = pd.DataFrame(data = s_deCoupIdx_brain_rand)
s_deCoupIdx_node_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_rand_df.csv'))
# NOTE(review): the concatenation below produces 's_deCoupIdx_-network_rand_df.csv';
# an ICN label between '_' and '-network' may have been dropped -- confirm.
s_deCoupIdx_network_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_rand_df.csv'))
s_deCoupIdx_brain_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_rand_df.csv'))
# save surrogate significance (ndarray with 1 × num_region)
s_deCoupIdx_node_significance_df = | |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' The Embedding Model class adds training, evaluation,feature analysis routines for learning embedding '''
from copy import deepcopy
from dlpy import Model
from dlpy.layers import Input, EmbeddingLoss, OutputLayer, Keypoints
from .attribute_utils import create_extended_attributes
from .image_embedding import ImageEmbeddingTable
from .model import DataSpec
from .network import WeightsTable
from dlpy.utils import DLPyError
class EmbeddingModel(Model):
    '''
    Deep learning model for embedding (metric) learning, built from several
    weight-sharing branches joined by an embedding loss layer.
    '''
    # Name prefixes used when generating per-branch layers, so the layers can be
    # located again for weight sharing and data specs.
    input_layer_name_prefix = 'InputLayer_'
    embedding_layer_name_prefix = 'EmbeddingLayer_'
    embedding_loss_layer_name = 'EmbeddingLossLayer'
    # Number of branches: 2 (Siamese), 3 (Triplet) or 4 (Quartet); set by build_embedding_model.
    number_of_branches = 0
    # DataSpec list generated by build_embedding_model for use at training time.
    data_specs = None
    # One of 'Siamese', 'Triplet', 'Quartet'; set by build_embedding_model.
    embedding_model_type = None
    # Input/output tensors of the first branch, kept for later reuse.
    branch_input_tensor = None
    branch_output_tensor = None
    @classmethod
    def build_embedding_model(cls, branch, model_table=None, embedding_model_type='Siamese',
                              embedding_layer=None, margin=None):
        '''
        Build an embedding model based on a given model branch and model type

        Parameters
        ----------
        branch : Model
            Specifies the base model that is used as branches for embedding model.
        model_table : string or dict or CAS table, optional
            Specifies the CAS table to store the deep learning model.
            Default: None
        embedding_model_type : string, optional
            Specifies the embedding model type that the created table will be applied for training.
            Valid values: Siamese, Triplet, and Quartet.
            Default: Siamese
        embedding_layer: Layer, optional
            Specifies a dense layer as the embedding layer. For instance, Dense(n=10, act='identity') defines
            the embedding dimension is 10. When it is not given, the last layer (except the task layers)
            in the branch model will be used as the embedding layer.
        margin: double, optional
            Specifies the margin value used by the embedding model. When it is not given, for Siamese, margin is 2.0.
            Otherwise, margin is 0.0.

        Returns
        -------
        :class:`Model`

        Raises
        ------
        DLPyError
            If branch is not a Model, has more than one output layer, contains an
            unsupported task layer, or embedding_model_type is unrecognized.
        '''
        # check the branch type
        if not isinstance(branch, Model):
            raise DLPyError('The branch option must contain a valid model')
        # the branch must be built using functional APIs
        # only functional model has the attr output_layers
        if not hasattr(branch, 'output_layers'):
            print("NOTE: Convert the branch model into a functional model.")
            branch_tensor = branch.to_functional_model()
        else:
            # deep copy so that mutations below never touch the caller's model
            branch_tensor = deepcopy(branch)
        # always reset this local tensor to 0
        branch_tensor.number_of_instances = 0
        # the branch cannot contain other task layers
        if len(branch_tensor.output_layers) != 1:
            raise DLPyError('The branch model cannot contain more than one output layer')
        elif branch_tensor.output_layers[0].type == OutputLayer.type or \
                branch_tensor.output_layers[0].type == Keypoints.type:
            # Output/Keypoints task layers are stripped; the previous layer becomes the tail.
            print("NOTE: Remove the task layers from the model.")
            branch_tensor.layers.remove(branch_tensor.output_layers[0])
            branch_tensor.output_layers[0] = branch_tensor.layers[-1]
        elif branch_tensor.output_layers[0].can_be_last_layer:
            raise DLPyError('The branch model cannot contain task layer except output or keypoints layer.')
        # check embedding_model_type
        if embedding_model_type.lower() not in ['siamese', 'triplet', 'quartet']:
            raise DLPyError('Only Siamese, Triplet, and Quartet are valid.')
        # Pick the per-type branch count and the default margin (2.0 for Siamese, else 0.0).
        if embedding_model_type.lower() == 'siamese':
            if margin is None:
                margin = 2.0
            cls.number_of_branches = 2
        elif embedding_model_type.lower() == 'triplet':
            if margin is None:
                margin = 0.0
            cls.number_of_branches = 3
        elif embedding_model_type.lower() == 'quartet':
            if margin is None:
                margin = 0.0
            cls.number_of_branches = 4
        cls.embedding_model_type = embedding_model_type
        # build the branches
        # Each branch gets its own input layer and a replica of the base topology;
        # weights are tied across branches further below.
        input_layers = []
        branch_layers = []
        for i_branch in range(cls.number_of_branches):
            temp_input_layer = Input(**branch_tensor.layers[0].config, name=cls.input_layer_name_prefix + str(i_branch))
            temp_branch = branch_tensor(temp_input_layer) # return a list of tensors
            if embedding_layer:
                # Append a per-branch copy of the user-supplied embedding layer.
                temp_embed_layer = deepcopy(embedding_layer)
                temp_embed_layer.name = cls.embedding_layer_name_prefix + str(i_branch)
                temp_branch = temp_embed_layer(temp_branch)
                # change tensor to a list
                temp_branch = [temp_branch]
            else:
                # change the last layer name to the embedding layer name
                temp_branch[-1]._op.name = cls.embedding_layer_name_prefix + str(i_branch)
            if i_branch == 0:
                # remember the first branch's tensors for later reuse (e.g. scoring models)
                cls.branch_input_tensor = temp_input_layer
                if len(temp_branch) == 1:
                    cls.branch_output_tensor = temp_branch[0]
                else:
                    cls.branch_output_tensor = temp_branch
            # append these layers to the current branch
            input_layers.append(temp_input_layer)
            branch_layers = branch_layers + temp_branch
        # add the embedding loss layer
        loss_layer = EmbeddingLoss(margin=margin, name=cls.embedding_loss_layer_name)(branch_layers)
        # create the model DAG using all the above model information
        model = EmbeddingModel(branch.conn, model_table=model_table, inputs=input_layers, outputs=loss_layer)
        # sharing weights
        # get all layer names from one branch
        # NOTE(review): the sharing below assumes the DAG lists branch layers consecutively
        # and that replicated layers are named with '_2'..'_N' suffixes -- confirm against
        # DLPy's layer naming scheme.
        num_l = int((len(model.layers) - 1) / cls.number_of_branches)
        br1_name = [i.name for i in model.layers[:num_l - 1]]
        # build the list that contain the shared layers
        share_list = []
        n_id = 0
        n_to = n_id + cls.number_of_branches
        for l in br1_name[1:]:
            share_list.append({l: [l + '_' + str(i + 1) for i in range(n_id + 1, n_to)]})
        # add embedding layers
        share_list.append({cls.embedding_layer_name_prefix + str(0):
                               [cls.embedding_layer_name_prefix + str(i)
                                for i in range(1, cls.number_of_branches)]})
        model.share_weights(share_list)
        model.compile()
        # generate data_specs
        # Column names (_image_, _image_1, ..., _dissimilar_) match those produced by
        # ImageEmbeddingTable for the corresponding embedding model type.
        if embedding_model_type.lower() == 'siamese':
            cls.data_specs = [DataSpec(type_='image', layer=cls.input_layer_name_prefix + '0', data=['_image_']),
                              DataSpec(type_='image', layer=cls.input_layer_name_prefix + '1', data=['_image_1']),
                              DataSpec(type_='numnom', layer=cls.embedding_loss_layer_name, data=['_dissimilar_'])]
        elif embedding_model_type.lower() == 'triplet':
            cls.data_specs = [DataSpec(type_='image', layer=cls.input_layer_name_prefix + '0', data=['_image_']),
                              DataSpec(type_='image', layer=cls.input_layer_name_prefix + '1', data=['_image_1']),
                              DataSpec(type_='image', layer=cls.input_layer_name_prefix + '2', data=['_image_2'])]
        elif embedding_model_type.lower() == 'quartet':
            cls.data_specs = [DataSpec(type_='image', layer=cls.input_layer_name_prefix + '0', data=['_image_']),
                              DataSpec(type_='image', layer=cls.input_layer_name_prefix + '1', data=['_image_1']),
                              DataSpec(type_='image', layer=cls.input_layer_name_prefix + '2', data=['_image_2']),
                              DataSpec(type_='image', layer=cls.input_layer_name_prefix + '3', data=['_image_3'])]
        return model
def fit_embedding_model(self, optimizer,
data=None, path=None, n_samples=512, label_level=-2,
resize_width=None, resize_height=None,
max_iter=1,
valid_table=None, valid_freq=1, gpu=None, seed=0, record_seed=0,
save_best_weights=False, n_threads=None,
train_from_scratch=None):
"""
Fitting a deep learning model for embedding learning.
Parameters
----------
optimizer : :class:`Optimizer`
Specifies the parameters for the optimizer.
data : class:`ImageEmbeddingTable`, optional
This is the input data. It muse be a ImageEmbeddingTable object. Either data or path has to be specified.
path : string, optional
The path to the image directory on the server.
Path may be absolute, or relative to the current caslib root.
when path is specified, the data option will be ignored.
A new sample of data will be randomly generated after the number of epochs defined in Optimizer.
max_iter defines how many iterations the random sample will be generated.
n_samples : int, optional
Number of samples to generate.
Default: 512
label_level : int, optional
Specifies which path level should be used to generate the class labels for each image.
This class label determines whether a given image pair belongs to the same class.
For instance, label_level = 1 means the first directory and label_level = -2 means the last directory.
This internally use the SAS scan function
(check https://www.sascrunch.com/scan-function.html for more details).
Default: -2
resize_width : int, optional
Specifies the image width that needs be resized to. When resize_width is not given, it will be reset to
the specified resize_height.
resize_height : int, optional
Specifies the image height that needs be resized to. When resize_height is not given, it will be reset to
the specified resize_width.
max_iter : int, optional
Hard limit on iterations when randomly generating data.
Default: 1
valid_table : string or CASTable, optional
Specifies the table with the validation data. The validation
table must have the same columns and data types as the training table.
valid_freq : int, optional
Specifies the frequency for scoring the validation table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
seed : double, optional
specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
record_seed : double, optional
specifies the random number seed for the random record selection
within a worker. The default value 0 disables random record selection.
Records are read as they are laid out in memory.
Negative values indicate to use random number streams based on the
computer clock.
save_best_weights : bool, optional
When set to True, it keeps the weights that provide the smallest
loss error.
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
train_from_scratch : bool, optional
When set | |
#!../../../../virtualenv/bin/python3
# -*- coding: utf-8 -*-
# NB: The shebang line above assumes you've installed a python virtual environment alongside your working copy of the
# <4most-4gp-scripts> git repository. It also only works if you invoke this python script from the directory where it
# is located. If these two assumptions are incorrect (e.g. you're using Conda), you can still use this script by typing
# <python cannon_test.py>, but <./cannon_test.py> will not work.
"""
Take a training set and a test set, and see how well the Cannon can reproduce the stellar labels on the test
set of stars.
In theory, this code can be used on either continuum-normalised spectra, or non-continuum-normalised spectra. It uses
the metadata field "continuum_normalised" as a flag to determine which kind of spectrum has been supplied.
Note, however, that the continuum normalisation code is currently largely untested, so in practice you should always
pass this code continuum normalised spectra if you want scientifically meaningful results.
"""
import argparse
import gzip
import json
import logging
import os
import time
from os import path as os_path
import numpy as np
from fourgp_cannon import __version__ as fourgp_version
from fourgp_degrade import SpectrumProperties
from fourgp_speclib import SpectrumLibrarySqlite
def select_cannon(continuum_normalisation="none", cannon_version="casey_old"):
    """
    Select which Cannon wrapper to use, based on whether we've been asked to do continuum normalisation or not.

    :param continuum_normalisation:
        String indicating the name of the continuum normalisation scheme we've been asked to use. It is recommended
        to use "none", meaning that you've already done continuum normalisation.
    :param cannon_version:
        The name of the Cannon version to use. Must be one of "casey_old", "casey_new", "anna_ho".
    :raises ValueError:
        If either argument is not one of the allowed values listed above.
    :return:
        A list of three items:

        1. A class which wraps the Cannon (None if the requested combination is unsupported,
           e.g. anna_ho with running-mean or polynomial normalisation).
        2. Boolean flag indicating whether we want the test set already continuum normalised before input.
        3. Boolean flag indicating whether we want the training set already continuum normalised before input.
    """
    # Validate both arguments up front. We raise ValueError rather than assert, because
    # assertions are stripped when Python runs with -O, which would let bad input
    # produce obscure downstream failures.
    if cannon_version not in ("casey_old", "casey_new", "anna_ho"):
        raise ValueError("Unknown Cannon version <{}>".format(cannon_version))
    if continuum_normalisation not in ("none", "running_mean", "polynomial"):
        raise ValueError("Unknown continuum normalisation scheme <{}>".format(continuum_normalisation))

    # We only import the Cannon inside these branches, so that the user doesn't have to have all the Cannon
    # versions installed to use one of them.
    if cannon_version == "casey_old":
        from fourgp_cannon.cannon_wrapper_casey_old import \
            CannonInstanceCaseyOld, \
            CannonInstanceCaseyOldWithContinuumNormalisation, CannonInstanceCaseyOldWithRunningMeanNormalisation

        cannon_classes = {
            "vanilla": CannonInstanceCaseyOld,
            "automatic_continuum_normalisation": CannonInstanceCaseyOldWithContinuumNormalisation,
            "running_mean_normalisation": CannonInstanceCaseyOldWithRunningMeanNormalisation
        }
    elif cannon_version == "casey_new":
        from fourgp_cannon.cannon_wrapper_casey_new import \
            CannonInstanceCaseyNew, \
            CannonInstanceCaseyNewWithContinuumNormalisation, CannonInstanceCaseyNewWithRunningMeanNormalisation

        cannon_classes = {
            "vanilla": CannonInstanceCaseyNew,
            "automatic_continuum_normalisation": CannonInstanceCaseyNewWithContinuumNormalisation,
            "running_mean_normalisation": CannonInstanceCaseyNewWithRunningMeanNormalisation
        }
    else:  # cannon_version == "anna_ho", guaranteed by the validation above
        from fourgp_cannon.cannon_wrapper_anna_ho import CannonInstanceAnnaHo

        # NOTE(review): the Anna Ho wrapper has no automatic-normalisation variants, so those
        # entries are None; selecting them returns None as the class and callers fail later.
        cannon_classes = {
            "vanilla": CannonInstanceAnnaHo,
            "automatic_continuum_normalisation": None,
            "running_mean_normalisation": None
        }

    # Running mean normalisation. We accept flux-normalised spectra, and normalise each pixel by the mean flux in
    # a running window of pixels on either side of that pixel.
    if continuum_normalisation == "running_mean":
        cannon_class = cannon_classes["running_mean_normalisation"]
        continuum_normalised_training = False
        continuum_normalised_testing = False

    # Attempt to continuum normalise the spectra by fitting a polynomial to it. This implementation is really crude
    # and doesn't really manage to fit the continuum at all, so the results are a disaster.
    elif continuum_normalisation == "polynomial":
        cannon_class = cannon_classes["automatic_continuum_normalisation"]
        continuum_normalised_training = True
        continuum_normalised_testing = False

    # Assume that spectra have already been continuum normalised. You must use this option for now if you want
    # sensible results.
    else:
        cannon_class = cannon_classes["vanilla"]
        continuum_normalised_training = True
        continuum_normalised_testing = True

    # Note the return order: the testing flag comes before the training flag.
    return cannon_class, continuum_normalised_testing, continuum_normalised_training
def resample_spectrum(spectrum, training_spectra):
    """
    Resample a test spectrum onto the same raster as the training spectra. This may be necessary if for some reason
    the test spectra are on a different raster, but it's not generally a good idea.

    :param spectrum:
        The test spectrum which is on a different raster to the training spectra.
    :param training_spectra:
        A sample of training spectra, demonstrating the raster that the test spectrum needs to be on.
    :return:
        A resampled version of the test spectrum.
    """
    from fourgp_degrade.resample import SpectrumResampler

    # Use the first training spectrum as the template raster to match against.
    template = training_spectra.extract_item(0)
    resampled = SpectrumResampler(input_spectrum=spectrum).match_to_other_spectrum(other=template)
    # Carry the original metadata over to the resampled copy.
    resampled.metadata = spectrum.metadata
    return resampled
def autocomplete_scaled_solar_abundances(training_library, training_library_ids_all, label_list):
    """
    Where stars have elemental abundances missing, insert scaled-solar values.

    Any label which is absent, None, or non-finite is replaced by the spectrum's [Fe/H] value,
    i.e. the abundance a scaled-solar composition would imply.

    :param training_library:
        SpectrumLibrary containing the spectra we are to train the Cannon on.
    :type training_library:
        SpectrumLibrarySqlite
    :param training_library_ids_all:
        List of the UIDs of the training spectra we are to use.
    :type training_library_ids_all:
        list
    :param label_list:
        The list of the labels which must be set on every spectrum.
    :return:
        A list of two items:

        0. A list of the IDs of the selected spectra
        1. A SpectrumArray of the selected spectra
    """
    spectra = training_library.open(ids=training_library_ids_all)
    for index in range(len(spectra)):
        metadata = spectra.get_metadata(index)
        # Collect the labels which have no usable value on this spectrum.
        unset_labels = [label for label in label_list
                        if (label not in metadata)
                        or (metadata[label] is None)
                        or (not np.isfinite(metadata[label]))]
        for label in unset_labels:
            # Scaled solar: assume [X/H] == [Fe/H] for every element X.
            metadata[label] = metadata["[Fe/H]"]
    return training_library_ids_all, spectra
def filter_training_spectra(training_library, training_library_ids_all, label_list):
    """
    Filter the spectra in a SpectrumArray on the basis that they must have a list of metadata values defined.

    :param training_library:
        SpectrumLibrary containing the spectra we are to train the Cannon on.
    :type training_library:
        SpectrumLibrarySqlite
    :param training_library_ids_all:
        List of the UIDs of the training spectra we are to use.
    :type training_library_ids_all:
        list
    :param label_list:
        The list of labels which must be set in order for a spectrum to be accepted.
    :return:
        A list of two items:

        0. A list of the IDs of the selected spectra
        1. A SpectrumArray of the selected spectra
    """
    def _has_all_labels(metadata):
        # A spectrum is usable only if every requested label is present, non-None and finite.
        for label in label_list:
            if label not in metadata:
                return False
            value = metadata[label]
            if value is None or not np.isfinite(value):
                return False
        return True

    all_metadata = training_library.get_metadata(ids=training_library_ids_all)
    ids_filtered = [uid
                    for uid, metadata in zip(training_library_ids_all, all_metadata)
                    if _has_all_labels(metadata)]
    logging.info("Accepted {:d} / {:d} training spectra; others had labels missing.".
                 format(len(ids_filtered), len(training_library_ids_all)))
    return ids_filtered, training_library.open(ids=ids_filtered)
def evaluate_computed_labels(label_expressions, spectra):
    """
    Evaluate computed labels for each spectrum. These are labels that are computed from multiple metadata items,
    such as B-V colours. Each computed value is stored back into the spectrum's metadata dictionary under the
    expression string itself.

    :param label_expressions:
        A list of the computed label expressions that we are to evaluate for each spectrum.
    :param spectra:
        A SpectrumArray of the spectra for which we are to compute each computed label.
    :return:
        None
    """
    for index in range(len(spectra)):
        metadata = spectra.get_metadata(index)
        for expression in label_expressions:
            # NOTE(security): eval() executes the expression with the metadata dict as its namespace.
            # Expressions must come from trusted configuration, never from untrusted input.
            metadata[expression] = eval(expression, metadata)
def create_censoring_masks(censoring_scheme, raster, censoring_line_list, label_fields, label_expressions):
"""
Create censoring masks for each label we are fitting, based on pixels around the lines of each element.
:param censoring_scheme:
Switch to specify how censoring is done. There are three options: 1, 2 or 3. In Scheme 1, all of the labels
the Cannon is fitting can see all pixels relevant to all the labels we're fitting. The censoring is a simple
mask, which is the same for all labels. In Scheme 2, each individual element can only see its own lines, but
Teff and log(g) can see all of the pixels used by at least one of the individual elements. Scheme 3 is similar,
but [Fe/H] is treated like Teff and log(g) and can see all the pixels used by at least one of the elements being
fitting.
For best results, use scheme 1.
:param raster:
The wavelength raster of the spectra we are fitting.
:param censoring_line_list:
The filename of the file with the line list we use create the censoring masks.
:param label_fields:
A list of the labels the Cannon is fitting. Used to determine which elements we need to include lines for.
:param label_expressions:
A list of the algebraic expressions for any label expressions we're fitting.
:return:
A dictionary of Boolean masks, one for each label.
"""
censoring_masks = None
if censoring_line_list != "":
window = 1 # How many Angstroms either side of the line should be used?
censoring_masks = {}
line_list_txt = open(censoring_line_list).readlines()
for label_name in label_fields:
allowed_lines = 0
mask = np.zeros(raster.size, dtype=bool)
# Loop over the lines in the line list | |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collapsed Amortized Variational Inference for SNLDS.
This is a reasonable baseline model for switching non-linear dynamical system
with the following architecture:
1. an inference network, with Bidirectional-RNN for input embedding, and a
forward RNN to get the posterior distribution of `q(z[1:T] | x[1:T])`.
2. a continuous state transition network, `p(z[t] | z[t-1], s[t])`.
3. a discrete state transition network that conditioned on the input,
`p(s[t] | s[t-1], x[t-1])`.
4. an emission network conditioned on the continuous hidden dynamics,
`p(x[t] | z[t])`.
It also contains a function, `create_model()`, to help to create the SNLDS
model discribed in ``Collapsed Amortized Variational Inference for Switching
Nonlinear Dynamical Systems``. 2019. https://arxiv.org/abs/1910.09588.
All the networks are configurable through function arguments `network_*`.
"""
import collections
import tensorflow as tf
import tensorflow_probability as tfp
from snlds import model_base
from snlds import utils
namedtuple = collections.namedtuple
layers = tf.keras.layers
tfd = tfp.distributions
tfpl = tfp.layers
RANDOM_SEED = 131
def construct_initial_state_distribution(
    latent_dim,
    num_categ,
    use_trainable_cov=False,
    use_triangular_cov=False,
    raw_sigma_bias=0.0,
    sigma_min=1e-5,
    sigma_scale=0.05,
    dtype=tf.float32,
    name="z0"):
  """Construct the initial state distribution, `p(z[0])`.

  Args:
    latent_dim: an `int` scalar for dimension of continuous hidden states, `z`.
    num_categ: an `int` scalar for number of discrete states, `s`.
    use_trainable_cov: a `bool` scalar indicating whether the scale of `p(z[0])`
      is trainable. Default to False.
    use_triangular_cov: a `bool` scalar indicating whether to use triangular
      covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
      distribution. Otherwise, a diagonal covariance matrices and
      `tfp.distributions.MultivariateNormalDiag` will be used.
    raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
      standard deviation of the distribution. Default to `0.`.
    sigma_min: a `float` scalar for minimal level of sigma to prevent
      underflow. Default to `1e-5`.
    sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
      The above three arguments are used as
      `sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min))`.
    dtype: data type for variables within the scope. Default to `tf.float32`.
    name: a `str` to construct names of variables.

  Returns:
    return_dist: a `tfp.distributions` instance for the initial state
      distribution, `p(z[0])`.
  """
  glorot = tf.keras.initializers.GlorotUniform()
  z0_mean = tf.Variable(
      initial_value=glorot(shape=[num_categ, latent_dim], dtype=dtype),
      name=f"{name}_mean")

  def rescaled(raw_scale):
    # sigma_scale * max(softmax(raw + raw_sigma_bias), sigma_min), as
    # described in the docstring above.
    return sigma_scale * tf.maximum(
        tf.nn.softmax(raw_scale + raw_sigma_bias), sigma_min)

  if use_triangular_cov:
    # A flat vector of dim*(dim+1)/2 entries is packed into a lower-triangular
    # scale matrix.
    flat_dim = latent_dim * (latent_dim + 1) // 2
    z0_scale = rescaled(
        tfp.math.fill_triangular(
            tf.Variable(
                initial_value=glorot(shape=[flat_dim], dtype=dtype),
                name=f"{name}_scale",
                trainable=use_trainable_cov)))
    base_dist = tfd.MultivariateNormalTriL(loc=z0_mean, scale_tril=z0_scale)
  else:
    z0_scale = rescaled(
        tf.Variable(
            initial_value=glorot(shape=[latent_dim], dtype=dtype),
            name=f"{name}_scale",
            trainable=use_trainable_cov))
    base_dist = tfd.MultivariateNormalDiag(loc=z0_mean, scale_diag=z0_scale)

  return_dist = tfd.Independent(
      distribution=base_dist,
      reinterpreted_batch_ndims=0)
  return tfp.experimental.as_composite(return_dist)
class ContinuousStateTransition(tf.keras.Model):
  """Transition for `p(z[t] | z[t-1], s[t])`."""

  def __init__(self,
               transition_mean_networks,
               distribution_dim,
               num_categories=1,
               cov_mat=None,
               use_triangular_cov=False,
               use_trainable_cov=True,
               raw_sigma_bias=0.0,
               sigma_min=1e-5,
               sigma_scale=0.05,
               dtype=tf.float32,
               name="ContinuousStateTransition"):
    """Construct a `ContinuousStateTransition` instance.

    Args:
      transition_mean_networks: a list of `callable` networks, with the length
        of list same as `num_categories`. Each one of the networks will take
        previous step hidden state, `z[t-1]`, and returns the mean of
        transition distribution, `p(z[t] | z[t-1], s[t]=i)` for each
        discrete state `i`.
      distribution_dim: an `int` scalar for dimension of continuous hidden
        states, `z`.
      num_categories: an `int` scalar for number of discrete states, `s`.
      cov_mat: an optional `float` Tensor for predefined covariance matrix.
        Default to `None`, in which case, a `cov` variable will be created.
      use_triangular_cov: a `bool` scalar indicating whether to use triangular
        covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
        distribution. Otherwise, a diagonal covariance matrices and
        `tfp.distributions.MultivariateNormalDiag` will be used.
      use_trainable_cov: a `bool` scalar indicating whether the scale of
        the distribution is trainable. Default to False.
      raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
        standard deviation of the distribution. Default to `0.`.
      sigma_min: a `float` scalar for minimal level of sigma to prevent
        underflow. Default to `1e-5`.
      sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
        The above three arguments are used as
        `sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min))`.
      dtype: data type for variables within the scope. Default to `tf.float32`.
      name: a `str` to construct names of variables.
    """
    super(ContinuousStateTransition, self).__init__()
    assertion_str = (
        "There has to be one transition mean networks for each discrete state")
    assert len(transition_mean_networks) == num_categories, assertion_str
    self.z_trans_networks = transition_mean_networks
    self.num_categ = num_categories
    self.use_triangular_cov = use_triangular_cov
    self.distribution_dim = distribution_dim

    # BUGFIX: the original `if cov_mat:` truth-tested the Tensor itself, which
    # raises for any multi-element Tensor (ambiguous truth value in eager mode,
    # `OperatorNotAllowedInGraphError` in graph mode). Compare against None so
    # a caller-supplied covariance matrix is actually usable.
    if cov_mat is not None:
      self.cov_mat = cov_mat
    elif self.use_triangular_cov:
      # Learn a lower-triangular scale: a flat vector of dim*(dim+1)/2 raw
      # entries is packed into a triangular matrix, then rescaled as documented
      # in the docstring above.
      self.cov_mat = tfp.math.fill_triangular(
          tf.Variable(
              tf.random.uniform(
                  shape=[
                      int(self.distribution_dim
                          * (self.distribution_dim + 1) / 2)],
                  minval=0., maxval=1.,
                  dtype=dtype),
              name="{}_cov".format(name),
              dtype=dtype,
              trainable=use_trainable_cov))
      self.cov_mat = tf.maximum(tf.nn.softmax(self.cov_mat + raw_sigma_bias),
                                sigma_min) * sigma_scale
    else:
      # Diagonal scale vector, rescaled the same way.
      self.cov_mat = tf.Variable(
          tf.random.uniform(shape=[self.distribution_dim],
                            minval=0.0, maxval=1.,
                            dtype=dtype),
          name="{}_cov".format(name),
          dtype=dtype,
          trainable=use_trainable_cov)
      self.cov_mat = tf.maximum(tf.nn.softmax(self.cov_mat + raw_sigma_bias),
                                sigma_min) * sigma_scale

  def call(self, input_tensor, dtype=tf.float32):
    """Return `p(z[t] | z[t-1], s[t])` for a batch of inputs.

    Args:
      input_tensor: a [batch_size, num_steps, distribution_dim] Tensor of
        previous-step continuous states, `z[t-1]`.
      dtype: data type the input is converted to. Default to `tf.float32`.

    Returns:
      A `tfp.distributions` instance with batch shape
      [batch_size, num_steps, num_categ] and event shape [distribution_dim].
    """
    input_tensor = tf.convert_to_tensor(input_tensor, dtype_hint=dtype)
    batch_size, num_steps, distribution_dim = tf.unstack(tf.shape(input_tensor))
    # The shape of the mean_tensor after tf.stack is [num_categ, batch_size,
    # num_steps, distribution_dim]; the transpose moves num_categ to axis 2.
    mean_tensor = tf.transpose(
        tf.stack([
            z_net(input_tensor) for z_net in self.z_trans_networks]),
        [1, 2, 0, 3])
    mean_tensor = tf.reshape(mean_tensor,
                             [batch_size, num_steps,
                              self.num_categ, distribution_dim])

    if self.use_triangular_cov:
      output_dist = tfd.MultivariateNormalTriL(
          loc=mean_tensor,
          scale_tril=self.cov_mat)
    else:
      output_dist = tfd.MultivariateNormalDiag(
          loc=mean_tensor,
          scale_diag=self.cov_mat)
    return tfp.experimental.as_composite(output_dist)

  @property
  def output_event_dims(self):
    """Dimensionality of the event space (continuous state dimension)."""
    return self.distribution_dim
class DiscreteStateTransition(tf.keras.Model):
  """Discrete state transition p(s[t] | s[t-1], x[t-1])."""

  def __init__(self,
               transition_network,
               num_categories):
    """Construct a `DiscreteStateTransition` instance.

    Args:
      transition_network: a `callable` network taking batch conditional inputs,
        `x[t-1]`, and returning the discrete state transition matrices,
        `log p(s[t] |s[t-1], x[t-1])`.
      num_categories: an `int` scalar for number of discrete states, `s`.
    """
    super(DiscreteStateTransition, self).__init__()
    self.dense_net = transition_network
    self.num_categ = num_categories

  def call(self, input_tensor, dtype=tf.float32):
    """Map conditioning inputs to per-step [K, K] transition matrices."""
    input_tensor = tf.convert_to_tensor(input_tensor, dtype_hint=dtype)
    batch_size, num_steps = tf.unstack(tf.shape(input_tensor)[:2])
    # The network emits a flat K*K vector per step; fold it into a square
    # transition matrix for each (batch, step) position.
    num_categ = self.num_categ
    return tf.reshape(self.dense_net(input_tensor),
                      [batch_size, num_steps, num_categ, num_categ])

  @property
  def output_event_dims(self):
    """Event dimensionality of this module's output (number of categories)."""
    return self.num_categ
class GaussianDistributionFromMean(tf.keras.Model):
  """Emission model p(x[t] | z[t])."""

  def __init__(self,
               emission_mean_network,
               observation_dim,
               cov_mat=None,
               use_triangular_cov=False,
               use_trainable_cov=True,
               raw_sigma_bias=0.0,
               sigma_min=1e-5,
               sigma_scale=0.05,
               dtype=tf.float32,
               name="GaussianDistributionFromMean"):
    """Construct a `GaussianDistributionFromMean` instance.

    Args:
      emission_mean_network: a `callable` network taking continuous hidden
        states, `z[t]`, and returning the mean of emission distribution,
        `p(x[t] | z[t])`.
      observation_dim: an `int` scalar for dimension of observations, `x`.
      cov_mat: an optional `float` Tensor for predefined covariance matrix.
        Default to `None`, in which case, a `cov` variable will be created.
      use_triangular_cov: a `bool` scalar indicating whether to use triangular
        covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
        distribution. Otherwise, a diagonal covariance matrices and
        `tfp.distributions.MultivariateNormalDiag` will be used.
      use_trainable_cov: a `bool` scalar indicating whether the scale of
        the distribution is trainable. Default to False.
      raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
        standard deviation of the distribution. Default to `0.`.
      sigma_min: a `float` scalar for minimal level of sigma to prevent
        underflow. Default to `1e-5`.
      sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
        The above three arguments are used as
        `sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min))`.
      dtype: data type for variables within the scope. Default to `tf.float32`.
      name: a `str` to construct names of variables.
    """
    super(GaussianDistributionFromMean, self).__init__()
    self.observation_dim = observation_dim
    self.x_emission_net = emission_mean_network
    self.use_triangular_cov = use_triangular_cov

    # BUGFIX: the original `if cov_mat:` truth-tested the Tensor itself, which
    # raises for any multi-element Tensor (ambiguous truth value in eager mode,
    # `OperatorNotAllowedInGraphError` in graph mode). Compare against None so
    # a caller-supplied covariance matrix is actually usable.
    if cov_mat is not None:
      self.cov_mat = cov_mat
    elif self.use_triangular_cov:
      # Learn a lower-triangular scale: a flat vector of dim*(dim+1)/2 raw
      # entries is packed into a triangular matrix, then rescaled as documented
      # in the docstring above.
      local_variable = tf.Variable(
          tf.random.uniform(
              shape=[int(self.observation_dim*(self.observation_dim+1)/2)],
              minval=0., maxval=1.,
              dtype=dtype),
          name="{}_cov".format(name),
          dtype=dtype,
          trainable=use_trainable_cov)
      self.cov_mat = tfp.math.fill_triangular(
          local_variable)
      self.cov_mat = tf.maximum(tf.nn.softmax(self.cov_mat + raw_sigma_bias),
                                sigma_min) * sigma_scale
    else:
      # Diagonal scale vector, rescaled the same way.
      self.cov_mat = tf.Variable(
          initial_value=tf.random.uniform(shape=[self.observation_dim],
                                          minval=0.0, maxval=1.,
                                          dtype=dtype),
          name="{}_cov".format(name),
          dtype=dtype,
          trainable=use_trainable_cov)
      self.cov_mat = tf.maximum(tf.nn.softmax(self.cov_mat + raw_sigma_bias),
                                sigma_min) * sigma_scale

  def call(self, input_tensor, dtype=tf.float32):
    """Return the emission distribution `p(x[t] | z[t])`.

    Args:
      input_tensor: a Tensor of continuous hidden states, `z[t]`, fed to the
        emission mean network.
      dtype: data type the input is converted to. Default to `tf.float32`.

    Returns:
      A `tfp.distributions` instance with event shape [observation_dim].
    """
    input_tensor = tf.convert_to_tensor(input_tensor, dtype_hint=dtype)
    mean_tensor = self.x_emission_net(input_tensor)

    if self.use_triangular_cov:
      output_dist = tfd.MultivariateNormalTriL(
          loc=mean_tensor,
          scale_tril=self.cov_mat)
    else:
      output_dist = tfd.MultivariateNormalDiag(
          loc=mean_tensor,
          scale_diag=self.cov_mat)
    return tfp.experimental.as_composite(output_dist)

  @property
  def output_event_dims(self):
    """Dimensionality of the event space (observation dimension)."""
    return self.observation_dim
class RnnInferenceNetwork(tf.keras.Model):
"""Inference network for posterior q(z[1:T] | x[1:T])."""
def __init__(self,
posterior_rnn,
posterior_dist,
latent_dim,
embedding_network=None):
"""Construct a `RnnInferenceNetwork` instance.
Args:
posterior_rnn: a RNN cell, `h[t]=f_RNN(h[t-1], z[t-1], input[t])`,
which recursively takes previous step RNN states `h`, previous step
sampled dynamical state `z[t-1]`, and conditioned input `input[t]`.
posterior_dist: a distribution instance for `p(z[t] | h[t])`,
where h[t] is the output of `posterior_rnn`.
latent_dim: an `int` scalar for dimension of continuous hidden
states, `z`.
embedding_network: an optional network to | |
<filename>rtCommon/fileWatcher.py
"""
FileWatcher implements a class that watches for files to be created in a directory and then
returns the notification that the files is now available.
The FileWatcher class is a virtual class of sorts with two underlying implementations, one
for Mac and Windows (WatchdogFileWatcher) and one for Linux (InotifyFileWatcher).
"""
import os
import sys
import time
import logging
import threading
from typing import Optional
from queue import Queue, Empty
from watchdog.events import PatternMatchingEventHandler # type: ignore
from rtCommon.utils import DebugLevels, demoDelay
from rtCommon.errors import StateError
class FileWatcher():
    """Virtual class to watch for the arrival of new files and notify."""
    def __new__(cls):
        # Dispatch to the platform-specific implementation. Note the concrete
        # classes are not subclasses of FileWatcher, so __init__ is invoked
        # explicitly here rather than by the normal instantiation protocol.
        if sys.platform in ("linux", "linux2"):
            # create linux version
            watcher = InotifyFileWatcher.__new__(InotifyFileWatcher)
        elif sys.platform in ("darwin", "win32"):
            # create Mac/Windows version
            watcher = WatchdogFileWatcher.__new__(WatchdogFileWatcher)
        else:
            # unsupported os type
            logging.log(logging.ERROR, "Unsupported os type %s" % (sys.platform))
            return None
        watcher.__init__()
        return watcher

    def __init__(self):
        logging.log(logging.ERROR, "FileWatcher is abstract class. __init__ not implemented")

    def __del__(self):
        logging.log(logging.ERROR, "FileWatcher is abstract class. __del__ not implemented")

    def initFileNotifier(self, dir, filePattern, minFileSize, demoStep=0):
        logging.log(logging.ERROR, "FileWatcher is abstract class. initFileNotifier not implemented")
        return None

    def waitForFile(self, filename, timeout=0):
        logging.log(logging.ERROR, "FileWatcher is abstract class. waitForFile not implemented")
        return ''
if sys.platform in ("darwin", "win32"):
from watchdog.observers import Observer # type: ignore
# Version of FileWatcher for Mac and Windows
class WatchdogFileWatcher():
    """Version of FileWatcher for Mac and Windows using Watchdog toolkit."""
    def __init__(self):
        # Watchdog Observer thread; created lazily in initFileNotifier().
        self.observer = None
        self.fileNotifyHandler = None
        # Queue of (event, timestamp) tuples pushed by FileNotifyHandler.
        self.fileNotifyQ = Queue()  # type: Queue
        self.filePattern = None
        self.watchDir = None
        self.minFileSize = 0
        self.demoStep = 0
        # Timestamp of the previous demo notification, used by demoDelay().
        self.prevEventTime = 0
    def __del__(self):
        # Best-effort shutdown of the observer thread on garbage collection.
        if self.observer is not None:
            try:
                self.observer.stop()
            except Exception as err:
                # TODO - change back to log once can figure out what the observer.stop streamRef error is
                print("FileWatcher: oberver.stop(): %s", str(err))
    def initFileNotifier(self, dir: str, filePattern: str, minFileSize: int, demoStep: int=0) -> None:
        """
        Initialize the file watcher to watch in the specified directory for the specified
        regex-based filepattern.

        Args:
            dir (str): Directory to watch in
            filePattern (str): Regex-based filepattern to watch for
            minFileSize (int): Minimum file size necessary to consider the file is wholly written.
                Below this size the filewatcher will assume file is partially written and continue
                to wait.
            demoStep (int): If non-zero then it will space out file notifications by demoStep seconds.
                This is used when the image files are pre-existing but we want to simulate as if
                the arrive from the scanner every few seconds (demoStep seconds).
        """
        self.demoStep = demoStep
        self.minFileSize = minFileSize
        # Stop any previous observer before scheduling a watch on the new directory.
        if self.observer is not None:
            self.observer.stop()
        self.observer = Observer()
        # An empty pattern means "match everything".
        if filePattern is None or filePattern == '':
            filePattern = '*'
        self.filePattern = filePattern
        self.watchDir = dir
        self.fileNotifyHandler = FileNotifyHandler(self.fileNotifyQ, [filePattern])
        self.observer.schedule(self.fileNotifyHandler, dir, recursive=False)
        self.observer.start()
    def waitForFile(self, filename: str, timeout: int=0, timeCheckIncrement: int=1) -> Optional[str]:
        """
        Wait for a specific filename to be created in the directory specified in initFileNotifier.

        Args:
            filename: Name of File to watch for creation of. If filename includes a path it must
                match that specified in initFileNotifier.
            timeout: Max number of seconds to watch for the file creation. If timeout expires
                before the file is created then None will be returned
            timeCheckIncrement: Seconds between fallback os.path.exists() polls, used in case
                the creation event is missed.
        Returns:
            The filename of the created file (same as input arg) or None if timeout expires
        """
        _filedir, _filename = os.path.split(filename)
        # A bare filename is resolved relative to the watched directory; an
        # explicit path must agree with the watched directory.
        if _filedir in (None, ''):
            filename = os.path.join(self.watchDir, filename)
        elif _filedir != self.watchDir:
            raise StateError(f"FileWatcher: file path doesn't match watch directory: {_filedir}, {self.watchDir}")
        fileExists = os.path.exists(filename)
        if not fileExists:
            if self.observer is None:
                raise FileNotFoundError("No fileNotifier and dicom file not found %s" % (filename))
            else:
                logStr = "FileWatcher: Waiting for file {}, timeout {}s ".format(filename, timeout)
                logging.log(DebugLevels.L6, logStr)
        eventLoopCount = 0
        exitWithFileEvent = False
        eventTimeStamp = 0
        startTime = time.time()
        timeToCheckForFile = time.time() + timeCheckIncrement  # check if file exists at least every second
        while not fileExists:
            if timeout > 0 and time.time() > (startTime + timeout):
                return None
            # look for file creation event
            eventLoopCount += 1
            try:
                event, ts = self.fileNotifyQ.get(block=True, timeout=timeCheckIncrement)
            except Empty:
                # The timeout occurred on fileNotifyQ.get(); fall back to polling the filesystem.
                fileExists = os.path.exists(filename)
                continue
            if event is None:
                raise StateError('waitForFile: event is None')
            # We may have a stale event from a previous file if multiple events
            # are created per file or if the previous file eventloop
            # timed out and then the event arrived later.
            if event.src_path == filename:
                fileExists = True
                exitWithFileEvent = True
                eventTimeStamp = ts
                continue
            if time.time() > timeToCheckForFile:
                # periodically check if file exists, can occur if we get
                # swamped with unrelated events
                fileExists = os.path.exists(filename)
                timeToCheckForFile = time.time() + timeCheckIncrement
        # wait for the full file to be written, wait at most 400 ms
        waitIncrement = 0.1
        totalWriteWait = 0.0
        fileSize = os.path.getsize(filename)
        while fileSize < self.minFileSize and totalWriteWait < 0.4:
            time.sleep(waitIncrement)
            totalWriteWait += waitIncrement
            fileSize = os.path.getsize(filename)
        logging.log(DebugLevels.L6,
                    "File avail: eventLoopCount %d, writeWaitTime %.3f, "
                    "fileEventCaptured %s, fileName %s, eventTimeStamp %.5f",
                    eventLoopCount, totalWriteWait,
                    exitWithFileEvent, filename, eventTimeStamp)
        # In demo mode, pace notifications to simulate scanner arrival intervals.
        if self.demoStep is not None and self.demoStep > 0:
            self.prevEventTime = demoDelay(self.demoStep, self.prevEventTime)
        return filename
class FileNotifyHandler(PatternMatchingEventHandler):  # type: ignore
    """
    Handler class that will receive the watchdog notifications. It will queue the notifications
    in the queue provided to the init function, pairing each event with its arrival time.
    """
    def __init__(self, q, patterns):
        """
        Args:
            q (queue): Queue into which file-creation notifications will be placed.
            patterns (List[regex]): Filename patterns to watch for.
        """
        super().__init__(patterns=patterns)
        self.q = q

    def _enqueue(self, event):
        # Timestamp every event so consumers can detect stale notifications.
        self.q.put((event, time.time()))

    def on_created(self, event):
        self._enqueue(event)

    def on_modified(self, event):
        self._enqueue(event)
# import libraries for Linux version
if sys.platform in ("linux", "linux2"):
import inotify
import inotify.adapters
# Version of FileWatcher for Linux
class InotifyFileWatcher():
"""Version of FileWatcher for Linux using Inotify interface."""
def __init__(self):
self.watchDir = None
self.minFileSize = 0
self.shouldExit = False
self.demoStep = 0
self.prevEventTime = 0
# create a listening thread
self.fileNotifyQ = Queue() # type: None
self.notifier = inotify.adapters.Inotify()
self.notify_thread = threading.Thread(name='inotify', target=self.notifyEventLoop)
self.notify_thread.setDaemon(True)
self.notify_thread.start()
    def __del__(self):
        # Signal the notify thread's event loop to stop, then give it a bounded
        # time to wind down so interpreter shutdown isn't blocked indefinitely.
        self.shouldExit = True
        self.notify_thread.join(timeout=2)
def initFileNotifier(self, dir: str, filePattern: str, minFileSize: int, demoStep: int=0) -> None:
"""
Initialize the file watcher to watch for files in the specified directory.
Note: inotify doesn't use filepatterns
Args:
dir (str): Directory to watch in
filePattern (str): ignored by inotify implementation
minFileSize (int): Minimum file size necessary to consider the file is wholely written.
Below this size the filewatcher will assume file is paritally written and continue
to wait.
demoStep (int): If non-zero then it will space out file notifications by demoStep seconds.
This is used when the image files are pre-existing but we want to simulate as if
the arrive from the scanner every few seconds (demoStep seconds).
"""
self.demoStep = demoStep
self.minFileSize = minFileSize
if dir is None:
raise StateError('initFileNotifier: dir is None')
if not os.path.exists(dir):
raise NotADirectoryError("No such directory: %s" % (dir))
if dir != self.watchDir:
if self.watchDir is not None:
self.notifier.remove_watch(self.watchDir)
self.watchDir = dir
self.notifier.add_watch(self.watchDir, mask=inotify.constants.IN_CLOSE_WRITE)
def waitForFile(self, filename: str, timeout: int=0, timeCheckIncrement: int=1) -> Optional[str]:
"""
Wait for a specific filename to be created in the directory specified in initFileNotifier.
Args:
filename: Name of File to watch for creation of. If filename includes a path it must
match that specified in initFileNotifier.
timeout: Max number of seconds to watch for the file creation. If timeout expires
before the file is created then None will be returned
Returns:
The filename of the created file (same as input arg) or None if timeout expires
"""
_filedir, _filename = os.path.split(filename)
if _filedir in (None, ''):
filename = os.path.join(self.watchDir, filename)
elif _filedir != self.watchDir:
raise StateError(f"FileWatcher: file path doesn't match watch directory: {_filedir}, {self.watchDir}")
fileExists = os.path.exists(filename)
if not fileExists:
if self.notify_thread is None:
raise FileNotFoundError("No fileNotifier and dicom file not found %s" % (filename))
else:
logStr = "FileWatcher: Waiting for file {}, timeout {}s ".format(filename, timeout)
logging.log(DebugLevels.L6, logStr)
eventLoopCount = 0
exitWithFileEvent = False
eventTimeStamp = 0
startTime = time.time()
timeToCheckForFile = time.time() + timeCheckIncrement # check if file exists at least every second
while not fileExists:
if timeout > 0 and time.time() > (startTime + timeout):
return None
# | |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ultracart.api_client import ApiClient
from ultracart.configuration import Configuration
class StorefrontApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
@classmethod
def fromApiKey(cls, apiKey, verify_ssl = True, debug = False):
config = Configuration()
config.api_key['x-ultracart-simple-key'] = apiKey
config.debug = debug
config.verify_ssl = verify_ssl
api_client = ApiClient(configuration=config, header_name='X-UltraCart-Api-Version', header_value='2017-03-01')
return StorefrontApi(api_client)
def add_to_library(self, add_library_request, **kwargs): # noqa: E501
"""Add to library # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_to_library(add_library_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AddLibraryItemRequest add_library_request: New library item request (required)
:return: LibraryItemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_to_library_with_http_info(add_library_request, **kwargs) # noqa: E501
else:
(data) = self.add_to_library_with_http_info(add_library_request, **kwargs) # noqa: E501
return data
    def add_to_library_with_http_info(self, add_library_request, **kwargs):  # noqa: E501
        """Add to library  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_to_library_with_http_info(add_library_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param AddLibraryItemRequest add_library_request: New library item request (required)
        :return: LibraryItemResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Parameter names this endpoint recognises; anything else in kwargs is an error.
        all_params = ['add_library_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() is captured here so `params` holds the named arguments
        # (plus incidental locals such as `self` and `all_params`, which are not
        # read back below). kwargs entries are then validated and flattened in,
        # so this statement's position relative to the loop below matters.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_to_library" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'add_library_request' is set
        if ('add_library_request' not in params or
                params['add_library_request'] is None):
            raise ValueError("Missing the required parameter `add_library_request` when calling `add_to_library`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The library item request is sent as the JSON request body.
        body_params = None
        if 'add_library_request' in params:
            body_params = params['add_library_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/storefront/code_library', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='LibraryItemResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def apply_to_store_front(self, apply_library_request, **kwargs): # noqa: E501
"""Apply library item to storefront. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.apply_to_store_front(apply_library_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ApplyLibraryItemRequest apply_library_request: New library item (required)
:return: ApplyLibraryItemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.apply_to_store_front_with_http_info(apply_library_request, **kwargs) # noqa: E501
else:
(data) = self.apply_to_store_front_with_http_info(apply_library_request, **kwargs) # noqa: E501
return data
    def apply_to_store_front_with_http_info(self, apply_library_request, **kwargs):  # noqa: E501
        """Apply library item to storefront.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.apply_to_store_front_with_http_info(apply_library_request, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param ApplyLibraryItemRequest apply_library_request: New library item (required)
        :return: ApplyLibraryItemResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint accepts.
        all_params = ['apply_library_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the positional args; validated **kwargs are then
        # merged in and the 'kwargs' entry itself is dropped.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method apply_to_store_front" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'apply_library_request' is set
        if ('apply_library_request' not in params or
                params['apply_library_request'] is None):
            raise ValueError("Missing the required parameter `apply_library_request` when calling `apply_to_store_front`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The request object is sent as the JSON request body.
        body_params = None
        if 'apply_library_request' in params:
            body_params = params['apply_library_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/storefront/code_library/apply', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ApplyLibraryItemResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def archive_email_list(self, storefront_oid, email_list_uuid, **kwargs): # noqa: E501
"""Archive email list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive_email_list(storefront_oid, email_list_uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param str email_list_uuid: (required)
:return: EmailListArchiveResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.archive_email_list_with_http_info(storefront_oid, email_list_uuid, **kwargs) # noqa: E501
else:
(data) = self.archive_email_list_with_http_info(storefront_oid, email_list_uuid, **kwargs) # noqa: E501
return data
    def archive_email_list_with_http_info(self, storefront_oid, email_list_uuid, **kwargs):  # noqa: E501
        """Archive email list  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.archive_email_list_with_http_info(storefront_oid, email_list_uuid, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int storefront_oid: (required)
        :param str email_list_uuid: (required)
        :return: EmailListArchiveResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint accepts.
        all_params = ['storefront_oid', 'email_list_uuid']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots the positional args; validated **kwargs are then
        # merged in and the 'kwargs' entry itself is dropped.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method archive_email_list" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'storefront_oid' is set
        if ('storefront_oid' not in params or
                params['storefront_oid'] is None):
            raise ValueError("Missing the required parameter `storefront_oid` when calling `archive_email_list`")  # noqa: E501
        # verify the required parameter 'email_list_uuid' is set
        if ('email_list_uuid' not in params or
                params['email_list_uuid'] is None):
            raise ValueError("Missing the required parameter `email_list_uuid` when calling `archive_email_list`")  # noqa: E501
        collection_formats = {}
        # Both identifiers are substituted into the URL template below.
        path_params = {}
        if 'storefront_oid' in params:
            path_params['storefront_oid'] = params['storefront_oid']  # noqa: E501
        if 'email_list_uuid' in params:
            path_params['email_list_uuid'] = params['email_list_uuid']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # POST with no request body; the action is encoded in the URL.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/storefront/{storefront_oid}/email/lists/{email_list_uuid}/archive', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EmailListArchiveResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def archive_email_segment(self, storefront_oid, email_segment_uuid, **kwargs): # noqa: E501
"""Archive email segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive_email_segment(storefront_oid, email_segment_uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param str email_segment_uuid: (required)
:return: EmailSegmentArchiveResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.archive_email_segment_with_http_info(storefront_oid, email_segment_uuid, **kwargs) # noqa: E501
else:
(data) = self.archive_email_segment_with_http_info(storefront_oid, email_segment_uuid, **kwargs) # noqa: E501
return data
def archive_email_segment_with_http_info(self, storefront_oid, email_segment_uuid, **kwargs): # noqa: E501
"""Archive email segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive_email_segment_with_http_info(storefront_oid, email_segment_uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param str email_segment_uuid: (required)
:return: EmailSegmentArchiveResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storefront_oid', 'email_segment_uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method archive_email_segment" % key
)
params[key] = val
del params['kwargs']
# verify the | |
from .typing import Any, Dict, Iterable, List, Mapping, Optional, TypedDict, Union
from .url import filename_to_uri
import os
import sublime
# How the server wants to receive document changes
# (TextDocumentSyncKind in the LSP specification).
TextDocumentSyncKindNone = 0
TextDocumentSyncKindFull = 1
TextDocumentSyncKindIncremental = 2
class DiagnosticSeverity:
    """Severity of a diagnostic, most severe first (LSP DiagnosticSeverity)."""
    Error = 1
    Warning = 2
    Information = 3
    Hint = 4
class DiagnosticTag:
    """Extra metadata tags a server can attach to a diagnostic."""
    Unnecessary = 1
    Deprecated = 2
class CompletionItemTag:
    """Tags attached to a completion item."""
    Deprecated = 1
class SymbolTag:
    """Tags attached to a document/workspace symbol."""
    Deprecated = 1
class InsertTextFormat:
    """Whether a completion's insert text is literal text or a snippet."""
    PlainText = 1
    Snippet = 2
class DocumentHighlightKind:
    """Kind of a textDocument/documentHighlight result."""
    Text = 1
    Read = 2
    Write = 3
class SignatureHelpTriggerKind:
    """What caused signature help to be (re)requested."""
    Invoked = 1
    TriggerCharacter = 2
    ContentChange = 3
class InsertTextMode:
    """How whitespace in a multi-line completion insertion is handled."""
    AsIs = 1
    AdjustIndentation = 2
# Type aliases and TypedDict shapes mirroring the LSP wire format.
# These are structural descriptions of JSON payloads, not runtime classes.
DocumentUri = str
Position = TypedDict('Position', {
    'line': int,
    'character': int
})
RangeLsp = TypedDict('RangeLsp', {
    'start': Position,
    'end': Position
})
TextDocumentIdentifier = TypedDict('TextDocumentIdentifier', {
    'uri': DocumentUri,
}, total=True)
TextDocumentPositionParams = TypedDict('TextDocumentPositionParams', {
    'textDocument': TextDocumentIdentifier,
    'position': Position,
}, total=True)
ExperimentalTextDocumentRangeParams = TypedDict('ExperimentalTextDocumentRangeParams', {
    'textDocument': TextDocumentIdentifier,
    'position': Position,
    'range': RangeLsp,
}, total=True)
CodeDescription = TypedDict('CodeDescription', {
    'href': str
}, total=True)
ExecuteCommandParams = TypedDict('ExecuteCommandParams', {
    'command': str,
    'arguments': Optional[List[Any]],
}, total=False)
Command = TypedDict('Command', {
    'title': str,
    'command': str,
    'arguments': Optional[List[Any]],
}, total=True)
CodeAction = TypedDict('CodeAction', {
    'title': str,
    'kind': Optional[str],
    'diagnostics': Optional[List[Any]],
    'isPreferred': Optional[bool],
    'edit': Optional[dict],
    'command': Optional[Command],
}, total=True)
CodeLens = TypedDict('CodeLens', {
    'range': RangeLsp,
    'command': Optional[Command],
    'data': Any,
    # Custom property to bring along the name of the session
    'session_name': Optional[str]
}, total=True)
ParameterInformation = TypedDict('ParameterInformation', {
    'label': Union[str, List[int]],
    'documentation': Union[str, Dict[str, str]]
}, total=False)
SignatureInformation = TypedDict('SignatureInformation', {
    'label': str,
    'documentation': Union[str, Dict[str, str]],
    'parameters': List[ParameterInformation]
}, total=False)
SignatureHelp = TypedDict('SignatureHelp', {
    'signatures': List[SignatureInformation],
    'activeSignature': int,
    'activeParameter': int,
}, total=False)
SignatureHelpContext = TypedDict('SignatureHelpContext', {
    'triggerKind': int,
    'triggerCharacter': str,
    'isRetrigger': bool,
    'activeSignatureHelp': SignatureHelp
}, total=False)
Location = TypedDict('Location', {
    'uri': DocumentUri,
    'range': RangeLsp
}, total=True)
DocumentSymbol = TypedDict('DocumentSymbol', {
    'name': str,
    'detail': Optional[str],
    'kind': int,
    'tags': Optional[List[int]],
    'deprecated': Optional[bool],
    'range': RangeLsp,
    'selectionRange': RangeLsp,
    'children': Optional[List[Any]]  # mypy doesn't support recursive types like Optional[List['DocumentSymbol']]
}, total=True)
SymbolInformation = TypedDict('SymbolInformation', {
    'name': str,
    'kind': int,
    'tags': Optional[List[int]],
    'deprecated': Optional[bool],
    'location': Location,
    'containerName': Optional[str]
}, total=True)
LocationLink = TypedDict('LocationLink', {
    'originSelectionRange': Optional[RangeLsp],
    'targetUri': DocumentUri,
    'targetRange': RangeLsp,
    'targetSelectionRange': RangeLsp
}, total=False)
DiagnosticRelatedInformation = TypedDict('DiagnosticRelatedInformation', {
    'location': Location,
    'message': str
}, total=False)
Diagnostic = TypedDict('Diagnostic', {
    'range': RangeLsp,
    'severity': int,
    'code': Union[int, str],
    'codeDescription': CodeDescription,
    'source': str,
    'message': str,
    'tags': List[int],
    'relatedInformation': List[DiagnosticRelatedInformation]
}, total=False)
TextEdit = TypedDict('TextEdit', {
    'newText': str,
    'range': RangeLsp
}, total=True)
CompletionItemLabelDetails = TypedDict('CompletionItemLabelDetails', {
    'detail': str,
    'description': str
}, total=False)
CompletionItem = TypedDict('CompletionItem', {
    'additionalTextEdits': List[TextEdit],
    'command': Command,
    'commitCharacters': List[str],
    'data': Any,
    'deprecated': bool,
    'detail': str,
    'documentation': Union[str, Dict[str, str]],
    'filterText': str,
    'insertText': str,
    'insertTextFormat': InsertTextFormat,
    'insertTextMode': InsertTextMode,
    'kind': int,
    'label': str,
    'labelDetails': CompletionItemLabelDetails,
    'preselect': bool,
    'sortText': str,
    'tags': List[int],
    'textEdit': TextEdit
}, total=False)
CompletionList = TypedDict('CompletionList', {
    'isIncomplete': bool,
    'items': List[CompletionItem],
}, total=True)
MarkedString = Union[str, Dict[str, str]]
MarkupContent = Dict[str, str]
Hover = TypedDict('Hover', {
    'contents': Union[MarkedString, MarkupContent, List[MarkedString]],
    'range': RangeLsp,
}, total=False)
PublishDiagnosticsParams = TypedDict('PublishDiagnosticsParams', {
    'uri': DocumentUri,
    'version': Optional[int],
    'diagnostics': List[Diagnostic],
}, total=False)
FileSystemWatcher = TypedDict('FileSystemWatcher', {
    'globPattern': str,
    'kind': int,
}, total=True)
DidChangeWatchedFilesRegistrationOptions = TypedDict('DidChangeWatchedFilesRegistrationOptions', {
    'watchers': List[FileSystemWatcher],
}, total=True)
# WatchKind bit flags for workspace/didChangeWatchedFiles registration.
WatchKind = int
WatchKindCreate = 1
WatchKindChange = 2
WatchKindDelete = 4
# FileChangeType values reported in workspace/didChangeWatchedFiles events.
FileChangeType = int
FileChangeTypeCreated = 1
FileChangeTypeChanged = 2
FileChangeTypeDeleted = 3
FileEvent = TypedDict("FileEvent", {
    "uri": DocumentUri,
    "type": FileChangeType,
}, total=True)
class Request:
    """An outgoing LSP request, optionally tied to the view that spawned it.

    The factory classmethods construct requests for specific LSP methods;
    ``to_payload`` produces the JSON-RPC 2.0 wire dict for a given id.
    """
    __slots__ = ('method', 'params', 'view', 'progress')
    def __init__(
        self,
        method: str,
        params: Optional[Mapping[str, Any]] = None,
        view: Optional[sublime.View] = None,
        progress: bool = False
    ) -> None:
        self.method = method
        self.params = params
        self.view = view
        # Starts as a bool flag; may later hold a progress token string.
        self.progress = progress  # type: Union[bool, str]
    @classmethod
    def initialize(cls, params: Mapping[str, Any]) -> 'Request':
        return Request("initialize", params)
    @classmethod
    def complete(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/completion", params, view)
    @classmethod
    def signatureHelp(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/signatureHelp", params, view)
    @classmethod
    def codeAction(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/codeAction", params, view)
    @classmethod
    def documentColor(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request('textDocument/documentColor', params, view)
    @classmethod
    def willSaveWaitUntil(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/willSaveWaitUntil", params, view)
    @classmethod
    def documentSymbols(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/documentSymbol", params, view)
    @classmethod
    def documentHighlight(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/documentHighlight", params, view)
    @classmethod
    def resolveCompletionItem(cls, params: CompletionItem, view: sublime.View) -> 'Request':
        return Request("completionItem/resolve", params, view)
    @classmethod
    def shutdown(cls) -> 'Request':
        return Request("shutdown")
    def __repr__(self) -> str:
        return self.method + " " + str(self.params)
    def to_payload(self, id: int) -> Dict[str, Any]:
        # JSON-RPC 2.0 request envelope; the id is assigned by the transport.
        return {
            "jsonrpc": "2.0",
            "id": id,
            "method": self.method,
            "params": self.params
        }
    @classmethod
    def semanticTokens(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/semanticTokens/full", params, view)
    @classmethod
    def semanticTokensDelta(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/semanticTokens/full/delta", params, view)
    @classmethod
    def semanticTokensRange(cls, params: Mapping[str, Any], view: sublime.View) -> 'Request':
        return Request("textDocument/semanticTokens/range", params, view)
class ErrorCode:
    """Error codes carried in JSON-RPC / LSP error responses."""
    # Defined by JSON RPC
    ParseError = -32700
    InvalidRequest = -32600
    MethodNotFound = -32601
    InvalidParams = -32602
    InternalError = -32603
    # JSON-RPC reserved server-error range; the two codes below it are
    # assigned by the LSP specification inside that range.
    ServerErrorStart = -32099
    ServerErrorEnd = -32000
    ServerNotInitialized = -32002
    UnknownErrorCode = -32001
    # Defined by the protocol
    RequestCancelled = -32800
    ContentModified = -32801
class Error(Exception):
    """A JSON-RPC error payload, usable directly as a Python exception.

    ``code`` is one of the ``ErrorCode`` constants, the exception message
    doubles as the JSON-RPC ``message`` field, and ``data`` carries the
    optional, error-specific payload.
    """

    def __init__(self, code: int, message: str, data: Any = None) -> None:
        super().__init__(message)
        self.code = code
        self.data = data

    @classmethod
    def from_lsp(cls, params: Any) -> "Error":
        # Build from an incoming JSON-RPC "error" object; "data" is optional.
        return Error(params["code"], params["message"], params.get("data"))

    @classmethod
    def from_exception(cls, ex: Exception) -> 'Error':
        # Wrap an arbitrary exception as an InternalError.
        return Error(ErrorCode.InternalError, str(ex))

    def to_lsp(self) -> Dict[str, Any]:
        payload = {"code": self.code, "message": super().__str__()}  # type: Dict[str, Any]
        # Per JSON-RPC, "data" is omitted when there is nothing to report.
        if self.data:
            payload["data"] = self.data
        return payload

    def __str__(self) -> str:
        return "{0} ({1})".format(super().__str__(), self.code)
class Response:
    """A successful JSON-RPC response: the ``result`` for ``request_id``."""

    __slots__ = ('request_id', 'result')

    def __init__(self, request_id: Any, result: Union[None, Mapping[str, Any], Iterable[Any]]) -> None:
        self.request_id = request_id
        self.result = result

    def to_payload(self) -> Dict[str, Any]:
        # Wire format per the JSON-RPC 2.0 specification.
        return {
            "id": self.request_id,
            "jsonrpc": "2.0",
            "result": self.result
        }
class Notification:
    """An outgoing JSON-RPC notification (no response is expected).

    The factory classmethods construct notifications for specific LSP
    methods; ``to_payload`` produces the JSON-RPC 2.0 wire dict.
    """
    __slots__ = ('method', 'params')
    def __init__(self, method: str, params: Optional[Mapping[str, Any]] = None) -> None:
        self.method = method
        self.params = params
    @classmethod
    def initialized(cls) -> 'Notification':
        return Notification("initialized", {})
    @classmethod
    def didOpen(cls, params: dict) -> 'Notification':
        return Notification("textDocument/didOpen", params)
    @classmethod
    def didChange(cls, params: dict) -> 'Notification':
        return Notification("textDocument/didChange", params)
    @classmethod
    def willSave(cls, params: dict) -> 'Notification':
        return Notification("textDocument/willSave", params)
    @classmethod
    def didSave(cls, params: dict) -> 'Notification':
        return Notification("textDocument/didSave", params)
    @classmethod
    def didClose(cls, params: dict) -> 'Notification':
        return Notification("textDocument/didClose", params)
    @classmethod
    def didChangeConfiguration(cls, params: dict) -> 'Notification':
        return Notification("workspace/didChangeConfiguration", params)
    @classmethod
    def didChangeWatchedFiles(cls, params: dict) -> 'Notification':
        return Notification("workspace/didChangeWatchedFiles", params)
    @classmethod
    def didChangeWorkspaceFolders(cls, params: dict) -> 'Notification':
        return Notification("workspace/didChangeWorkspaceFolders", params)
    @classmethod
    def exit(cls) -> 'Notification':
        return Notification("exit")
    def __repr__(self) -> str:
        return self.method + " " + str(self.params)
    def to_payload(self) -> Dict[str, Any]:
        # Notifications carry no "id": the peer must not reply to them.
        return {
            "jsonrpc": "2.0",
            "method": self.method,
            "params": self.params
        }
class Point(object):
    """A zero-based row/column position; ``col`` counts UTF-16 code units."""

    def __init__(self, row: int, col: int) -> None:
        # Coerce eagerly so comparisons and serialization see real ints.
        self.row = int(row)
        self.col = int(col)  # in UTF-16

    def __repr__(self) -> str:
        return "{0}:{1}".format(self.row, self.col)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Point):
            raise NotImplementedError()
        return (self.row, self.col) == (other.row, other.col)

    @classmethod
    def from_lsp(cls, point: Position) -> 'Point':
        # LSP speaks "line"/"character"; this class uses "row"/"col".
        return Point(point['line'], point['character'])

    def to_lsp(self) -> Position:
        return {"line": self.row, "character": self.col}
class Range(object):
    """An inclusive [start, end] span between two ``Point``s."""

    def __init__(self, start: Point, end: Point) -> None:
        self.start = start
        self.end = end

    def __repr__(self) -> str:
        return "({} {})".format(self.start, self.end)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Range):
            raise NotImplementedError()
        return self.start == other.start and self.end == other.end

    @classmethod
    def from_lsp(cls, range: RangeLsp) -> 'Range':
        return Range(Point.from_lsp(range['start']), Point.from_lsp(range['end']))

    def to_lsp(self) -> RangeLsp:
        return {
            'start': self.start.to_lsp(),
            'end': self.end.to_lsp()
        }

    def contains(self, point: Point) -> bool:
        """Whether *point* lies within this range (endpoints included).

        Fix: the previous single-expression check misclassified points on the
        first/last row of a multi-line range — e.g. with start=(1, 5) and
        end=(3, 2) it reported (1, 0) as contained, and (3, 1) as not
        contained. The start/end columns only constrain their own rows.
        """
        if point.row < self.start.row or point.row > self.end.row:
            return False
        if point.row == self.start.row and point.col < self.start.col:
            return False
        if point.row == self.end.row and point.col > self.end.col:
            return False
        return True

    def intersects(self, rge: 'Range') -> bool:
        """Whether the two ranges share at least one endpoint-inclusive point."""
        return self.contains(rge.start) or self.contains(rge.end) or \
            rge.contains(self.start) or rge.contains(self.end)

    def extend(self, rge: 'Range') -> 'Range':
        """
        Extends current range to fully include another range. If another range is already fully
        enclosed within the current range then nothing changes.
        :param rge: The region to extend current with
        :returns: The extended region (itself)
        """
        if rge.contains(self.start):
            self.start = rge.start
        if rge.contains(self.end):
            self.end = rge.end
        return self
class WorkspaceFolder:
__slots__ = ('name', 'path')
def __init__(self, name: str, path: str) -> None:
self.name = name
self.path = path
@classmethod
def from_path(cls, path: str) -> 'WorkspaceFolder':
return cls(os.path.basename(path) or path, path)
def __hash__(self) -> int:
return hash((self.name, self.path))
def __repr__(self) -> str:
return "{}('{}', '{}')".format(self.__class__.__name__, self.name, self.path)
def __str__(self) -> str:
return self.path
def __eq__(self, other: Any) -> bool:
if isinstance(other, WorkspaceFolder):
return self.name == | |
self._remove_empty_and_cast(tuple_data)
raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type))
class QueryParameter(ParameterBase, StyleFormSerializer):
    """Serializer for an OpenAPI ``in: query`` parameter.

    Supports the form, spaceDelimited and pipeDelimited styles, or a
    ``content`` map with a single JSON media type.
    """
    def __init__(
        self,
        name: str,
        required: bool = False,
        style: typing.Optional[ParameterStyle] = None,
        explode: bool = False,
        allow_reserved: typing.Optional[bool] = None,
        schema: typing.Optional[typing.Type[Schema]] = None,
        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None
    ):
        super().__init__(
            name,
            in_type=ParameterInType.QUERY,
            required=required,
            style=style,
            explode=explode,
            allow_reserved=allow_reserved,
            schema=schema,
            content=content
        )
    def __serialize_space_delimited(
        self,
        in_data: typing.Union[None, int, float, str, bool, dict, list]
    ) -> typing.Tuple[typing.Tuple[str, str], ...]:
        # '%20' is the percent-encoded space placed between joined values.
        separator = '%20'
        empty_val = ()
        return self._serialize_x(
            in_data,
            style=ParameterStyle.SPACE_DELIMITED,
            name=self.name,
            explode=self.explode,
            separator=separator,
            empty_val=empty_val
        )
    def __serialize_pipe_delimited(
        self,
        in_data: typing.Union[None, int, float, str, bool, dict, list]
    ) -> typing.Tuple[typing.Tuple[str, str], ...]:
        # Values joined with a literal '|' per the pipeDelimited style.
        separator = '|'
        empty_val = ()
        return self._serialize_x(
            in_data,
            style=ParameterStyle.PIPE_DELIMITED,
            name=self.name,
            explode=self.explode,
            separator=separator,
            empty_val=empty_val
        )
    def serialize(
        self,
        in_data: typing.Union[
            Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict]
    ) -> typing.Tuple[typing.Tuple[str, str]]:
        # Schema-described parameters: cast/validate, JSON-encode primitives,
        # then dispatch on the declared style.
        if self.schema:
            cast_in_data = self.schema(in_data)
            cast_in_data = self._json_encoder.default(cast_in_data)
            """
            form -> query
            query:
            - GET/HEAD/DELETE: could use fields
            - PUT/POST: must use urlencode to send parameters
            returns fields: tuple
            spaceDelimited -> query
            returns fields
            pipeDelimited -> query
            returns fields
            deepObject -> query, https://github.com/OAI/OpenAPI-Specification/issues/1706
            returns fields
            """
            if self.style:
                # TODO update query ones to omit setting values when [] {} or None is input
                if self.style is ParameterStyle.FORM:
                    return self._serialize_form(cast_in_data, explode=self.explode, name=self.name)
                elif self.style is ParameterStyle.SPACE_DELIMITED:
                    return self.__serialize_space_delimited(cast_in_data)
                elif self.style is ParameterStyle.PIPE_DELIMITED:
                    return self.__serialize_pipe_delimited(cast_in_data)
        # self.content will be length one
        for content_type, schema in self.content.items():
            cast_in_data = schema(in_data)
            cast_in_data = self._json_encoder.default(cast_in_data)
            if content_type == self._json_content_type:
                return self._serialize_json(cast_in_data)
            raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type))
class CookieParameter(ParameterBase, StyleFormSerializer):
    """Serializer for an OpenAPI ``in: cookie`` parameter (form style)."""
    def __init__(
        self,
        name: str,
        required: bool = False,
        style: typing.Optional[ParameterStyle] = None,
        explode: bool = False,
        allow_reserved: typing.Optional[bool] = None,
        schema: typing.Optional[typing.Type[Schema]] = None,
        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None
    ):
        super().__init__(
            name,
            in_type=ParameterInType.COOKIE,
            required=required,
            style=style,
            explode=explode,
            allow_reserved=allow_reserved,
            schema=schema,
            content=content
        )
    def serialize(
        self,
        in_data: typing.Union[
            Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict]
    ) -> typing.Tuple[typing.Tuple[str, str]]:
        # Schema-described parameters: cast/validate, JSON-encode primitives,
        # then serialize using the form style.
        if self.schema:
            cast_in_data = self.schema(in_data)
            cast_in_data = self._json_encoder.default(cast_in_data)
            """
            form -> cookie
            returns fields: tuple
            """
            if self.style:
                return self._serialize_form(cast_in_data, explode=self.explode, name=self.name)
        # self.content will be length one
        for content_type, schema in self.content.items():
            cast_in_data = schema(in_data)
            cast_in_data = self._json_encoder.default(cast_in_data)
            if content_type == self._json_content_type:
                return self._serialize_json(cast_in_data)
            raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type))
class HeaderParameter(ParameterBase, StyleSimpleSerializer):
    """Serializer for an OpenAPI ``in: header`` parameter (simple style)."""
    def __init__(
        self,
        name: str,
        required: bool = False,
        style: typing.Optional[ParameterStyle] = None,
        explode: bool = False,
        allow_reserved: typing.Optional[bool] = None,
        schema: typing.Optional[typing.Type[Schema]] = None,
        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None
    ):
        super().__init__(
            name,
            in_type=ParameterInType.HEADER,
            required=required,
            style=style,
            explode=explode,
            allow_reserved=allow_reserved,
            schema=schema,
            content=content
        )
    @staticmethod
    def __to_headers(in_data: typing.Tuple[typing.Tuple[str, str], ...]) -> HTTPHeaderDict[str, str]:
        # Drop empty pairs, then load the rest into urllib3's header mapping.
        data = tuple(t for t in in_data if t)
        headers = HTTPHeaderDict()
        if not data:
            return headers
        headers.extend(data)
        return headers
    def _serialize_simple(
        self,
        in_data: typing.Union[None, int, float, str, bool, dict, list],
    ) -> HTTPHeaderDict[str, str]:
        # Simple-style serialization produces (name, value) tuples.
        tuple_data = self._serialize_simple_tuple(in_data, self.name, self.explode, self.in_type)
        return self.__to_headers(tuple_data)
    def serialize(
        self,
        in_data: typing.Union[
            Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict]
    ) -> HTTPHeaderDict[str, str]:
        # Schema-described parameters: cast/validate, JSON-encode primitives,
        # then serialize using the simple style.
        if self.schema:
            cast_in_data = self.schema(in_data)
            cast_in_data = self._json_encoder.default(cast_in_data)
            """
            simple -> header
            headers: PoolManager needs a mapping, tuple is close
            returns headers: dict
            """
            if self.style:
                return self._serialize_simple(cast_in_data)
        # self.content will be length one
        for content_type, schema in self.content.items():
            cast_in_data = schema(in_data)
            cast_in_data = self._json_encoder.default(cast_in_data)
            if content_type == self._json_content_type:
                tuple_data = self._serialize_json(cast_in_data)
                return self.__to_headers(tuple_data)
            raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type))
class Encoding:
    """Per-property encoding info for multipart / urlencoded request bodies."""
    def __init__(
        self,
        content_type: str,
        headers: typing.Optional[typing.Dict[str, HeaderParameter]] = None,
        style: typing.Optional[ParameterStyle] = None,
        explode: bool = False,
        allow_reserved: bool = False,
    ):
        self.content_type = content_type
        self.headers = headers
        self.style = style
        self.explode = explode
        self.allow_reserved = allow_reserved
class MediaType:
    """
    Used to store request and response body schema information
    encoding:
        A map between a property name and its encoding information.
        The key, being the property name, MUST exist in the schema as a property.
        The encoding object SHALL only apply to requestBody objects when the media type is
        multipart or application/x-www-form-urlencoded.
    """
    def __init__(
        self,
        schema: typing.Type[Schema],
        encoding: typing.Optional[typing.Dict[str, Encoding]] = None,
    ):
        # The Schema class used to validate/deserialize this media type.
        self.schema = schema
        # Optional per-property encoding overrides (see class docstring).
        self.encoding = encoding
@dataclass
class ApiResponse:
    """A deserialized HTTP response: the raw urllib3 response plus typed body/headers."""
    response: urllib3.HTTPResponse
    body: typing.Union[Unset, typing.Type[Schema]]
    headers: typing.Union[Unset, typing.List[HeaderParameter]]
    def __init__(
        self,
        response: urllib3.HTTPResponse,
        body: typing.Union[Unset, typing.Type[Schema]],
        headers: typing.Union[Unset, typing.List[HeaderParameter]]
    ):
        """
        pycharm needs this to prevent 'Unexpected argument' warnings
        """
        self.response = response
        self.body = body
        self.headers = headers
@dataclass
class ApiResponseWithoutDeserialization(ApiResponse):
    """ApiResponse variant used when the caller opts out of body deserialization."""
    response: urllib3.HTTPResponse
    body: typing.Union[Unset, typing.Type[Schema]] = unset
    headers: typing.Union[Unset, typing.List[HeaderParameter]] = unset
class OpenApiResponse:
    """Describes one documented response: its content schemas and headers,
    and knows how to deserialize a raw urllib3 response into ``response_cls``.
    """
    def __init__(
        self,
        response_cls: typing.Type[ApiResponse] = ApiResponse,
        content: typing.Optional[typing.Dict[str, MediaType]] = None,
        headers: typing.Optional[typing.List[HeaderParameter]] = None,
    ):
        self.headers = headers
        # None means "no documented body"; an empty dict is a spec error.
        if content is not None and len(content) == 0:
            raise ValueError('Invalid value for content, the content dict must have >= 1 entry')
        self.content = content
        self.response_cls = response_cls
    @staticmethod
    def __deserialize_json(response: urllib3.HTTPResponse) -> typing.Any:
        # Decode the raw bytes as UTF-8 text before JSON parsing.
        decoded_data = response.data.decode("utf-8")
        return json.loads(decoded_data)
    @staticmethod
    def __file_name_from_content_disposition(content_disposition: typing.Optional[str]) -> typing.Optional[str]:
        # Pull the filename out of e.g. 'attachment; filename="report.csv"'.
        if content_disposition is None:
            return None
        match = re.search('filename="(.+?)"', content_disposition)
        if not match:
            return None
        return match.group(1)
    def __deserialize_application_octet_stream(
        self, response: urllib3.HTTPResponse
    ) -> typing.Union[bytes, io.BufferedReader]:
        """
        urllib3 use cases:
        1. when preload_content=True (stream=False) then supports_chunked_reads is False and bytes are returned
        2. when preload_content=False (stream=True) then supports_chunked_reads is True and
            a file will be written and returned
        """
        if response.supports_chunked_reads():
            file_name = self.__file_name_from_content_disposition(response.headers.get('content-disposition'))
            if file_name is None:
                _fd, path = tempfile.mkstemp()
            else:
                path = os.path.join(tempfile.gettempdir(), file_name)
            # TODO get file_name from the filename at the end of the url if it exists
            # Stream the body to disk in fixed-size chunks.
            with open(path, 'wb') as new_file:
                chunk_size = 1024
                while True:
                    data = response.read(chunk_size)
                    if not data:
                        break
                    new_file.write(data)
            # release_conn is needed for streaming connections only
            response.release_conn()
            # Hand an open read handle back to the caller (caller must close it).
            new_file = open(path, 'rb')
            return new_file
        else:
            return response.data
    def deserialize(self, response: urllib3.HTTPResponse, configuration: Configuration) -> ApiResponse:
        content_type = response.getheader('content-type')
        deserialized_body = unset
        streamed = response.supports_chunked_reads()
        if self.content is not None:
            # NOTE(review): exact string comparison — a content-type carrying
            # parameters (e.g. 'application/json; charset=utf-8') would fall
            # through to NotImplementedError; confirm servers never send them.
            if content_type == 'application/json':
                body_data = self.__deserialize_json(response)
            elif content_type == 'application/octet-stream':
                body_data = self.__deserialize_application_octet_stream(response)
            else:
                raise NotImplementedError('Deserialization of {} has not yet been implemented'.format(content_type))
            body_schema = self.content[content_type].schema
            _instantiation_metadata = InstantiationMetadata(from_server=True, configuration=configuration)
            deserialized_body = body_schema._from_openapi_data(
                body_data, _instantiation_metadata=_instantiation_metadata)
        elif streamed:
            # Nothing consumed the stream; return the connection to the pool.
            response.release_conn()
        deserialized_headers = unset
        if self.headers is not None:
            # NOTE(review): header deserialization is not implemented; this
            # branch deliberately leaves the headers as `unset`.
            deserialized_headers = unset
        return self.response_cls(
            response=response,
            headers=deserialized_headers,
            body=deserialized_body
        )
class ApiClient:
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
_pool = None
__json_encoder = JSONEncoder()
def __init__(
self,
configuration: typing.Optional[Configuration] = None,
header_name: typing.Optional[str] = None,
header_value: typing.Optional[str] = None,
cookie: typing.Optional[str] = None,
pool_threads: int = 1
):
if configuration is None:
configuration = Configuration()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'OpenAPI-Generator/1.0.0/python'
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None,
headers: typing.Optional[HTTPHeaderDict] = None,
body: typing.Optional[typing.Union[str, | |
<reponame>r3k4t/AutoTorIP
import os
import sys
import requests
import socks
import time
from colorama import init
init(strip=not sys.stdout.isatty())
from termcolor import cprint
from pyfiglet import figlet_format
os.system("clear")
cprint(figlet_format('AutoTorIP'),'green',attrs=['bold'])
print (chr(27)+"[34m"+" Version : 3.0")
print
print (chr(27)+"[33m"+"Author : <NAME>(RKT)")
print (chr(27)+"[33m"+"Github : https://github.com/r3k4t")
def get_tor_ip():
session = requests.session()
session.proxies = {}
session.proxies['http']='socks5://localhost:9050'
session.proxies['https']='socks5://localhost:9050'
try:
r = session.get('http://httpbin.org/ip')
except Exception as e:
print (e)
else:
return r.text
if __name__ == "__main__":
for i in range(1):
time.sleep(1)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor restart")
time.sleep(5)
print (get_tor_ip())
time.sleep(5)
os.system("sudo service tor | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################################################
##
## This file is part of Tango Control System
##
## http://www.tango-controls.org/
##
## Author: <NAME>
##
## This is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
###########################################################################
"""
.. panic.py: python API for a PyAlarm based alarms system
:mod:`panic` -- Package for Alarms and Notification of Incidences from Controls
===============================================================================
.. This package is great.
.. raw:: html
<hr/>
.. rubric:: Usage
And here some usage examples.
.. raw:: html
<hr/>
"""
import traceback,re,time,os,sys
import fandango
import fandango as fn
from fandango import first,searchCl,matchCl,clmatch,clsearch,\
isString,isSequence,isNumber,isFalse,isCallable,isMapping,\
xor,now,str2time,time2str,END_OF_TIME,Cached
from fandango.dicts import defaultdict
from fandango.tango import CachedAttributeProxy, AttrDataFormat, retango
from fandango.tango import PyTango,get_tango_host, check_device_cached
from fandango.tango import parse_tango_model
from fandango.log import tracer,shortstr
from .properties import *
get_tango = fandango.tango.get_database
_proxies = fandango.ProxiesDict()
GetProxy = _proxies.get
SetProxy = _proxies.__setitem__
"""
The _proxies object allows to retrieve DeviceProxy or DeviceServer objects.
* GetProxy(a/dev/name) will return a DeviceProxy by default.
* SetProxy('a/dev/name',object) allows to set a different object
to be returned (e.g. a device running in the same process)
"""
## Methods for matching device/attribute/alarm names
def intersect(a,b):
a,b = str(a).lower(),str(b).lower()
return a in b or b in a
def anyendswith(a,b):
a,b = str(a).lower(),str(b).lower()
if '/' not in a: a = '/'+a
if '/' not in b: b = '/'+b
return a.endswith(b) or b.endswith(a)
def unicode2str(obj):
if isMapping(obj,strict=True):
n = dict(unicode2str(t) for t in obj.items())
elif isSequence(obj):
n = list(unicode2str(t) for t in obj)
elif isString(obj): n = str(obj)
else: n = obj
return n
def substitute(message, substitutions=[[], {}], depth=1):
"""
Substitute `{%x%}` items values provided by substitutions
:param message: message to be substituted
:param substitutions: list of list and dictionary.
List is used for {%number%} substitutions and dictionary for
{%name%} substitutions
:param depth: defines number of pass
:return: substituted message
"""
if not isinstance(message, str):
raise Exception('StringExpected!')
if depth <1 :
return message
new_message = message
# substitute numbered substitutions
i = 0
for value in substitutions[0]:
new_message = new_message.replace("{%%%d%%}" % i, value)
i += 1
# processing named substitutions
for (k, value) in substitutions[1].items():
new_message = new_message.replace("{%%%s%%}" % k, value)
return substitute(new_message, substitutions, depth-1)
###############################################################################
#@todo: Tango access methods
def getAttrValue(obj,default=Exception):
"""
Extracts rvalue in tango/taurus3/4 compatible way
If default = True, obj is returned
If default = Exception, only exception objects are returned (else None)
"""
rm = getattr(obj,'read',None)
if isCallable(rm):
try:
obj = rm()
except Exception,e:
obj = e
if default is Exception:
if isinstance(obj,(PyTango.DevError,Exception)):
return obj
default = None
r,v,d = getattr(obj,'rvalue',None),None,None
if r is None:
v = getattr(obj,'value',None)
if v is None:
d = obj if default is True else default
#print('getAttrValue(%s)'%fd.shortstr(obj)
#+': %s,%s,%s'%(r,v,d))
r = r or v or d
if r is None and \
getattr(obj,'data_format',None) == AttrDataFormat.SPECTRUM \
and obj.is_empty:
r = []
return r
def getPanicProperty(prop):
"""
Method to obtain global properties
It manages compatibility with PANIC <= 6 using PyAlarm properties
BUT!!! It triggers exceptions in device servers if called at Init()
"""
print('getPanicProperty(%s)' % prop)
k = [prop] if not fn.isSequence(prop) else prop
r = get_tango().get_property('PANIC',k)
if not any(r.values()):
r = get_tango().get_class_property('PyAlarm',k)
r = r if fn.isSequence(prop) else r[prop]
return list(r) if fn.isSequence(r) else r
def setPanicProperty(prop, value):
"""
Method to write global properties
It manages compatibility with PANIC <= 6 using PyAlarm properties
"""
print('setPanicProperty(%s, %s)' % (prop, value))
r = get_tango().get_property('PANIC',[prop])[prop]
o = get_tango().get_class_property('PyAlarm',[prop])[prop]
if o and not r:
get_tango().put_class_property('PyAlarm',{prop:value})
return 'PyAlarm'
else:
get_tango().put_property('PANIC',{prop:value})
return 'PANIC'
def getAlarmDeviceProperties(device):
""" Method used in all panic classes """
props = get_tango().get_device_property(device,ALARM_TABLES.keys())
#Updating old property names for backwards compatibility
if not props['AlarmList']:
props['AlarmList'] = get_tango().get_device_property(
device,['AlarmsList'])['AlarmsList']
if props['AlarmList']:
print('%s: AlarmsList property renamed to AlarmList'%device)
get_tango().put_device_property(
device,{'AlarmList':props['AlarmList'],'AlarmsList':[]})
return props
def getAlarmDeviceProperty(device, prop):
""" Gets the value of pointed property from the device """
return get_tango().get_device_property(device,[prop])[prop]
def setAlarmDeviceProperty(device, prop, value):
""" Sets property of the device """
get_tango().put_device_property(device,{prop:[value]})
###############################################################################
# Alarm object used by API
class Alarm(object):
"""
Alarm object used by API's to keep the state of alarms
It maintains 3 time variables:
self.updated : last time that set_active was called (active or not)
self.active : time at which the alarm was activated (only if active)
self._time : last time that the alarm state changed
"""
def __init__(self,tag,device='',formula='',description='',receivers='',
config='', severity='',api=None):
#Info from the database
self.api = api
self.setup(tag,device,formula,description,
receivers,config,severity,write=False)
self.clear()
def setup(self,tag=None,device=None,formula=None,description=None,
receivers=None,config=None, severity=None,write=False):
""" Assigns values to Alarm struct """
notNone = lambda v,default: default
setVar = lambda k,v: setattr(self,k,v if v is not None
else getattr(self,k,''))
[setVar(k,v) for k,v in (('tag',tag),('device',device),
('formula',formula),('description',description),
('receivers',receivers),('config',config),
('severity',severity or DEFAULT_SEVERITY))]
self.name = self.tag
if write: self.write()
def trace(self,msg):
print('%s: Alarm(%s): %s'%(fn.time2str(),self.tag,msg))
def clear(self):
""" This method just initializes Flags updated from PyAlarm devices,
it doesn't reset alarm in devices """
self._state = None
self._time = None
self.counter = 0 #N cycles being active
self.active = 0 #Last timestamp it was activated
self.updated = 0 #Last value check
self.recovered = 0 #Last time it was recovered
self.acknowledged = 0 #If active no more reminders will be sent
self.disabled = 0 #If disabled the alarm is not evaluated
self.sent = 0 #Messages sent
self.last_sent = 0 #Time when last message was sent
self.last_error = '' #Last exception
@staticmethod
def parse_formula(formula):
""" Gets "TAG:formula#comment" and returns (tag,formula) """
try:
tag,formula = formula.split('#')[0].split(':',1)
except:
tag,formula = None,None
return tag,formula
def parse_severity(self):
""" Replaces $TAG and $SEVERITY in Alarm severities """
return self.severity.replace('$TAG',self.tag).replace(
'$SEVERITY',self.device)
def parse_description(self):
""" Replaces $TAG and $NAME in Alarm descriptions """
return self.description.replace('$TAG',self.tag).replace(
'$NAME',self.device)
def get_attribute(self,full=False):
""" Gets the boolean attribute associated to this alarm """
m = (self.device+'/' if full else '')
m += self.tag.replace(' ','_').replace('/','_')
return m
def get_model(self):
model = self.get_attribute(full=True)
if ':' not in model: model = self.api.tango_host + '/' + model
return model
def get_ds(self):
""" Gets and AlarmDS object related to this alarm """
try: return self.api.devices[self.device]
except: return AlarmDS(self.device,api=self.api)
def get_engine(self):
"""@TODO this method should return the DevImpl
PyAlarm instance or a DeviceProxy to it"""
return self.get_ds().get_proxy()
def set_active(self,value,count=1,t=None):
"""
BE CAREFUL, IT MAY INTERFERE WITH COUNTER MANAGEMENT WITHIN PYALARM
Will increment/decrement counter and set active and time flags
if the count value (e.g. 1) has been reached
"""
self.updated = now()
if value:
if self.active == 0:
self.counter+=1
elif self.counter>0:
self.counter-=1
if value and value>0 and self.counter>=count and not self.active:
#ACTIVE
tracer('%s.set_active(%s,%s)'%(self.tag,value,self.counter))
self.last_sent = self.updated
self.active = value
self.set_time(t or (value>1 and value))
if value and value<0:
#ERROR
if self.active>=0: self.set_time(t)
self.active = -1
if not value and not self.counter:
#NORM
self.active = 0
self.set_time(t)
if not self.recovered:
#print('%s => %s'%(self.tag,0))
#self.recovered = self.get_time()
pass
return self.active
def get_active(self):
""" This method connects to the Device to get the value and timestamp
of the alarm attribute """
try:
self.active = self.get_time(attr_value = True)
except:
self.active = None
return self.active
def is_active(self,cache=True):
v = self.active if cache else self.get_active()
if self.disabled or v < 0: return -1
elif v > 0: return 1
else: return 0
# DO NOT CREATE PROPERTY DESCRIPTOR FOR SET/GET_ACTIVE!!!
def set_state(self,state=None):
"""
without arguments, this method will update the state from flags
with an state as argument, it will just update the value and flags
accordingly.
with an alarm summary as argument, it will update everything from it
with an activealarms row; it will update Active/Norm/Error states only
"""
t0 = now()
o,a,tag,stamp = self._state,state,self.tag,0
#tracer('%s._state was %s since %s(%s)'
#%(tag,o,self._time,time2str(self._time,us=True)))
| |
#!/usr/bin/python
''' Copyright (c) 2014, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.'''
version = "1.15"
import sys
import math
import numpy
import scipy
from scipy.spatial import Delaunay
from scipy.spatial.distance import cdist
from scipy.spatial.distance import pdist
import getopt
import textwrap
import time
import random
import gc
import multiprocessing
import os
import shutil
import cPickle as pickle
import string
import platform
import gzip
import warnings
################## MULTIPROCESSING ##################
class multi_threading():
    """A class for running calculations on multiple processors"""
    # Class-level default; shadowed by the instance attribute assigned in __init__.
    results = []
    def __init__(self, inputs, num_processors, task_class, params, progress_bar_prefix=''):
        """Launches a calculation on multiple processors
        Arguments:
        inputs -- A list, containing all the input required for the calculation
        num_processors -- An integer, the requested number of processors to use
        task_class -- An class, the class governing what calculations will be run on a given thread
        params -- NOTE(review): accepted but never used inside this method -- confirm whether callers depend on it
        progress_bar_prefix -- An optional string, the prefix to append to the progress bar
        Returns:
        Nothing, though the objects self.results list is populated with the calculation results
        """
        # set up the progress bar: choose up to 50 input indices at which a '*' will be printed
        self.results = []
        indices_to_star = []
        if len(inputs) < 50:
            indices_to_star = range(len(inputs))
        else:
            # sample 50 distinct indices at random
            while len(indices_to_star) < 50:
                indx_to_add = random.choice(range(len(inputs)))
                if not indx_to_add in indices_to_star: indices_to_star.append(indx_to_add)
        if progress_bar_prefix != '':
            # pad the prefix so that prefix + 50 stars fill a 78-column line
            toadd = 78 - len(progress_bar_prefix) - 50
            progress_bar_prefix = progress_bar_prefix + (" "*toadd)
            sys.stdout.write(progress_bar_prefix)
        if num_processors == 1: # so just running on 1 processor, perhaps under windows
            # serial path: run every input in this process; results_queue is None
            single_thread = task_class()
            single_thread.total_num_tasks = len(inputs)
            single_thread.indices_to_star = indices_to_star
            single_thread.results = []
            for item in inputs: single_thread.value_func(item, None)
            self.results = single_thread.results
        else: # so it actually is running on multiple processors
            cpu_count = 1
            cpu_count = multiprocessing.cpu_count()
            # first, if num_processors <= 0, determine the number of processors to use programatically
            if num_processors <= 0: num_processors = cpu_count
            # reduce the number of processors if too many have been specified
            if len(inputs) < num_processors: num_processors = len(inputs)
            if len(inputs) == 0: # if there are no inputs, there's nothing to do.
                self.results = []
                return
            # now, divide the inputs into the appropriate number of processors (round-robin assignment)
            inputs_divided = {}
            for t in range(num_processors): inputs_divided[t] = []
            for t in range(0, len(inputs), num_processors):
                for t2 in range(num_processors):
                    index = t + t2
                    if index < len(inputs): inputs_divided[t2].append(inputs[index])
            # now, run each division on its own processor
            running = multiprocessing.Value('i', num_processors)  # count of workers still running
            mutex = multiprocessing.Lock()  # guards decrements of 'running' in the workers
            arrays = []  # NOTE(review): populated below but never read afterwards
            threads = []
            for i in range(num_processors):
                athread = task_class()
                athread.total_num_tasks = len(inputs)
                athread.indices_to_star = indices_to_star
                threads.append(athread)
                arrays.append(multiprocessing.Array('i',[0, 1]))
            results_queue = multiprocessing.Queue() # to keep track of the results
            processes = []
            for i in range(num_processors):
                p = multiprocessing.Process(target=threads[i].runit, args=(running, mutex, results_queue, inputs_divided[i]))
                p.start()
                processes.append(p)
            # NOTE(review): busy-wait spins the CPU until every worker has decremented
            # 'running' (see general_task.runit, which decrements then enqueues its chunk).
            while running.value > 0: is_running = 0 # wait for everything to finish
            # compile all results into one list; each worker puts exactly one chunk on the queue,
            # and get() blocks until that chunk is available
            for thread in threads:
                chunk = results_queue.get()
                self.results.extend(chunk)
            if progress_bar_prefix != '': print # finish the progress-bar line (Python 2 print statement)
class general_task:
    """Base class describing the work one worker process performs.

    Subclasses are expected to provide a ``value_func(item, results_queue)``
    method and may accumulate per-worker output in ``self.results``.
    """

    # Class-level default output list; workers typically rebind an instance copy.
    results = []

    def print_star_if_appropriate(self, current_index):
        """Emit one '*' of the progress bar when *current_index* is starred.

        Arguments:
        current_index -- An integer, the index of the current calculation
        """
        should_star = current_index in self.indices_to_star
        if should_star:
            sys.stdout.write("*")

    def runit(self, running, mutex, results_queue, items):
        """Run ``value_func`` over this worker's chunk, then report completion.

        Arguments:
        running -- A multiprocessing.Value object (count of live workers)
        mutex -- A multiprocessing.Lock object guarding 'running'
        results_queue -- A multiprocessing.Queue() object for the calculation output
        items -- A list, the input data required for the calculation
        """
        for an_item in items:
            self.value_func(an_item, results_queue)
        # Atomically mark this worker as finished.
        with mutex:
            running.value -= 1
        # Hand the whole per-worker result chunk back as a single queue entry.
        results_queue.put(self.results)
################## FUNCTIONS AND CLASSES TO EXTEND NUMPY ##################
class Quaternion:
"""A class supporting quaternion arithmetic"""
def __init__(self, s, x, y, z):
self.v = numpy.empty(4)
self.v[0] = s
self.v[1] = x
self.v[2] = y
self.v[3] = z
def __str__(self):
"""String containing quaternion information in the form of s x y z
Returns:
A string, containing all information about this quaternion
"""
return "" + str(self.v[0]) + "\t" + str(self.v[1]) + "\t" + str(self.v[2]) + "\t" + str(self.v[3])
def copy_of(self):
"""Returns a copy of self"""
return Quaternion(self.v[0], self.v[1], self.v[2], self.v[3])
def load_from_mat(self, m):
"""Converts a rotation matrix that is pure orthogonal (det(matrix)=1) into a Quaternion. Adapted from http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/index.htm
Arguments:
m -- A 2D numpy.array representing a pure orthogonal matrix
"""
#Make sure m is a 3x3 array
if m.shape[0] != 3 or m.shape[1] != 3:
print "Could not load quaternion from matrix...size is not (3x3)"
return
#Check that matrix is orthogonal. m_T = m_inv
if not numpy.array_equal(numpy.transpose(m),numpy.linalg.inv(m)):
print "Load Quaternion error. Matrix is not orthogonal"
return
#Need to make sure that the matrix is special orthogonal
if math.fabs(1-numpy.linalg.det(m)) > 0.000001: #Done for rounding errors
print "Load Quaternion error. Determinant is not 1"
return
#First calculate the sum of the diagonal elements
t = m.trace()
if t > 0:
S = math.sqrt(t + 1.0) * 2
self.v[0] = .25 * S
self.v[1] = (m[2,1] - m[1,2]) / S
self.v[2] = (m[0,2] - m[2,0]) / S
self.v[3] = (m[1,0] - m[0,1]) / S
elif m[0,0] > m[1,1] and m[0,0] > m[2,2]:
S = math.sqrt(1.0 + m[0,0] - m[1,1] - m[2,2]) * 2
self.v[0] = (m[2,1] - m[1,2]) / S
self.v[1] = .25 * S
self.v[2] = (m[0,1] + m[1,0]) / S
self.v[3] = (m[0,2] + m[2,0]) / S
elif m[1,1] > m[2,2]:
S = math.sqrt(1.0 + m[1,1] - m[0,0] - m[2,2]) * 2
self.v[0] = (m[0,2] - m[2,0]) / S
self.v[1] = (m[0,1] + m[1,0]) / S
self.v[2] = .25 * S
self.v[3] = (m[2,1] + m[1,2]) / S
else:
S = math.sqrt(1.0) * 2
self.v[0] = (m[1,0] - m[0,1]) / S
self.v[1] = (m[0,2] + m[2,0]) / S
self.v[2] = (m[2,1] + m[1,2]) / S
self.v[3] = .25 * S
def rep_as_44_matrix(self):
"""Creates a 4x4 matrix representation of the Quaternion.
Returns:
A 4x4 numpy array
"""
n = self.normalize()
qw = n.v[0]
qx = n.v[1]
qy = n.v[2]
qz = n.v[3]
return numpy.array([[qw,qx,qy,qz],[-qx,qw,-qz,qy],[-qy,qz,qw,-qx],[-qz,-qy,qx,qw]])
def to_matrix(self):
"""Converts to a normalized 3x3 matrix
Returns:
A 3x3 numpy.array, corresponding to the quaternion
"""
#First normalize
n = self.normalize()
qw = n.v[0]
qx = n.v[1]
qy = n.v[2]
qz = n.v[3]
return numpy.array(
[[1.0 - 2.0 * qy * qy - 2.0 * qz * qz, 2.0 * qx * qy - 2.0 * qz * qw, 2.0 * qx * qz + 2.0 * qy * qw],
[2.0 * qx * qy | |
is breached.
id: :class:`~async_v20.TransactionID`
The Transaction's Identifier.
time: :class:`~async_v20.DateTime`
The date/time when the Transaction was created.
user_id: :class:`int`
The ID of the user that initiated the creation of the Transaction.
account_id: :class:`~async_v20.AccountID`
The ID of the Account the Transaction was created for.
batch_id: :class:`~async_v20.TransactionID`
The ID of the "batch" that the Transaction belongs to.
Transactions in the same batch are applied to the Account simultaneously.
request_id: :class:`~async_v20.RequestID`
The Request ID of the request which generated the transaction.
client_trade_id: :class:`~async_v20.TradeID`
The client ID of the Trade to be closed when the price threshold is breached.
price: :class:`~async_v20.PriceValue`
The price threshold specified for the StopLoss Order. The associated Trade will be
closed by a market price that is equal to or worse than this threshold.
time_in_force: :class:`~async_v20.TimeInForce`
The time-in-force requested for the StopLoss Order. Restricted
to "GTC", "GFD" and "GTD" for StopLoss Orders.
gtd_time: :class:`~async_v20.DateTime`
The date/time when the StopLoss Order will
be cancelled if its timeInForce is "GTD".
trigger_condition: :class:`~async_v20.OrderTriggerCondition`
Specification of what component of a price should be used
for comparison when determining if the Order should be filled.
reason: :class:`~async_v20.StopLossOrderReason`
The reason that the Stop Loss Order was initiated
client_extensions: :class:`~async_v20.ClientExtensions`
Client Extensions to add to the Order (only provided
if the Order is being created with client extensions).
order_fill_transaction_id: :class:`~async_v20.TransactionID`
The ID of the OrderFill Transaction that caused this Order to be created
(only provided if this Order was created automatically when another Order was filled).
replaces_order_id: :class:`~async_v20.OrderID`
The ID of the Order that this Order replaces
(only provided if this Order replaces an existing Order).
cancelling_transaction_id: :class:`~async_v20.TransactionID`
The ID of the Transaction that cancels the replaced
Order (only provided if this Order replaces an existing Order).
guaranteed: :class:`bool`
Flag indicating that the Stop Loss Order is guaranteed. The default value
depends on the GuaranteedStopLossOrderMode of the account, if it is
REQUIRED, the default will be true, for DISABLED or ENABLED the default
is false.
"""
def __init__(self, trade_id: TradeID, price: PriceValue, id: TransactionID = sentinel, time: DateTime = sentinel,
             user_id: int = sentinel, account_id: AccountID = sentinel, batch_id: TransactionID = sentinel,
             request_id: RequestID = sentinel,
             client_trade_id: ClientID = sentinel, time_in_force: TimeInForce = 'GTC',
             gtd_time: DateTime = sentinel,
             trigger_condition: OrderTriggerCondition = 'DEFAULT', reason: StopLossOrderReason = sentinel,
             client_extensions: ClientExtensions = sentinel, order_fill_transaction_id: TransactionID = sentinel,
             replaces_order_id: OrderID = sentinel, cancelling_transaction_id: TransactionID = sentinel,
             guaranteed: bool = sentinel):
    # locals() here contains exactly this signature's parameters (self included),
    # so every argument is forwarded to Model.__init__ by name. ``sentinel``
    # presumably marks "not supplied" values -- confirm against Model.__init__.
    Model.__init__(**locals())
class TrailingStopLossOrderTransaction(Transaction, type=TransactionType('TRAILING_STOP_LOSS_ORDER')):
    """A TrailingStopLossOrderTransaction represents the creation of a
    TrailingStopLoss Order in the user's Account.
    Attributes:
        trade_id: :class:`~async_v20.TradeID`
            The ID of the Trade to close when the price threshold is breached.
        id: :class:`~async_v20.TransactionID`
            The Transaction's Identifier.
        time: :class:`~async_v20.DateTime`
            The date/time when the Transaction was created.
        user_id: :class:`int`
            The ID of the user that initiated the creation of the Transaction.
        account_id: :class:`~async_v20.AccountID`
            The ID of the Account the Transaction was created for.
        batch_id: :class:`~async_v20.TransactionID`
            The ID of the "batch" that the Transaction belongs to.
            Transactions in the same batch are applied to the Account simultaneously.
        request_id: :class:`~async_v20.RequestID`
            The Request ID of the request which generated the transaction.
        client_trade_id: :class:`~async_v20.TradeID`
            The client ID of the Trade to be closed when the price threshold is breached.
        distance: :class:`~async_v20.PriceValue`
            The price distance specified for the TrailingStopLoss Order.
        time_in_force: :class:`~async_v20.TimeInForce`
            The time-in-force requested for the TrailingStopLoss Order. Restricted
            to "GTC", "GFD" and "GTD" for TrailingStopLoss Orders.
        gtd_time: :class:`~async_v20.DateTime`
            The date/time when the StopLoss Order will
            be cancelled if its timeInForce is "GTD".
        trigger_condition: :class:`~async_v20.OrderTriggerCondition`
            Specification of what component of a price should be used
            for comparison when determining if the Order should be filled.
        reason: :class:`~async_v20.TrailingStopLossOrderReason`
            The reason that the Trailing Stop Loss Order was initiated
        client_extensions: :class:`~async_v20.ClientExtensions`
            Client Extensions to add to the Order (only provided
            if the Order is being created with client extensions).
        order_fill_transaction_id: :class:`~async_v20.TransactionID`
            The ID of the OrderFill Transaction that caused this Order to be created
            (only provided if this Order was created automatically when another Order was filled).
        replaces_order_id: :class:`~async_v20.OrderID`
            The ID of the Order that this Order replaces
            (only provided if this Order replaces an existing Order).
        cancelling_transaction_id: :class:`~async_v20.TransactionID`
            The ID of the Transaction that cancels the replaced
            Order (only provided if this Order replaces an existing Order).
    """
    def __init__(self, trade_id: TradeID, distance: PriceValue, id: TransactionID = sentinel, time: DateTime = sentinel,
                 user_id: int = sentinel, account_id: AccountID = sentinel, batch_id: TransactionID = sentinel,
                 request_id: RequestID = sentinel,
                 client_trade_id: ClientID = sentinel, time_in_force: TimeInForce = 'GTC',
                 gtd_time: DateTime = sentinel,
                 trigger_condition: OrderTriggerCondition = 'DEFAULT', reason: TrailingStopLossOrderReason = sentinel,
                 client_extensions: ClientExtensions = sentinel, order_fill_transaction_id: TransactionID = sentinel,
                 replaces_order_id: OrderID = sentinel, cancelling_transaction_id: TransactionID = sentinel):
        # locals() here contains exactly this signature's parameters (self included),
        # so every argument is forwarded to Model.__init__ by name. ``sentinel``
        # presumably marks "not supplied" values -- confirm against Model.__init__.
        Model.__init__(**locals())
class LimitOrder(Order, type=OrderType('LIMIT')):
"""A LimitOrder is an order that is created with a price threshold, and will
only be filled by a price that is equal to or better than the threshold.
Attributes:
instrument: :class:`~async_v20.InstrumentName`
The Limit Order's Instrument.
units: :class:`~async_v20.DecimalNumber`
The quantity requested to be filled by the Limit Order. A posititive number of units
results in a long Order, and a negative number of units results in a short Order.
price: :class:`~async_v20.PriceValue`
The price threshold specified for the Limit Order. The Limit Order will only be
filled by a market price that is equal to or better than this price.
id: :class:`~async_v20.OrderID`
The Order's identifier, unique within the Order's Account.
create_time: :class:`~async_v20.DateTime`
The time when the Order was created.
state: :class:`~async_v20.OrderState`
The current state of the Order.
client_extensions: :class:`~async_v20.ClientExtensions`
The client extensions of the Order. Do not set, modify,
or delete clientExtensions if your account is associated with MT4.
time_in_force: :class:`~async_v20.TimeInForce`
The time-in-force requested for the Limit Order.
gtd_time: :class:`~async_v20.DateTime`
The date/time when the Limit Order will
be cancelled if its timeInForce is "GTD".
position_fill: :class:`~async_v20.OrderPositionFill`
Specification of how Positions in the Account
are modified when the Order is filled.
trigger_condition: :class:`~async_v20.OrderTriggerCondition`
Specification of what component of a price should be used
for comparison when determining if the Order should be filled.
take_profit_on_fill: :class:`~async_v20.TakeProfitDetails`
TakeProfitDetails specifies the details of a Take Profit Order to be created on behalf of
a client. This may happen when an Order
is filled that opens a Trade requiring a Take Profit, or when a Trade's dependent Take Profit Order is
modified directly through the Trade.
stop_loss_on_fill: :class:`~async_v20.StopLossDetails`
StopLossDetails specifies the details of a Stop Loss Order to be created on behalf of a
client. This may happen when an Order
is filled that opens a Trade requiring a Stop Loss, or when a Trade's dependent Stop Loss Order is modified
directly through the Trade.
trailing_stop_loss_on_fill: :class:`~async_v20.TrailingStopLossDetails`
TrailingStopLossDetails specifies the details of a Trailing Stop Loss Order to be
created on behalf of a client. This may happen when an Order is
filled that opens a Trade requiring a Trailing Stop Loss, or when a Trade's dependent Trailing Stop Loss
Order is modified directly through the Trade.
trade_client_extensions: :class:`~async_v20.ClientExtensions`
Client Extensions to add to the Trade created when the Order is filled (if such a
Trade is created). Do not set, modify, or delete tradeClientExtensions if your account is associated with
MT4.
filling_transaction_id: :class:`~async_v20.TransactionID`
ID of the Transaction that filled this Order
(only provided when the Order's state is FILLED)
filled_time: :class:`~async_v20.DateTime`
Date/time when the Order was filled (only
provided when the Order's state is FILLED)
trade_opened_id: :class:`~async_v20.TradeID`
Trade ID of Trade opened when the Order was filled (only provided when the
Order's state is FILLED and a Trade was opened as a result of the fill)
trade_reduced_id: :class:`~async_v20.TradeID`
Trade ID of Trade reduced when the Order was filled (only provided when the
Order's state is FILLED and a Trade was reduced as a result of the fill)
trade_closed_ids: ( :class:`~async_v20.TradeID`, ...),
Trade IDs of Trades closed when the Order was filled (only provided when the Order's
state is FILLED and one or more Trades were closed as a result of the fill)
cancelling_transaction_id: :class:`~async_v20.TransactionID`
ID of | |
export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='MathsRegionType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MathsRegionType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'MathsRegionType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MathsRegionType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='MathsRegionType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MathsRegionType'):
    """Write this region's XML attributes (id, orientation, bgColour) to outfile,
    skipping any attribute already recorded in already_processed."""
    write = outfile.write
    if self.id is not None and 'id' not in already_processed:
        already_processed.add('id')
        write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
    if self.orientation is not None and 'orientation' not in already_processed:
        already_processed.add('orientation')
        write(' orientation="%s"' % self.gds_format_float(self.orientation, input_name='orientation'))
    if self.bgColour is not None and 'bgColour' not in already_processed:
        already_processed.add('bgColour')
        write(' bgColour=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.bgColour), input_name='bgColour')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='MathsRegionType', fromsubclass_=False, pretty_print=True):
    """Write the child elements of this region (only Coords) to outfile."""
    eol_ = '\n' if pretty_print else ''  # kept for parity with sibling export methods; unused below
    if self.Coords is None:
        return
    prefix = self.Coords_nsprefix_ + ':' if (UseCapturedNS_ and self.Coords_nsprefix_) else ''
    self.Coords.export(outfile, level, prefix, namespacedef_='', name_='Coords', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
    """Populate this object from an XML element node; returns self."""
    self.gds_collector_ = gds_collector_
    if SaveElementTreeNode:
        self.gds_elementtree_node_ = node
    self.ns_prefix_ = node.prefix
    self.buildAttributes(node, node.attrib, set())
    for child_node in node:
        # Strip any namespace wrapper from the tag to get the local name.
        local_name = Tag_pattern_.match(child_node.tag).groups()[-1]
        self.buildChildren(child_node, node, local_name, gds_collector_=gds_collector_)
    return self
def buildAttributes(self, node, attrs, already_processed):
    """Read the id, orientation and bgColour XML attributes from node."""
    id_value = find_attr_value_('id', node)
    if id_value is not None and 'id' not in already_processed:
        already_processed.add('id')
        self.id = id_value
    orientation_value = find_attr_value_('orientation', node)
    if orientation_value is not None and 'orientation' not in already_processed:
        already_processed.add('orientation')
        self.orientation = self.gds_parse_float(orientation_value, node, 'orientation')
    colour_value = find_attr_value_('bgColour', node)
    if colour_value is not None and 'bgColour' not in already_processed:
        already_processed.add('bgColour')
        self.bgColour = colour_value
        self.validate_ColourSimpleType(self.bgColour)  # validate type ColourSimpleType
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
    """Attach a parsed Coords child element to this region."""
    if nodeName_ != 'Coords':
        return
    coords = CoordsType.factory(parent_object_=self)
    coords.build(child_, gds_collector_=gds_collector_)
    coords.original_tagname_ = 'Coords'
    self.Coords = coords
# end class MathsRegionType
class NoiseRegionType(GeneratedsSuper):
    """Noise regions are regions where no real data lies, only false data
    created by artifacts on the document or scanner noise."""
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None

    def __init__(self, id=None, Coords=None, gds_collector_=None, **kwargs_):
        # Bookkeeping shared by all generateDS node classes.
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Attribute and single child of the NoiseRegion element.
        self.id = _cast(None, id)
        self.id_nsprefix_ = None
        self.Coords = Coords
        self.Coords_nsprefix_ = None

    @staticmethod
    def factory(*args_, **kwargs_):
        """Instantiate NoiseRegionType or a registered subclass override."""
        if CurrentSubclassModule_ is not None:
            override = getSubclassFromModule_(
                CurrentSubclassModule_, NoiseRegionType)
            if override is not None:
                return override(*args_, **kwargs_)
        target = NoiseRegionType.subclass or NoiseRegionType
        return target(*args_, **kwargs_)

    def get_ns_prefix_(self):
        return self.ns_prefix_

    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix

    def get_Coords(self):
        return self.Coords

    def set_Coords(self, Coords):
        self.Coords = Coords

    def get_id(self):
        return self.id

    def set_id(self, id):
        self.id = id

    def hasContent_(self):
        # The only possible child element is Coords.
        return self.Coords is not None

    def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='NoiseRegionType', pretty_print=True):
        """Serialize this region (attributes plus children) as XML to outfile."""
        override_ns = GenerateDSNamespaceDefs_.get('NoiseRegionType')
        if override_ns is not None:
            namespacedef_ = override_ns
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None and name_ == 'NoiseRegionType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        ns_part = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, ns_part))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='NoiseRegionType')
        if not self.hasContent_():
            outfile.write('/>%s' % eol_)
            return
        outfile.write('>%s' % eol_)
        self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='NoiseRegionType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='NoiseRegionType'):
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))

    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='NoiseRegionType', fromsubclass_=False, pretty_print=True):
        eol_ = '\n' if pretty_print else ''  # kept for parity with sibling export methods; unused below
        if self.Coords is None:
            return
        prefix = self.Coords_nsprefix_ + ':' if (UseCapturedNS_ and self.Coords_nsprefix_) else ''
        self.Coords.export(outfile, level, prefix, namespacedef_='', name_='Coords', pretty_print=pretty_print)

    def build(self, node, gds_collector_=None):
        """Populate this object from an XML element node; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, set())
        for child_node in node:
            local_name = Tag_pattern_.match(child_node.tag).groups()[-1]
            self.buildChildren(child_node, node, local_name, gds_collector_=gds_collector_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        id_value = find_attr_value_('id', node)
        if id_value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = id_value

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ != 'Coords':
            return
        coords = CoordsType.factory(parent_object_=self)
        coords.build(child_, gds_collector_=gds_collector_)
        coords.original_tagname_ = 'Coords'
        self.Coords = coords
# end class NoiseRegionType
class UnknownRegionType(GeneratedsSuper):
    """To be used if the region type cannot be ascertained."""
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None

    def __init__(self, id=None, Coords=None, gds_collector_=None, **kwargs_):
        # Bookkeeping shared by all generateDS node classes.
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Attribute and single child of the UnknownRegion element.
        self.id = _cast(None, id)
        self.id_nsprefix_ = None
        self.Coords = Coords
        self.Coords_nsprefix_ = None

    @staticmethod
    def factory(*args_, **kwargs_):
        """Instantiate UnknownRegionType or a registered subclass override."""
        if CurrentSubclassModule_ is not None:
            replacement = getSubclassFromModule_(
                CurrentSubclassModule_, UnknownRegionType)
            if replacement is not None:
                return replacement(*args_, **kwargs_)
        chosen = UnknownRegionType.subclass or UnknownRegionType
        return chosen(*args_, **kwargs_)

    def get_ns_prefix_(self):
        return self.ns_prefix_

    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix

    def get_Coords(self):
        return self.Coords

    def set_Coords(self, Coords):
        self.Coords = Coords

    def get_id(self):
        return self.id

    def set_id(self, id):
        self.id = id

    def hasContent_(self):
        # The only possible child element is Coords.
        return self.Coords is not None

    def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='UnknownRegionType', pretty_print=True):
        """Serialize this region (attributes plus children) as XML to outfile."""
        replacement_ns = GenerateDSNamespaceDefs_.get('UnknownRegionType')
        if replacement_ns is not None:
            namespacedef_ = replacement_ns
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None and name_ == 'UnknownRegionType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        ns_text = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, ns_text))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='UnknownRegionType')
        if not self.hasContent_():
            outfile.write('/>%s' % eol_)
            return
        outfile.write('>%s' % eol_)
        self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='UnknownRegionType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='UnknownRegionType'):
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))

    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='UnknownRegionType', fromsubclass_=False, pretty_print=True):
        eol_ = '\n' if pretty_print else ''  # kept for parity with sibling export methods; unused below
        if self.Coords is None:
            return
        child_prefix = self.Coords_nsprefix_ + ':' if (UseCapturedNS_ and self.Coords_nsprefix_) else ''
        self.Coords.export(outfile, level, child_prefix, namespacedef_='', name_='Coords', pretty_print=pretty_print)

    def build(self, node, gds_collector_=None):
        """Populate this object from an XML element node; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, set())
        for child_node in node:
            local_name = Tag_pattern_.match(child_node.tag).groups()[-1]
            self.buildChildren(child_node, node, local_name, gds_collector_=gds_collector_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        id_value = find_attr_value_('id', node)
        if id_value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = id_value

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ != 'Coords':
            return
        coords = CoordsType.factory(parent_object_=self)
        coords.build(child_, gds_collector_=gds_collector_)
        coords.original_tagname_ = 'Coords'
        self.Coords = coords
# end class UnknownRegionType
class FrameRegionType(GeneratedsSuper):
"""A region that surrounds other regions (e.g. a box with
blue background containing text regions)"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, id=None, bgColour=None, borderPresent=None, Coords=None, TextRegion=None, ImageRegion=None, LineDrawingRegion=None, GraphicRegion=None, TableRegion=None, ChartRegion=None, SeparatorRegion=None, MathsRegion=None, NoiseRegion=None, FrameRegion=None, UnknownRegion=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.id = _cast(None, id)
self.id_nsprefix_ = None
self.bgColour = _cast(None, bgColour)
self.bgColour_nsprefix_ = None
self.borderPresent = _cast(bool, borderPresent)
self.borderPresent_nsprefix_ = None
self.Coords = Coords
self.Coords_nsprefix_ = None
if TextRegion is None:
self.TextRegion = []
else:
self.TextRegion = TextRegion
self.TextRegion_nsprefix_ = None
if ImageRegion is None:
self.ImageRegion = []
else:
self.ImageRegion = ImageRegion
self.ImageRegion_nsprefix_ = None
if LineDrawingRegion is None:
self.LineDrawingRegion = []
else:
self.LineDrawingRegion = LineDrawingRegion
self.LineDrawingRegion_nsprefix_ = None
if GraphicRegion is None:
self.GraphicRegion = []
else:
self.GraphicRegion = GraphicRegion
self.GraphicRegion_nsprefix_ = None
if TableRegion is None:
self.TableRegion = []
else:
self.TableRegion = TableRegion
self.TableRegion_nsprefix_ = None
if ChartRegion is None:
self.ChartRegion = []
else:
self.ChartRegion = ChartRegion
self.ChartRegion_nsprefix_ = None
if SeparatorRegion is None:
self.SeparatorRegion = []
else:
self.SeparatorRegion = SeparatorRegion
self.SeparatorRegion_nsprefix_ = None
if MathsRegion is None:
self.MathsRegion = []
| |
"""Tests for the policies in the hbaselines/goal_conditioned subdirectory."""
import unittest
import numpy as np
import tensorflow as tf
import os
from gym.spaces import Box
from hbaselines.utils.tf_util import get_trainable_vars
from hbaselines.goal_conditioned.td3 import GoalConditionedPolicy as \
TD3GoalConditionedPolicy
from hbaselines.goal_conditioned.sac import GoalConditionedPolicy as \
SACGoalConditionedPolicy
from hbaselines.algorithms.rl_algorithm import SAC_PARAMS
from hbaselines.algorithms.rl_algorithm import TD3_PARAMS
from hbaselines.algorithms.rl_algorithm import GOAL_CONDITIONED_PARAMS
class TestBaseGoalConditionedPolicy(unittest.TestCase):
"""Test GoalConditionedPolicy in hbaselines/goal_conditioned/base.py."""
def setUp(self):
    """Create a fresh TF session plus default TD3 goal-conditioned parameters."""
    params = {
        'sess': tf.compat.v1.Session(),
        'ac_space': Box(low=-1, high=1, shape=(1,)),
        'ob_space': Box(low=-2, high=2, shape=(2,)),
        'co_space': Box(low=-3, high=3, shape=(2,)),
        'verbose': 0,
        'total_steps': 1,
    }
    # Layer the algorithm defaults on top of the base parameters.
    params.update(TD3_PARAMS.copy())
    params.update(GOAL_CONDITIONED_PARAMS.copy())
    self.policy_params = params
def tearDown(self):
self.policy_params['sess'].close()
del self.policy_params
# Clear the graph.
tf.compat.v1.reset_default_graph()
    def test_store_transition(self):
        """Check the functionality of the store_transition() method.

        This method is tested for the following cases:

        1. hindsight = False, relative_goals = False
        2. hindsight = False, relative_goals = True
        3. hindsight = True,  relative_goals = False
        4. hindsight = True,  relative_goals = True
        5. hindsight = False, relative_goals = False, meta_period = [5, 2]
        """
        # =================================================================== #
        #                             test case 1                             #
        # =================================================================== #
        policy_params = self.policy_params.copy()
        policy_params['relative_goals'] = False
        policy_params['hindsight'] = False
        policy_params['subgoal_testing_rate'] = 1
        policy_params['meta_period'] = 4
        policy_params['batch_size'] = 2
        policy = TD3GoalConditionedPolicy(**policy_params)

        # Initialize the variables of the policy.
        policy.sess.run(tf.compat.v1.global_variables_initializer())

        # Run the initialize method.
        policy.initialize()

        # Fix the meta action so the expected buffer contents are known.
        policy.meta_action = [np.array([5, 5])]

        # Store one full meta-period (4 worker steps) of transitions.
        for i in range(4):
            obs0 = np.array([i for _ in range(2)])
            context0 = np.array([i for _ in range(3)])
            action = np.array([i for _ in range(1)])
            reward = i
            obs1 = np.array([i+1 for _ in range(2)])
            context1 = np.array([i for _ in range(3)])
            done, is_final_step, evaluate = False, False, False

            policy.store_transition(
                obs0=obs0,
                context0=context0,
                action=action,
                reward=reward,
                obs1=obs1,
                context1=context1,
                done=done,
                is_final_step=is_final_step,
                evaluate=evaluate,
                env_num=0,
            )

        obs_t = policy.replay_buffer._obs_t[0]
        action_t = policy.replay_buffer._action_t[0]
        reward = policy.replay_buffer._reward_t[0]
        done = policy.replay_buffer._done_t[0]

        # check the various attributes
        self.assertTrue(
            all(all(obs_t[i] ==
                    [np.array([0, 0]),
                     np.array([1, 1]),
                     np.array([2, 2]),
                     np.array([3, 3]),
                     np.array([4, 4])][i])
                for i in range(len(obs_t)))
        )

        # Meta actions stay fixed at [5, 5]; worker actions are 0..3.
        for i in range(len(action_t)):
            self.assertTrue(
                all(all(action_t[i][j] ==
                        [[np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5])],
                         [np.array([0]),
                          np.array([1]),
                          np.array([2]),
                          np.array([3])]][i][j])
                    for j in range(len(action_t[i])))
            )

        # Meta reward is the sum of env rewards (6); worker rewards are the
        # negative distances between the observations and the fixed goal.
        self.assertEqual(reward,
                         [[6], [-5.656854249501219, -4.24264068713107,
                                -2.8284271247638677, -1.4142135624084504]])

        self.assertEqual(done,
                         [False, False, False, False])
    def test_store_transition_2(self):
        """Check store_transition() with relative_goals=True, hindsight=False.

        See the case list in ``test_store_transition``.
        """
        # =================================================================== #
        #                             test case 2                             #
        # =================================================================== #
        policy_params = self.policy_params.copy()
        policy_params['relative_goals'] = True
        policy_params['hindsight'] = False
        policy_params['subgoal_testing_rate'] = 1
        policy_params['meta_period'] = 4
        policy_params['batch_size'] = 2
        policy = TD3GoalConditionedPolicy(**policy_params)

        # Initialize the variables of the policy.
        policy.sess.run(tf.compat.v1.global_variables_initializer())

        # Run the initialize method.
        policy.initialize()

        # Fix the meta action so the expected buffer contents are known.
        policy.meta_action = [np.array([5, 5])]

        for i in range(4):
            obs0 = np.array([i for _ in range(2)])
            context0 = np.array([i for _ in range(3)])
            action = np.array([i for _ in range(1)])
            reward = i
            obs1 = np.array([i+1 for _ in range(2)])
            context1 = np.array([i for _ in range(3)])
            done, is_final_step, evaluate = False, False, False

            policy.store_transition(
                obs0=obs0,
                context0=context0,
                action=action,
                reward=reward,
                obs1=obs1,
                context1=context1,
                done=done,
                is_final_step=is_final_step,
                evaluate=evaluate,
                env_num=0,
            )

        obs_t = policy.replay_buffer._obs_t[0]
        action_t = policy.replay_buffer._action_t[0]
        reward = policy.replay_buffer._reward_t[0]
        done = policy.replay_buffer._done_t[0]

        # check the various attributes
        self.assertTrue(
            all(all(obs_t[i] ==
                    [np.array([0, 0]),
                     np.array([1, 1]),
                     np.array([2, 2]),
                     np.array([3, 3]),
                     np.array([4, 4])][i])
                for i in range(len(obs_t)))
        )

        # With relative goals the final stored meta action is re-expressed
        # relative to the last observation, hence the trailing [4, 4].
        for i in range(len(action_t)):
            self.assertTrue(
                all(all(action_t[i][j] ==
                        [[np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([4, 4])],
                         [np.array([0]),
                          np.array([1]),
                          np.array([2]),
                          np.array([3])]][i][j])
                    for j in range(len(action_t[i])))
            )

        # Relative goals keep the goal a constant offset ahead, so every
        # worker reward is the same distance.
        self.assertEqual(reward,
                         [[6], [-5.656854249501219, -5.656854249501219,
                                -5.656854249501219, -5.656854249501219]])

        self.assertEqual(done, [False, False, False, False])
    def test_store_transition_3(self):
        """Check store_transition() with relative_goals=False, hindsight=True.

        See the case list in ``test_store_transition``.  The replay buffer
        should contain both the unchanged sample and a hindsight sample whose
        goal is replaced by the final observation.
        """
        # =================================================================== #
        #                             test case 3                             #
        # =================================================================== #
        policy_params = self.policy_params.copy()
        policy_params['relative_goals'] = False
        policy_params['hindsight'] = True
        policy_params['subgoal_testing_rate'] = 1
        policy_params['meta_period'] = 4
        policy_params['batch_size'] = 2
        policy = TD3GoalConditionedPolicy(**policy_params)

        # Initialize the variables of the policy.
        policy.sess.run(tf.compat.v1.global_variables_initializer())

        # Run the initialize method.
        policy.initialize()

        # Fix the meta action so the expected buffer contents are known.
        policy.meta_action = [np.array([5, 5])]

        for i in range(4):
            obs0 = np.array([i for _ in range(2)])
            context0 = np.array([i for _ in range(3)])
            action = np.array([i for _ in range(1)])
            reward = i
            obs1 = np.array([i+1 for _ in range(2)])
            context1 = np.array([i for _ in range(3)])
            done, is_final_step, evaluate = False, False, False

            policy.store_transition(
                obs0=obs0,
                context0=context0,
                action=action,
                reward=reward,
                obs1=obs1,
                context1=context1,
                done=done,
                is_final_step=is_final_step,
                evaluate=evaluate,
                env_num=0,
            )

        # unchanged sample
        obs_t = policy.replay_buffer._obs_t[0]
        action_t = policy.replay_buffer._action_t[0]
        reward_t = policy.replay_buffer._reward_t[0]
        done_t = policy.replay_buffer._done_t[0]

        # check the various attributes
        self.assertTrue(
            all(all(obs_t[i] ==
                    [np.array([0, 0]),
                     np.array([1, 1]),
                     np.array([2, 2]),
                     np.array([3, 3]),
                     np.array([4, 4])][i])
                for i in range(len(obs_t)))
        )

        for i in range(len(action_t)):
            self.assertTrue(
                all(all(action_t[i][j] ==
                        [[np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5])],
                         [np.array([0]),
                          np.array([1]),
                          np.array([2]),
                          np.array([3])]][i][j])
                    for j in range(len(action_t[i])))
            )

        self.assertEqual(reward_t,
                         [[6], [-5.656854249501219, -4.24264068713107,
                                -2.8284271247638677, -1.4142135624084504]])

        self.assertEqual(done_t, [False, False, False, False])

        # hindsight sample
        obs_t = policy.replay_buffer._obs_t[1]
        action_t = policy.replay_buffer._action_t[1]
        reward_t = policy.replay_buffer._reward_t[1]
        done_t = policy.replay_buffer._done_t[1]

        # check the various attributes
        self.assertTrue(
            all(all(obs_t[i] ==
                    [np.array([0, 0]),
                     np.array([1, 1]),
                     np.array([2, 2]),
                     np.array([3, 3]),
                     np.array([4, 4])][i])
                for i in range(len(obs_t)))
        )

        # The hindsight goal is the final observation [4, 4].
        for i in range(len(action_t)):
            self.assertTrue(
                all(all(action_t[i][j] ==
                        [[np.array([4, 4]),
                          np.array([4, 4]),
                          np.array([4, 4]),
                          np.array([4, 4]),
                          np.array([4, 4])],
                         [np.array([0]),
                          np.array([1]),
                          np.array([2]),
                          np.array([3])]][i][j])
                    for j in range(len(action_t[i])))
            )

        # Worker rewards shrink toward the (achieved) hindsight goal.
        self.assertEqual(reward_t,
                         [[6], [-4.24264068713107, -2.8284271247638677,
                                -1.4142135624084504, -1e-05]])

        self.assertEqual(done_t, [False, False, False, False])
    def test_store_transition_4(self):
        """Check store_transition() with relative_goals=True, hindsight=True.

        See the case list in ``test_store_transition``.  Both the unchanged
        sample (relative goal) and the hindsight sample are verified.
        """
        # =================================================================== #
        #                             test case 4                             #
        # =================================================================== #
        policy_params = self.policy_params.copy()
        policy_params['relative_goals'] = True
        policy_params['hindsight'] = True
        policy_params['subgoal_testing_rate'] = 1
        policy_params['meta_period'] = 4
        policy_params['batch_size'] = 2
        policy = TD3GoalConditionedPolicy(**policy_params)

        # Initialize the variables of the policy.
        policy.sess.run(tf.compat.v1.global_variables_initializer())

        # Run the initialize method.
        policy.initialize()

        # Fix the meta action so the expected buffer contents are known.
        policy.meta_action = [np.array([5, 5])]

        for i in range(4):
            obs0 = np.array([i for _ in range(2)])
            context0 = np.array([i for _ in range(3)])
            action = np.array([i for _ in range(1)])
            reward = i
            obs1 = np.array([i+1 for _ in range(2)])
            context1 = np.array([i for _ in range(3)])
            done, is_final_step, evaluate = False, False, False

            policy.store_transition(
                obs0=obs0,
                context0=context0,
                action=action,
                reward=reward,
                obs1=obs1,
                context1=context1,
                done=done,
                is_final_step=is_final_step,
                evaluate=evaluate,
                env_num=0,
            )

        # unchanged sample
        obs_t = policy.replay_buffer._obs_t[0]
        action_t = policy.replay_buffer._action_t[0]
        reward = policy.replay_buffer._reward_t[0]
        done = policy.replay_buffer._done_t[0]

        # check the various attributes
        self.assertTrue(
            all(all(obs_t[i] ==
                    [np.array([0, 0]),
                     np.array([1, 1]),
                     np.array([2, 2]),
                     np.array([3, 3]),
                     np.array([4, 4])][i])
                for i in range(len(obs_t)))
        )

        for i in range(len(action_t)):
            self.assertTrue(
                all(all(action_t[i][j] ==
                        [[np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([5, 5]),
                          np.array([4, 4])],
                         [np.array([0]),
                          np.array([1]),
                          np.array([2]),
                          np.array([3])]][i][j])
                    for j in range(len(action_t[i])))
            )

        self.assertEqual(reward,
                         [[6], [-5.656854249501219, -5.656854249501219,
                                -5.656854249501219, -5.656854249501219]])

        self.assertEqual(done, [False, False, False, False])

        # hindsight sample
        obs_t = policy.replay_buffer._obs_t[1]
        action_t = policy.replay_buffer._action_t[1]
        reward_t = policy.replay_buffer._reward_t[1]
        done_t = policy.replay_buffer._done_t[1]

        # check the various attributes
        self.assertTrue(
            all(all(obs_t[i] ==
                    [np.array([0, 0]),
                     np.array([1, 1]),
                     np.array([2, 2]),
                     np.array([3, 3]),
                     np.array([4, 4])][i])
                for i in range(len(obs_t)))
        )

        # With relative goals the hindsight goal decays toward zero as the
        # observation approaches the final state.
        for i in range(len(action_t)):
            self.assertTrue(
                all(all(action_t[i][j] ==
                        [[np.array([4, 4]),
                          np.array([3, 3]),
                          np.array([2, 2]),
                          np.array([1, 1]),
                          np.array([0, 0])],
                         [np.array([0]),
                          np.array([1]),
                          np.array([2]),
                          np.array([3])]][i][j])
                    for j in range(len(action_t[i])))
            )

        self.assertEqual(reward_t,
                         [[6], [-4.24264068713107, -2.8284271247638677,
                                -1.4142135624084504, -1e-05]])

        self.assertEqual(done_t, [False, False, False, False])
def test_store_transition_5(self):
# =================================================================== #
# test case 1 #
# =================================================================== #
policy_params = self.policy_params.copy()
policy_params['relative_goals'] = False
policy_params['hindsight'] = False
policy_params['subgoal_testing_rate'] = 1
policy_params['meta_period'] = [5, 2]
policy_params['num_levels'] = 3
policy_params['batch_size'] = 2
policy = TD3GoalConditionedPolicy(**policy_params)
# Initialize the variables of the policy.
policy.sess.run(tf.compat.v1.global_variables_initializer())
# Run the initialize method.
policy.initialize()
policy.meta_action = [[np.array([5, 5]), np.array([6, 6])]]
for i in range(10):
obs0 = np.array([i for _ in range(2)])
context0 = np.array([i for _ in range(3)])
action = np.array([i for _ in range(1)])
reward = i
obs1 = np.array([i+1 for _ in range(2)])
context1 = np.array([i for _ in range(3)])
done, is_final_step, evaluate = False, False, False
policy.store_transition(
obs0=obs0,
context0=context0,
action=action,
reward=reward,
obs1=obs1,
context1=context1,
done=done,
is_final_step=is_final_step,
evaluate=evaluate,
env_num=0,
)
obs_t = policy.replay_buffer._obs_t[0]
action_t = policy.replay_buffer._action_t[0]
reward = policy.replay_buffer._reward_t[0]
done = policy.replay_buffer._done_t[0]
# check the various attributes
self.assertTrue(
all(all(obs_t[i] ==
[np.array([0, 0]),
np.array([1, 1]),
np.array([2, 2]),
np.array([3, 3]),
np.array([4, 4]),
np.array([5, 5]),
np.array([6, 6]),
np.array([7, 7]),
np.array([8, 8]),
np.array([9, 9]),
np.array([10, 10])][i])
for i in range(len(obs_t)))
)
for i in range(len(action_t)):
self.assertTrue(
all(all(action_t[i][j] ==
[[np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5]),
np.array([5, 5])],
[np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6]),
np.array([6, 6])],
[np.array([0]),
np.array([1]),
np.array([2]),
np.array([3]),
np.array([4]),
np.array([5]),
np.array([6]),
np.array([7]),
np.array([8]),
np.array([9])]][i][j])
for j in range(len(action_t[i])))
)
self.assertEqual(reward,
[[45],
[-9.899494936632289, -4.242640687172318,
-1.4142235624084505, -7.071067811894938,
-12.727922061373764],
[-7.071067811872546, -5.656854249501219,
-4.24264068713107, -2.8284271247638677,
-1.4142135624084504, -1e-05,
-1.4142135624084504, -2.8284271247638677,
-4.24264068713107, -5.656854249501219]])
self.assertEqual(done, [False, False, False, False, False, | |
],
[ "c", "a", "t#^", [ AE, AE, ] ],
[ "^^^", "a", Anything, [ EY, ] ],
[ "^.", "a", "^e", [ EY, ] ],
[ "^.", "a", "^i", [ EY, ] ],
[ "^^", "a", Anything, [ AE, ] ],
[ "^", "a", "^##", [ EY, ] ],
[ "^", "a", "^#", [ EY, ] ],
[ "^", "a", "^#", [ EH, EY, ] ],
[ Anything, "a", "^%", [ EY, ] ],
[ "#", "a", Nothing, [ AO, ] ],
[ Anything, "a", "wa", [ AX, ] ],
[ Anything, "a", Nothing, [ AX, ] ],
[ Anything, "a", "^+#", [ EY, ] ],
[ Anything, "a", "^+:#", [ AE, ] ],
[ " :", "a", "^+ ", [ EY, ] ],
[ Anything, "a", Anything, [ AE, ] ],
]
#2 - b
# Letter-to-sound rules for "b".  Each rule is
# [left context, fragment, right context, phoneme list]; the first matching
# rule wins, so ordering is significant.
# Fix: the "bi"/"cycle" rule appeared twice back-to-back; the second copy was
# unreachable dead data and has been removed (behavior unchanged).
r_b = [
    [ "b", "b", Anything, Silent ],
    [ Anything, "bi", "cycle", [ BB2, AY, ] ],
    [ Anything, "bbq", Anything, [ BB2, AX, AX, ER1, BB2, AX, KK2, YY2, UW2, ] ],
    [ Anything, "barbeque", Anything, [ BB2, AX, AX, ER1, BB2, AX, KK2, YY2, UW2, ] ],
    [ Anything, "barbaque", Anything, [ BB2, AX, AX, ER1, BB2, AX, KK2, YY2, UW2, ] ],
    [ Anything, "bargain", Anything, [ BB2, AO, ER1, GG1, EH, NN1, ] ],
    [ Anything, "bagel", Anything, [ BB2, EY, GG1, EH, LL, ] ],
    [ Anything, "being", Anything, [ BB2, IY, IH, NG, ] ],
    [ Anything, "bomb", Anything, [ BB2, AA, AA, MM, ] ],
    [ Nothing, "both", Nothing, [ BB2, OW, TH, ] ],
    [ Anything, "buil", Anything, [ BB2, IH, LL, ] ],
    [ Nothing, "bus", "y", [ BB2, IH, ZZ, ] ],
    [ Nothing, "bus", "#", [ BB2, IH, ZZ, ] ],
    [ Anything, "bye", Anything, [ BB2, AO, AY, ] ],
    [ Anything, "bear", Nothing, [ BB2, EY, ER2, ] ],
    [ Anything, "bear", "%", [ BB2, EY, ER2, ] ],
    [ Anything, "bear", "s", [ BB2, EY, ER2, ] ],
    [ Anything, "bear", "#", [ BB2, EY, ER2, ] ],
    [ Nothing, "beau", Anything, [ BB2, OW, ] ],
    [ Anything, "ban", "ish", [ BB2, AE, AE, NN1, ] ],
    [ Nothing, "be", "^#", [ BB2, IH, ] ],
    [ Nothing, "by", Anything, [ BB2, AO, AY, ] ],
    [ "y", "be", Nothing, [ BB2, IY, ] ],
    [ Nothing, "b", "#", [ BB2, ] ],
    [ Anything, "b", Nothing, [ BB1, ] ],
    [ Anything, "b", "#", [ BB1, ] ],
    [ Anything, "b", "l", [ BB1, ] ],
    [ Anything, "b", "r", [ BB1, ] ],
    [ Anything, "b", Anything, [ BB2, ] ],
]
#3 - c
# Letter-to-sound rules for "c".  Each rule is
# [left context, fragment, right context, phoneme list]; the first matching
# rule wins, so ordering is significant (word exceptions first, generic
# single-letter fallbacks last).
r_c = [
    [ Anything, "chinese", Anything, [ CH, AY, NN1, IY, SS, ] ],
    [ Anything, "country", Anything, [ KK1, AX, AX, NN1, TT2, ER1, IY, ] ],
    [ Anything, "christ", Nothing, [ KK3, ER1, AY, SS, TT2, ] ],
    [ Anything, "chassis", Anything, [ CH, AX, AX, SS, IY, ] ],
    [ Anything, "closet", Anything, [ KK3, LL, AO, AO, ZZ, EH, TT2, ] ],
    [ Anything, "china", Anything, [ CH, AY, NN1, AX, ] ],
    [ Nothing, "cafe", Nothing, [ KK1, AE, FF, AE, EY, ] ],
    [ Anything, "cele", Anything, [ SS, EH, LL, PA1, EH, ] ],
    [ Anything, "cycle", Anything, [ SS, AY, KK3, UH, LL, ] ],
    [ Anything, "chron", Anything, [ KK1, ER1, AO, NN1, ] ],
    [ Anything, "crea", "t", [ KK3, ER1, IY, EY, ] ],
    [ Nothing, "cry", Nothing, [ KK3, ER1, IY, ] ],
    [ Nothing, "chry", Anything, [ KK3, ER1, AO, AY, ] ],
    [ Nothing, "cry", "#", [ KK3, ER1, AO, AY, ] ],
    [ Nothing, "caveat", ":", [ KK1, AE, VV, IY, AE, TT2, ] ],
    [ "^", "cuit", Anything, [ KK1, IH, TT2, ] ],
    [ Anything, "chaic", Anything, [ KK1, EY, IH, KK1, ] ],
    [ Anything, "cation", Anything, [ KK1, EY, SH, AX, NN1, ] ],
    [ Nothing, "ch", "aract", [ KK1, ] ],
    [ Nothing, "ch", "^", [ KK1, ] ],
    [ "^e", "ch", Anything, [ KK1, ] ],
    [ Anything, "ch", Anything, [ CH, ] ],
    [ " s", "ci", "#", [ SS, AY, ] ],
    [ Anything, "ci", "a", [ SH, ] ],
    [ Anything, "ci", "o", [ SH, ] ],
    [ Anything, "ci", "en", [ SH, ] ],
    [ Anything, "c", "+", [ SS, ] ],
    [ Anything, "ck", Anything, [ KK2, ] ],
    [ Anything, "com", "%", [ KK1, AH, MM, ] ],
    #[ Anything, "c", "^", [ KK3, ] ],
    [ Anything, "c", "u", [ KK3, ] ],
    [ Anything, "c", "o", [ KK3, ] ],
    [ Anything, "c", "a^^", [ KK3, ] ],
    [ Anything, "c", "o^^", [ KK3, ] ],
    [ Anything, "c", "l", [ KK3, ] ],
    [ Anything, "c", "r", [ KK3, ] ],
    [ Anything, "c", "a", [ KK1, ] ],
    [ Anything, "c", "e", [ KK1, ] ],
    [ Anything, "c", "i", [ KK1, ] ],
    [ Anything, "c", Nothing, [ KK2, ] ],
    [ Anything, "c", Anything, [ KK1, ] ],
]
#d 10
# Letter-to-sound rules for "d".  Each rule is
# [left context, fragment, right context, phoneme list]; first match wins.
r_d = [
    [ Anything, "dead", Anything, [ DD2, EH, EH, DD1, ] ],
    [ Nothing, "dogged", Anything, [ DD2, AO, GG1, PA1, EH, DD1, ] ],
    [ "#:", "ded", Nothing, [ DD2, IH, DD1, ] ],
    [ Nothing, "dig", Anything, [ DD2, IH, IH, GG1, ] ],
    [ Nothing, "dry", Nothing, [ DD2, ER1, AO, AY, ] ],
    [ Nothing, "dry", "#", [ DD2, ER1, AO, AY, ] ],
    [ Nothing, "de", "^#", [ DD2, IH, ] ],
    [ Nothing, "do", Nothing, [ DD2, UW2, ] ],
    [ Nothing, "does", Anything, [ DD2, AH, ZZ, ] ],
    # NOTE(review): no leading DD2 in the "doing" rule — confirm intended.
    [ Nothing, "doing", Anything, [ UW2, IH, NG, ] ],
    [ Nothing, "dow", Anything, [ DD2, AW, ] ],
    [ Anything, "du", "a", [ JH, UW2, ] ],
    [ Anything, "dyna", Anything, [ DD2, AY, NN1, AX, PA1, ] ],
    [ Anything, "dyn", "#", [ DD2, AY, NN1, PA1, ] ],
    [ "d", "d", Anything, Silent ],
    [ Anything, "d", Nothing, [ DD1, ] ],
    [ Nothing, "d", Anything, [ DD2, ] ],
    [ Anything, "d", Anything, [ DD2, ] ],
]
#e 52
# Letter-to-sound rules for "e".  Each rule is
# [left context, fragment, right context, phoneme list]; first match wins.
# Silent marks a fragment with no phoneme output (e.g. terminal silent "e").
r_e = [
    [ Nothing, "eye", Anything, [ AA, AY, ] ],
    [ Anything, "ered", Nothing, [ ER2, DD1, ] ],
    [ Nothing, "ego", Anything, [ IY, GG1, OW, ] ],
    [ Nothing, "err", Anything, [ EH, EH, ER1, ] ],
    [ "^", "err", Anything, [ EH, EH, ER1, ] ],
    [ Anything, "ev", "er", [ EH, EH, VV, HH1, ] ],
    [ Anything, "e", "ness", Silent ],
    #[ Anything, "e", "^%", "IY, ] ],
    [ Anything, "eri", "#", [ IY, XR, IY, ] ],
    [ Anything, "eri", Anything, [ EH, ER1, IH, ] ],
    [ "#:", "er", "#", [ ER2, ] ],
    [ Anything, "er", "#", [ EH, EH, ER1, ] ],
    [ Anything, "er", Anything, [ ER2, ] ],
    [ Nothing, "evil", Anything, [ IY, VV, EH, LL, ] ],
    [ Nothing, "even", Anything, [ IY, VV, EH, NN1, ] ],
    [ "m", "edia", Anything, [ IY, DD2, IY, AX, ] ],
    [ Anything, "ecia", Anything, [ IY, SH, IY, EY, ] ],
    [ ":", "eleg", Anything, [ EH, LL, EH, GG1, ] ],
    [ "#:", "e", "w", Silent ],
    # Consonants after which "ew" reads as plain UW2 rather than YY2 UW2.
    [ "t", "ew", Anything, [ UW2, ] ],
    [ "s", "ew", Anything, [ UW2, ] ],
    [ "r", "ew", Anything, [ UW2, ] ],
    [ "d", "ew", Anything, [ UW2, ] ],
    [ "l", "ew", Anything, [ UW2, ] ],
    [ "z", "ew", Anything, [ UW2, ] ],
    [ "n", "ew", Anything, [ UW2, ] ],
    [ "j", "ew", Anything, [ UW2, ] ],
    [ "th", "ew", Anything, [ UW2, ] ],
    [ "ch", "ew", Anything, [ UW2, ] ],
    [ "sh", "ew", Anything, [ UW2, ] ],
    [ Anything, "ew", Anything, [ YY2, UW2, ] ],
    [ Anything, "e", "o", [ IY, ] ],
    # Sibilant/affricate + "es" ending is voiced: IH ZZ.
    [ "#:s", "es", Nothing, [ IH, ZZ, ] ],
    [ "#:c", "es", Nothing, [ IH, ZZ, ] ],
    [ "#:g", "es", Nothing, [ IH, ZZ, ] ],
    [ "#:z", "es", Nothing, [ IH, ZZ, ] ],
    [ "#:x", "es", Nothing, [ IH, ZZ, ] ],
    [ "#:j", "es", Nothing, [ IH, ZZ, ] ],
    [ "#:ch", "es", Nothing, [ IH, ZZ, ] ],
    [ "#:sh", "es", Nothing, [ IH, ZZ, ] ],
    [ "#:", "e", "s ", Silent ],
    [ "#:", "ely", Nothing, [ LL, IY, ] ],
    [ "#:", "ement", Anything, [ PA1, MM, EH, NN1, TT2, ] ],
    [ Anything, "eful", Anything, [ PA1, FF, UH, LL, ] ],
    [ Anything, "ee", Anything, [ IY, ] ],
    [ Anything, "earn", Anything, [ ER2, NN1, ] ],
    [ Nothing, "ear", "^", [ ER2, ] ],
    [ "k.", "ead", Anything, [ IY, DD2, ] ],
    [ "^.", "ead", Anything, [ EH, DD2, ] ],
    [ "d", "ead", Anything, [ EH, DD2, ] ],
    [ Anything, "ead", Anything, [ IY, DD2, ] ],
    [ "#:", "ea", Nothing, [ IY, AX, ] ],
    [ "#:", "ea", "s", [ IY, AX, ] ],
    [ Anything, "ea", "su", [ EH, ] ],
    [ Anything, "ea", Anything, [ IY, ] ],
    [ Anything, "eigh", Anything, [ EY, ] ],
    [ "l", "ei", Anything, [ IY, ] ],
    [ ".", "ei", Anything, [ EY, ] ],
    [ Anything, "ei", "n", [ AY, ] ],
    [ Anything, "ei", Anything, [ IY, ] ],
    [ Anything, "ey", Anything, [ IY, ] ],
    [ Anything, "eu", Anything, [ YY2, UW2, ] ],
    [ "#:", "e", "d ", Silent ],
    [ "#s", "e", "^", Silent ],
    [ ":", "e", "x", [ EH, EH, ] ],
    [ "#:", "e", Nothing, Silent ],
    [ "+:", "e", Nothing, Silent ],
    [ "':^", "e", Nothing, Silent ],
    [ ":", "equ", Anything, [ IY, KK1, WW, ] ],
    [ "dg", "e", Anything, Silent ],
    [ "dh", "e", Anything, [ IY, ] ],
    [ " :", "e", Nothing, [ IY, ] ],
    [ "#", "ed", Nothing, [ DD1, ] ],
    [ Anything, "e", Anything, [ EH, ] ],
]
#f 2
# Letter-to-sound rules for "f".  Each rule is
# [left context, fragment, right context, phoneme list]; first match wins.
r_f = [
    [ Anything, "fnord", Anything, [ FF, NN1, AO, OR, DD1, ] ],
    [ Anything, "four", Anything, [ FF, OW, ER1, ] ],
    [ Anything, "ful", Anything, [ PA1, FF, UH, LL, ] ],
    [ Nothing, "fly", Anything, [ FF, LL, AO, AY, ] ],
    [ ".", "fly", Anything, [ FF, LL, AO, AY, ] ],
    [ Anything, "fixed", Anything, [ FF, IH, KK1, SS, TT2, ] ],
    [ Anything, "five", Anything, [ FF, AO, AY, VV, ] ],
    [ Anything, "foot", Anything, [ FF, UH, UH, TT2, ] ],
    [ Anything, "f", Anything, [ FF, ] ],
]
#g 10
# Letter-to-sound rules for "g".  Each rule is
# [left context, fragment, right context, phoneme list]; first match wins.
# Fix: the ["arrai", "g", "n"] silent-g rule appeared twice; the second copy
# was unreachable dead data and has been removed (behavior unchanged).
r_g = [
    [ Anything, "gadget", Anything, [ GG2, AE, AE, DD1, PA2, JH, EH, EH, TT2, ] ],
    [ Anything, "god", Anything, [ GG3, AA, AA, DD1, ] ],
    [ Anything, "get", Anything, [ GG3, EH, EH, TT2, ] ],
    [ Anything, "gen", "^", [ JH, EH, EH, NN1, ] ],
    [ Anything, "gen", "#^", [ JH, EH, EH, NN1, ] ],
    [ Anything, "gen", Nothing, [ JH, EH, EH, NN1, ] ],
    [ Anything, "giv", Anything, [ GG2, IH, IH, VV, HH1, ] ],
    [ "su", "gges", Anything, [ GG1, JH, EH, SS, ] ],
    [ Anything, "great", Anything, [ GG2, ER1, EY, TT2, ] ],
    [ Anything, "good", Anything, [ GG2, UH, UH, DD1, ] ],
    #hmmm guest guess
    [ Nothing, "gue", Anything, [ GG2, EH, ] ],
    #hmm don't know about this one. argue? vague?
    [ Anything, "gue", Anything, [ GG3, ] ],
    [ "d", "g", Anything, [ JH, ] ],
    [ "##", "g", Anything, [ GG1, ] ],
    [ Anything, "g", "+", [ JH, ] ],
    [ Anything, "gg", Anything, [ GG3, PA1, ] ],
    # Silent "g" before "n" in these stems.
    [ "campai", "g", "n", Silent ],
    [ "arrai", "g", "n", Silent ],
    [ "ali", "g", "n", Silent ],
    [ "beni", "g", "n", Silent ],
    [ Anything, "g", "a", [ GG1, ] ],
    [ Anything, "g", "e", [ GG1, ] ],
    [ Anything, "g", "i", [ GG1, ] ],
    [ Anything, "g", "y", [ GG1, ] ],
    [ Anything, "g", "o", [ GG2, ] ],
    [ Anything, "g", "u", [ GG2, ] ],
    [ Anything, "g", "l", [ GG2, ] ],
    [ Anything, "g", "r", [ GG2, ] ],
    [ Anything, "g", Nothing, [ GG3, ] ],
    [ "n", "g", Anything, [ GG3, ] ],
    [ Anything, "g", Anything, [ GG3, ] ],
]
#h 6
r_h = [
[ Anything, "honor", Anything, [ AO, NN1, ER2, ] ],
[ Anything, "heard", Anything, [ HH1, ER2, DD1, ] ],
[ Anything, "height", Anything, [ HH1, AY, TT2, ] ],
[ Anything, "honest", Anything, [ AO, NN1, EH, SS, TT2, ] ],
[ Anything, "hood", Anything, [ HH1, UH, UH, DD1, ] ],
[ "ab", "hor", Anything, [ OW, ER2, ] ],
[ Anything, "heavy", Anything, [ HH1, AE, VV, IY, ] ],
[ Anything, "heart", Anything, [ HH1, AA, ER1, TT2, ] ],
[ Anything, "half", Anything, [ HH1, AE, AE, FF, ] ],
[ Anything, "hive", Anything, [ HH1, AA, AY, VV, ] ],
[ Anything, "heavi", ":#", [ HH1, AE, VV, IY, ] ],
[ Nothing, "hav", Anything, [ HH1, AE, VV, HH1, ] ],
[ Anything, "ha", Nothing, [ HH1, AA, AA, ] ],
[ Nothing, "hi", Nothing, [ HH1, AA, AY, ] ],
[ Anything, "he", "t", [ HH1, AE, ] ],
[ Anything, "he", "x", [ HH1, AE, ] ],
[ Anything, "hy", Anything, [ HH1, AA, AY, ] ],
[ Nothing, "hang", Anything, [ HH1, AE, NG, ] ],
[ | |
# Repository: brettin/liquidhandling
import json
from liquidhandling import Properties
STEP_DELIMITER = "!@#$"
class SoloSoft:
def __init__(self, filename=None, plateList=None, pipeline=None):
self.filename = None
self.plateList = []
self.pipeline = []
# *Open protocol file for editing
try:
if filename != None:
self.setFile(filename)
except Exception as error:
print("Error creating SoloSoft protocol with filename %s" % filename)
print(error)
return
# *Set plate list
try:
if plateList != None:
self.setPlates(plateList)
else:
self.setPlates(
[
"Empty",
"Empty",
"Empty",
"Empty",
"Empty",
"Empty",
"Empty",
"Empty",
]
)
except Exception as error:
print("Error setting Plate List")
print(error)
return
# *Set pipeline, if we're expanding on an existing pipeline
try:
if pipeline != None:
self.setPipeline(pipeline)
else:
self.initializePipeline()
except Exception as error:
print("Error setting pipeline")
print(error)
def setFile(self, filename):
if not isinstance(filename, str):
raise TypeError("filename must be a string.")
else:
self.filename = filename
def setPlates(self, plateList):
if not isinstance(plateList, list):
raise TypeError("plateList must be a list of strings.")
else:
self.plateList = plateList
def setPipeline(self, pipeline):
if not isinstance(pipeline, list):
raise TypeError("pipeline should be a list")
else:
self.pipeline = pipeline
    def initializePipeline(self):
        # Reset the protocol to an empty list of steps.
        self.setPipeline([])
def removeStep(self, position=-1):
try:
self.pipeline.remove(position)
except:
print("Error removing step at position %i in pipeline" % position)
def savePipeline(self, filename=None, CRLF=True):
if filename == None:
if self.filename != None:
filename = self.filename
else:
raise BaseException("Need to specify a file to save pipeline")
if CRLF:
newline = "\r\n"
else:
newline = ""
with open(filename, "w", newline=newline) as file:
for plate in self.plateList:
file.write(str(plate) + "\n")
for step in self.pipeline:
for item in step:
if isinstance(item, list):
if len(item) > 0 and isinstance(item[0], list):
for line in item:
for number in line[:-1]:
file.write(str(number))
file.write(",")
file.write(str(line[-1]))
file.write("\n")
else:
for number in item:
file.write(str(number) + "\n")
else:
file.write(str(item) + "\n")
def pipelineToJSON(self, json_file=None, pipeline=None, plateList=None):
if pipeline != None:
if not isinstance(pipeline, list):
raise TypeError("pipeline should be a list")
else:
pipeline = self.pipeline
if plateList != None:
if not isinstance(plateList, list):
raise TypeError("platelist should be a list")
else:
plateList = self.plateList
json_data = {}
json_data["metadata"] = {"spec_version": Properties.SPEC_VERSION}
json_data["pipeline_type"] = "SoloSoft"
json_data["platelist"] = plateList
steps = []
for step in pipeline:
# step = pipeline[28]
# if True:
step_extraction_function = self.jsonify[step[0]]
step_data = {}
step_data["step_definition"] = step_extraction_function(self, step)
steps.append(step_data)
json_data["steps"] = steps
return json_data
def jsonToPipeline(self, json_data, inplace=True):
if isinstance(json_data, str):
json_local = json.loads(json_data)
elif isinstance(json_data, dict):
json_local = json_data
else:
print("json needs to be either a dict or string.")
return
steps = []
for step in json_local["steps"]:
params = {}
step_function = None
for key in step["step_definition"]:
if key == "step_type":
step_function = self.pipelinify[step["step_definition"][key]]
else:
params[key] = step["step_definition"][key]
steps.append(step_function(self=self, inplace=False, **params))
params = {}
if inplace:
self.setPipeline = steps
return steps
# * SOLOSoft Pipeline Functions
def getTip(
self,
position="Position1",
disposal="TipDisposal",
num_tips=8,
auto_tip_selection=True,
count_tips_from_last_channel=False,
index=None,
inplace=True,
):
properties_list = ["GetTip", position, disposal, num_tips]
if auto_tip_selection:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend([0, count_tips_from_last_channel, STEP_DELIMITER])
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyGetTip(self, step):
json_data = {}
json_data["step_type"] = "GetTip"
json_data["position"] = step[1]
json_data["disposal"] = step[2]
json_data["num_tips"] = step[3]
json_data["auto_tip_selection"] = step[4]
json_data["count_tips_from_last_channel"] = step[6]
return json_data
def shuckTip(self, disposal="TipDisposal", index=None, inplace=True):
properties_list = ["ShuckTip", disposal, STEP_DELIMITER]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyShuckTip(self, step):
json_data = {}
json_data["step_type"] = "ShuckTip"
json_data["disposal"] = step[1]
return json_data
def loop(self, iterations=-1, index=None, inplace=True):
properties_list = ["Loop", iterations, STEP_DELIMITER]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyLoop(self, step):
json_data = {}
json_data["step_type"] = "Loop"
json_data["iterations"] = step[1]
return json_data
def endLoop(self, index=None, inplace=True):
properties_list = ["EndLoop", STEP_DELIMITER]
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyEndLoop(self, step):
json_data = {}
json_data["step_type"] = "EndLoop"
return json_data
    def aspirate(
        self,
        position="Position1",
        aspirate_volume_to_named_point=False,
        aspirate_volume_single=0,
        syringe_speed=100,
        start_by_emptying_syringe=True,
        increment_column_order=False,
        aspirate_point="Position1",
        aspirate_shift=[0, 0, 0],
        do_tip_touch=False,
        tip_touch_shift=[0, 0, 0],
        file_data_path="",
        multiple_wells=1,
        backlash=0,
        pre_aspirate=0,
        mix_at_start=False,
        mix_cycles=0,
        mix_volume=0,
        dispense_height=0,
        delay_after_dispense=0,
        aspirate_volumes=None,
        dwell_after_aspirate=0,
        find_bottom_of_vessel=False,
        reverse_order=False,
        post_aspirate=0,
        move_while_pipetting=False,
        move_distance=[0, 0, 0],
        index=None,
        inplace=True,
    ):
        """Build an Aspirate step and (optionally) add it to the pipeline.

        The properties list mirrors SoloSoft's on-disk Aspirate record: the
        order and count of appended fields is significant, so do not reorder
        the statements below.  Boolean options are serialized as 1/0 flags or
        as paired True/False markers, exactly as SoloSoft expects.

        Returns the assembled properties list.
        """
        properties_list = [
            "Aspirate",
            position,
            aspirate_volume_single,
            2,
            syringe_speed,
        ]
        if start_by_emptying_syringe:
            properties_list.append(1)
        else:
            properties_list.append(0)
        # Mutually exclusive mode flags are stored as a True/False pair.
        if aspirate_volume_to_named_point:
            properties_list.extend([False, True])
        else:
            properties_list.extend([True, False])
        if increment_column_order:
            properties_list.extend([False, True])
        else:
            properties_list.extend([True, False])
        properties_list.extend([aspirate_point, aspirate_shift])
        if do_tip_touch:
            properties_list.append(1)
        else:
            properties_list.append(0)
        properties_list.extend(
            [tip_touch_shift, file_data_path, multiple_wells, backlash, pre_aspirate]
        )
        if mix_at_start:
            properties_list.append(1)
        else:
            properties_list.append(0)
        properties_list.extend(
            [mix_cycles, mix_volume, "a", 0, 0, dispense_height, delay_after_dispense]
        )
        # Per-well volume matrix: default is an 8x12 grid of zeros.
        if aspirate_volumes != None:
            properties_list.append(aspirate_volumes)
        else:
            properties_list.append(
                [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                ]
            )
        properties_list.append(dwell_after_aspirate)
        if find_bottom_of_vessel:
            properties_list.append(1)
        else:
            properties_list.append(0)
        properties_list.append(5)  # ? Mysterious 1 or 2 digit integer
        if reverse_order:
            properties_list.append(1)
        else:
            properties_list.append(0)
        properties_list.append(post_aspirate)
        if move_while_pipetting:
            properties_list.append(1)
        else:
            properties_list.append(0)
        properties_list.extend([move_distance, STEP_DELIMITER])
        if inplace:
            if index != None:
                self.pipeline.insert(index, properties_list)
            else:
                self.pipeline.append(properties_list)
        return properties_list
def jsonifyAspirate(self, step):
json_data = {}
json_data["step_type"] = "Aspirate"
json_data["position"] = step[1]
json_data["aspirate_volume_single"] = step[2]
json_data["syringe_speed"] = step[4]
json_data["start_by_emptying_syringe"] = step[5]
json_data["aspirate_volume_to_named_point"] = step[7]
json_data["increment_column_order"] = step[9]
json_data["aspirate_point"] = step[10]
json_data["aspirate_shift"] = step[11]
json_data["do_tip_touch"] = step[12]
json_data["tip_touch_shift"] = step[13]
json_data["file_data_path"] = step[14]
json_data["multiple_wells"] = step[15]
json_data["backlash"] = step[16]
json_data["pre_aspirate"] = step[17]
json_data["mix_at_start"] = step[18]
json_data["mix_cycles"] = step[19]
json_data["mix_volume"] = step[20]
json_data["dispense_height"] = step[24]
json_data["delay_after_dispense"] = step[25]
json_data["aspirate_volumes"] = step[26]
json_data["dwell_after_aspirate"] = step[27]
json_data["find_bottom_of_vessel"] = step[28]
json_data["reverse_order"] = step[30]
json_data["post_aspirate"] = step[31]
json_data["move_while_pipetting"] = step[32]
json_data["move_distance"] = step[33]
return json_data
def dispense(
self,
position="Position1",
dispense_volume_single=0,
syringe_speed=100,
backlash=0,
dispense_volume_to_named_point=False,
increment_column_order=False,
dispense_point="Position1",
dispense_shift=[0, 0, 0],
do_tip_touch=False,
tip_touch_shift=[0, 0, 0],
file_data_path="",
multiple_wells=1,
dwell_after_dispense=0,
blowoff=0,
mix_at_finish=False,
mix_cycles=0,
mix_volume=0,
aspirate_height=0,
delay_after_aspirate=0,
dispense_volumes=None,
reverse_order=False,
move_while_pipetting=False,
move_distance=[0, 0, 0],
index=None,
inplace=True,
):
properties_list = [
"Dispense",
position,
dispense_volume_single,
2,
syringe_speed,
backlash,
]
if dispense_volume_to_named_point:
properties_list.extend([False, True])
else:
properties_list.extend([True, False])
if increment_column_order:
properties_list.extend([False, True])
else:
properties_list.extend([True, False])
properties_list.extend([dispense_point, dispense_shift])
if do_tip_touch:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend(
[
tip_touch_shift,
file_data_path,
multiple_wells,
dwell_after_dispense,
blowoff,
]
)
if mix_at_finish:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend(
[mix_cycles, mix_volume, "a", aspirate_height, delay_after_aspirate]
)
if dispense_volumes != None:
properties_list.append(dispense_volumes)
else:
properties_list.append(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
if reverse_order:
properties_list.append(1)
else:
properties_list.append(0)
if move_while_pipetting:
properties_list.append(1)
else:
properties_list.append(0)
properties_list.extend([move_distance, STEP_DELIMITER])
if inplace:
if index != None:
self.pipeline.insert(index, properties_list)
else:
self.pipeline.append(properties_list)
return properties_list
def jsonifyDispense(self, step):
json_data = {}
json_data["step_type"] = "Dispense"
json_data["position"] = step[1]
json_data["dispense_volume_single"] = step[2]
json_data["syringe_speed"] = step[4]
json_data["backlash"] = step[5]
json_data["dispense_volume_to_named_point"] = step[7]
json_data["increment_column_order"] = step[9]
json_data["dispense_point"] = step[10]
json_data["dispense_shift"] = step[11]
json_data["do_tip_touch"] = step[12]
json_data["tip_touch_shift"] = step[13]
json_data["file_data_path"] = step[14]
json_data["multiple_wells"] = step[15]
json_data["dwell_after_dispense"] = step[16]
json_data["blowoff"] = step[17]
json_data["mix_at_finish"] = step[18]
json_data["mix_cycles"] = step[19]
json_data["mix_volume"] = step[20]
json_data["dispense_height"] = step[22]
json_data["delay_after_aspirate"] = step[23]
json_data["dispense_volumes"] = step[24]
json_data["reverse_order"] = step[25]
json_data["move_while_pipetting"] = step[26]
json_data["move_distance"] = step[27]
return json_data
def prime(
self,
position="Position1",
syringe_speed=100,
fill_syringe=False,
empty_syringe=True,
aspirate_volume=False,
dispense_volume=False,
volume=0,
index=None,
inplace=True,
):
properties_list = [
"Prime",
syringe_speed,
True, # ? Unclear what this is
False, # ? Unclear what this is
False, # ? Unclear what this is
0, # ? Unclear what this is
"a", # ? Unclear what this is
2, # ? Unclear what this is
True, # | |
# Source repository: pixlra/HARP-fork
#!/usr/bin/env python
# coding: utf8
# (c) 2014 <NAME>, <NAME>
# File licensed under GNU GPL (see HARP_License.txt)
import sys, os
# Project layout is installed as builtins so that every imported module can
# use ProjectDir/LibDir/TmpDir without its own path plumbing.
__builtins__.ProjectDir = os.path.abspath("../")
assert( "HARP" in os.path.basename(ProjectDir) )
__builtins__.LibDir = ProjectDir + "/PythonLib"
__builtins__.TmpDir = ProjectDir + "/tmp"
sys.path.append(LibDir)
# GLOBAL SETTINGS, CHANGE HERE --------------------------------
X265_BinDir = ProjectDir + "/bin/Demo/x265_64Bit"
#X265_BinDir = "/home/lnt335/HARP/HARP"
VideoSrc = 0 # V4L2 video source
isVirtualCam = False #for debugging
# -------------------------------------------------------------
# Let the x265 binary locate its bundled shared libraries.
os.environ["LD_LIBRARY_PATH"] = X265_BinDir
from Imports_Basic import *
from OpenCV import *
from System import *
from Sequence import *
from Warp import *
from GUI.ShowPOC import *
from GUI.AnalyzePOC import *
from Encoder.X265_Encoder import *
from Encoder.HM_Encoder import *
# OPENCV VIDEO
import sys, os  # NOTE(review): redundant re-import; harmless
sys.path.append(ProjectDir + "/Various/ThirdParty")
import opencv.video #third party
# PYQTGRAPH
from PyQt4 import QtGui #force PyQt
for gs in ['raster', 'native', 'opengl']: # force specific graphics system
    if gs in sys.argv:
        QtGui.QApplication.setGraphicsSystem(gs)
        break
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
import pyqtgraph.ptime as ptime
from pyqtgraph.dockarea import *
import pyqtgraph.parametertree.parameterTypes as pTypes
from pyqtgraph.parametertree import Parameter, ParameterTree, ParameterItem, registerParameterType
from PyQt4.QtCore import * #reimport since pyqtgraph runs: "from PyQt4 import QtCore, QtGui"
from PyQt4.QtGui import *
# END PYQTGRAPH
# Frame-pair exchange between the grabbing thread and the worker:
# cam is the capture device, (Ref, Cur) the latest frame pair, DimX/DimY its size.
cam = None
Ref = None
Cur = None
DimX = None
DimY = None
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class VirtualCam(object):
    """Webcam stand-in for debugging: ``read()`` alternates between two
    static test images so the pipeline can run without V4L2 hardware."""

    def __init__(self):
        super(VirtualCam, self).__init__()
        # CHANGE HERE -------------------------------------------------
        # -------------------------------------------------------------
        # Load both test frames once and rescale them to the expected 640x480.
        self.Images = []
        for name in ("/Various/Resources/Tohajiilee.jpg",
                     "/Various/Resources/Tohajiilee_rotated.jpg"):
            frame = readImg(ProjectDir + name)
            self.Images.append(
                cv2.resize(frame, (640, 480), interpolation=cv2.INTER_LINEAR))
        self.toggle = 0

    def read(self):
        """Mimic ``cv2.VideoCapture.read()``: return ``(retval, frame)``.

        retval is always None; the callers in this script ignore it."""
        frame = self.Images[1] if self.toggle else self.Images[0]
        self.toggle = not self.toggle
        time.sleep(0.015)  # pace delivery roughly like a real camera
        return None, frame

    def release(self):
        """Nothing to free for static images."""
        pass
#==========================================
def startGrabbing():
#==========================================
    # Camera grab loop (runs on GrabbingThread): continuously captures a
    # (reference, current) frame pair and publishes them through the module
    # globals Ref/Cur, guarded by ThreadLock.
    global VideoSrc
    global cam
    global DimX, DimY
    global Ref, Cur
    global ThreadLock
    global StopGrabbingThread
    global isVirtualCam
    if isVirtualCam:
        cam = VirtualCam()
    else:
        cam = opencv.video.create_capture(VideoSrc)
    #-------------------------------------
    # RETRIEVE CAM INFOS
    #-------------------------------------
    ret, frame = cam.read()
    DimY, DimX = frame.shape[:2]
    print "\nWebcam resolution: %dx%d\n" % (DimX, DimY)
    cnt = 0
    while(not StopGrabbingThread):
        ret, TmpRefRGB = cam.read()
        ThreadLock.acquire()
        # Discard 4 frames so Ref and Cur are temporally separated.
        for x in range(0, 4):
            ret, test = cam.read()
        ret, TmpCurRGB = cam.read()
        # NOTE(review): OpenCV captures normally deliver BGR frames, but the
        # conversion below uses COLOR_RGB2YUV - confirm the intended order.
        TmpCur = cv2.cvtColor(TmpCurRGB, cv2.COLOR_RGB2YUV)
        TmpRef = cv2.cvtColor(TmpRefRGB, cv2.COLOR_RGB2YUV)
        Ref = TmpRef
        Cur = TmpCur
        #MSEC = cam.get(cv2.cv.CV_CAP_PROP_FPS)
        if cnt % 100 == 0:
            print "Webcam captured: %d pairs" % cnt
        cnt += 1
        ThreadLock.release()
#==========================================
def stopGrabbing():
#==========================================
    """Signal the grab loop to stop, then release the camera.

    The lock is now released in a ``finally`` block: previously a failing
    ``cam.release()`` would leave ThreadLock held forever, deadlocking the
    grabbing thread.
    """
    global cam
    global StopGrabbingThread
    StopGrabbingThread = True
    ThreadLock.acquire()
    try:
        cam.release()
    finally:
        ThreadLock.release()
# Synchronisation state must exist BEFORE the grabbing thread starts:
# startGrabbing() reads ThreadLock and StopGrabbingThread, so the original
# order (thread started first, globals assigned after) could race into a
# NameError inside the thread.
ThreadLock = threading.Lock()
StopGrabbingThread = False
GrabbingThread = threading.Thread(target=startGrabbing)
GrabbingThread.daemon = False  # non-daemon: interpreter waits for the grab loop
GrabbingThread.start()
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class Demo(object):
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
    # Worker: takes the latest (Ref, Cur) frame pair, encodes it with x265 or
    # HM, decodes the bitstream again, and visualises the coding structures.
    #==========================================
    def __init__(self, GUI):
    #==========================================
        super(Demo, self).__init__()
        # CHANGE HERE -------------------------------------------------
        # -------------------------------------------------------------
        self.GUI = GUI
        self.p0 = None
        self.use_ransac = True
        # All encoder/decoder artifacts are written into TmpDir.
        os.chdir(TmpDir)
    #==========================================
    def processFramePair(self, Ref, Cur):
    #==========================================
        # Encode the (Ref, Cur) pair at the QP chosen in the GUI, decode the
        # bitstream, and display CU/PU/mode overlays plus statistics.
        QP = self.GUI.p.param('Basic settings', 'QP').value()
        # Clamp and round the GUI value to a valid HEVC QP in [1, 50].
        if QP>50:
            QP=50
        if QP<1:
            QP=1
        QP = round(QP)
        self.GUI.p.param('Basic settings', 'QP').setValue(QP)
        isX265 = self.GUI.p.param('Basic settings', 'x265').value()
        DimY, DimX = Ref.shape[:2]
        assert(DimX == 640 and DimY == 480)
        self.starttime = time.time()
        FN_YUV = TmpDir + "/webcam_%dx%d.yuv" % ( DimX, DimY)
        # NOTE(review): YUV is binary data but the file is opened in text
        # mode "w" - works on Linux/Python 2, "wb" would be safer.
        outfile = open(FN_YUV, "w")
        #--------------------------------------
        # CREATE YUV (2 FRAMES)
        #--------------------------------------
        write_YUV(outfile, Ref)
        write_YUV(outfile, Cur)
        outfile.close()
        Sequ = Sequence(FN_YUV, Fr=2, FPS=30)
        #--------------------------------------
        # RUN X265 ENCODER
        #--------------------------------------
        if isX265:
            global X265_BinDir
            EncBin = X265_BinDir + "/x265"
            INI_FN = ProjectDir + "/PythonLib/Encoder/X265_Settings.ini"
            assert os.path.isfile(INI_FN), "INI file not found: " + INI_FN
            # NOTE(review): Passport="<PASSWORD>" is an anonymisation artifact
            # from the source dump - restore the real value if required.
            Encoder = X265_Encoder( OutputDir=TmpDir, Passport="<PASSWORD>",
                                Name="X265", InfoStr="_Test",
                                EncBin=EncBin, DecBin=None, INI_FN=INI_FN, OverrideStr="",
                                Sequ=Sequ, QP=QP,
                                PSNR_Tool=None)
            EncoderCmd = Encoder.get_CommandLineCall()
            print "EncoderCmd: " + EncoderCmd
            assert os.system(EncoderCmd)==0, "encoder cmd failed"
        else:
            #--------------------------------------
            # RUN HM ENCODER
            #--------------------------------------
            EncBin = ProjectDir + "/bin/TAppEncoder"
            INI_FN = ProjectDir + "/PythonLib/Encoder/HM_Encoder.ini"
            assert os.path.isfile(INI_FN), "INI file not found: " + INI_FN
            Encoder = HM_Encoder( OutputDir=TmpDir, Passport="<PASSWORD>",
                                Name="HMEnc", InfoStr="_Test",
                                EncBin=EncBin, DecBin=None, INI_FN=INI_FN, OverrideStr="",
                                Sequ=Sequ, QP=QP,
                                PSNR_Tool=None)
            EncoderCmd = Encoder.get_CommandLineCall()
            EncoderCmd += " --HARP_TmpDir=."
            print "EncoderCmd: " + EncoderCmd
            #DecoderCmd += " --HARP_PUs" #debug
            assert os.system(EncoderCmd)==0, "encoder cmd failed"
        #--------------------------------------
        # RUN HM DECODER
        #--------------------------------------
        DecoderCmd = ProjectDir + "/bin/TAppDecoder -b " + Encoder.bitstream + " -o decoded.yuv --HARP_TmpDir=. "
        #DecoderCmd += " --HARP_PUs" #debug
        assert os.system(DecoderCmd)==0, "decoder cmd failed"
        #-------------------------------------
        # LOADING DECODER PKL
        #-------------------------------------
        print "LOADING DECODER PKL"
        # POC 0 is the intra frame, POC 1 the inter frame.
        POCIdx = 0
        FN = TmpDir + "/" + "PyPOC_%05d.pkl" % POCIdx
        assert os.path.exists(FN), "PWD: %s, missing FN: %s" % (os.getcwd(), FN)
        POC_Intra = pickle.load(open(FN, "rb" ) )
        POCIdx = 1
        FN = TmpDir + "/" + "PyPOC_%05d.pkl" % POCIdx
        assert os.path.exists(FN), "PWD: %s, missing FN: %s" % (os.getcwd(), FN)
        POC_Inter = pickle.load(open(FN, "rb" ) )
        #self.GUI.p.param('Basic settings', 'Show CUs').setOpts(readonly=False, enabled=True)
        Show_CUs = self.GUI.p.param('Basic settings', 'Show CUs').value()
        Show_PUs = self.GUI.p.param('Basic settings', 'Show PUs').value()
        Show_Modes = self.GUI.p.param('Basic settings', 'Show Modes').value()
        #self.GUI.p.param('Basic settings', 'Show CUs').setValue(not self.GUI.p.param('Basic settings', 'Show CUs').value())
        self.ShowPOC_Intra = ShowPOC(POC_Intra, Show_CUs, Show_PUs, Show_Modes)
        self.ShowPOC_Intra.visualize()
        self.ShowPOC_Inter = ShowPOC(POC_Inter, Show_CUs, Show_PUs, Show_Modes)
        self.ShowPOC_Inter.visualize()
        self.AnalyzePOC_Inter = AnalyzePOC(POC_Inter)
        self.AnalyzePOC_Inter.analyze()
        #cv2.imwrite(TmpDir + "/VizPUs.jpg", VizPUs)
        #--------------------------------------
        # TIMING
        #--------------------------------------
        NumSecs = (time.time() - self.starttime)
        FPS = 1 / NumSecs
        print "\nFPS = %f \n ------------------ \n" % FPS
    #==========================================
    def run(self):
    #==========================================
        # Main worker loop; wrapped so the webcam is always released on error
        # (otherwise V4L2 keeps the device busy).
        try :
            global Ref
            global Cur
            #-------------------------------------
            # FOR ALL RUNS
            #-------------------------------------
            NumRuns = 1000
            for myrun in np.arange(NumRuns):
                # NOTE(review): processFramePair takes (Ref, Cur) only - the
                # extra 15 raises TypeError; and self.VizPUs is never assigned
                # anywhere in this class, so the imshow below would fail too.
                self.processFramePair(Ref, Cur, 15)
                cv2.imshow('frame', self.VizPUs)
                cv2.waitKey(1)
            stopGrabbing()
            cv2.destroyAllWindows()
        except Exception, Str: #prevent V4L2 to eat the webcam
            import traceback
            print "Exception!"
            stopGrabbing()
            cv2.destroyAllWindows()
            print "EXCEPTION------------------------"
            print "Unexpected ERROR:", sys.exc_info()[0]
            traceback.print_tb(sys.exc_info()[2])
            print Str
            print "---------------------------------"
            raise
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class GUIMainWindow(QtGui.QMainWindow):
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#==========================================
def __init__(self):
#==========================================
super(GUIMainWindow, self).__init__()
# CHANGE HERE -------------------------------------------------
# -------------------------------------------------------------
area = DockArea()
self.setCentralWidget(area)
self.resize(1280, 800)
self.setWindowTitle('Demo: How to use HARP with Python')
## Create docks, place them into the window one at a time.
## Note that size arguments are only a suggestion; docks will still have to
## fill the entire dock area and obey the limits of their internal widgets.
d1 = Dock("Control", size=(300,200)) ## give this dock the minimum possible size
d2 = Dock("Description", size=(300,800))
d31 = Dock("INTRA frame - Prediction Units", size=(500,300))
d32 = Dock("INTER frame - Prediction Units", size=(500,300))
#d33 = Dock("Dock3 - Transform Units", size=(500,300))
d41 = Dock("Frame Difference ", size=(100,100))
d42 = Dock("Current Frame ", size=(100,100))
d51 = Dock("CU Depths", size=(200,100))
d52 = Dock("MVs X Component", size=(200,100))
d53 = Dock("MVs Y Component", size=(200,100))
area.addDock(d2, 'left') ## place d1 at left edge of dock area (it will fill the whole space since there are no other docks yet)
area.addDock(d1, 'bottom', d2) ## place d2 at right edge of dock area
area.addDock(d31, 'right')
area.addDock(d32, 'bottom', d31)
#area.addDock(d33, 'bottom', d32)
area.addDock(d41, 'right')
area.addDock(d51, 'bottom', d41)
area.addDock(d42, 'right', d41)
area.addDock(d52, 'right', d51)
area.addDock(d53, 'right', d52)
#==========================================
def dock_ImageItem(self, Dock):
#==========================================
pgGLWidget = pg.GraphicsLayoutWidget()
ViewBox = pgGLWidget.addViewBox(invertY = True)
#ViewBox.setBackgroundColor((255,255,255))
ViewBox.setAspectLocked(True)
pgImageItem = pg.ImageItem(border='w')
ViewBox.addItem(pgImageItem)
Dock.addWidget(pgGLWidget)
return pgImageItem
#==========================================
def dock_CurveItem(self, Dock, Title, LabelX, LabelY):
#==========================================
pgGWindow= pg.GraphicsLayoutWidget()
pgPlot = pgGWindow.addPlot(title=Title)
x =[0,0,0]
y = [0,0]
pgCurveItem = pg.PlotCurveItem(x, y, stepMode=True, fillLevel=0, brush=(0, 255, 0, 80))
pgPlot.addItem(pgCurveItem)
pgPlot.setLabel('bottom', LabelX)
pgPlot.setLabel('left', LabelY)
Dock.addWidget(pgGWindow)
return pgCurveItem
self.ImageItem_d2 = dock_ImageItem(self, d2)
self.ImageItem_d31 = dock_ImageItem(self, d31)
self.ImageItem_d32 = dock_ImageItem(self, d32)
self.ImageItem_d41 = dock_ImageItem(self, d41)
self.ImageItem_d42 = dock_ImageItem(self, d42)
self.CurveItem_d51 = dock_CurveItem(self, d51, "CU Depths", "CU Depth", "Number of Occurences")
self.CurveItem_d52 = dock_CurveItem(self, d52, "MVs X Component", "Magnitude", "Number of Occurences")
self.CurveItem_d53 = dock_CurveItem(self, d53, "MVs Y Component", "Magnitude", "Number of Occurences")
params = [
{'name': 'Basic settings', 'type': 'group', 'children':
[
{'name': 'QP', 'type': 'int', 'value': 30},
{'name': 'x265', 'type': 'bool', 'value': True},
{'name': 'Show CUs', 'type': 'bool', 'value': True},
{'name': 'Show PUs', 'type': 'bool', 'value': True},
{'name': 'Show Modes', 'type': 'bool', 'value': True},
]},
]
## Create tree of Parameter objects
p = Parameter.create(name='params', type='group', children=params, readonly=False, enabled=True)
t = ParameterTree()
t.setParameters(p, showTop=False)
t.setWindowTitle('pyqtgraph example: Parameter Tree')
self.p = p
d1.addWidget(t)
MyWorkThread = WorkThread(self)
MyWorkThread.start()
Description = readImg(ProjectDir + "/Various/Resources/Special/LMS_Demo.png")
Description = cv2.transpose(cv2.cvtColor(Description, cv2.COLOR_BGR2RGB))
self.ImageItem_d2.setImage(Description, autoDownsample=True, border=(255,255,255) | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BackupPlanArgs', 'BackupPlan']
# Auto-generated by the Pulumi Terraform Bridge (tfgen) - regenerate from the
# provider schema rather than hand-editing (see the file header warning).
# @pulumi.input_type introspects this class's structure, so its shape must
# stay exactly as generated.
@pulumi.input_type
class BackupPlanArgs:
    def __init__(__self__, *,
                 backup_time: pulumi.Input[str],
                 cluster_id: pulumi.Input[str],
                 data_center_id: pulumi.Input[str],
                 active: Optional[pulumi.Input[bool]] = None,
                 backup_period: Optional[pulumi.Input[str]] = None,
                 retention_period: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a BackupPlan resource.
        :param pulumi.Input[str] backup_time: The start time of the backup task each day. The time is displayed in UTC and denoted by Z.
        :param pulumi.Input[str] cluster_id: The ID of the cluster for the backup.
        :param pulumi.Input[str] data_center_id: The ID of the data center for the backup in the cluster.
        :param pulumi.Input[bool] active: Specifies whether to activate the backup plan. Valid values: `True`, `False`. Default value: `True`.
        :param pulumi.Input[str] backup_period: The backup cycle. Valid values: `Friday`, `Monday`, `Saturday`, `Sunday`, `Thursday`, `Tuesday`, `Wednesday`.
        :param pulumi.Input[int] retention_period: The duration for which you want to retain the backup. Valid values: 1 to 30. Unit: days. Default value: `30`.
        """
        pulumi.set(__self__, "backup_time", backup_time)
        pulumi.set(__self__, "cluster_id", cluster_id)
        pulumi.set(__self__, "data_center_id", data_center_id)
        # Optional arguments are only recorded when explicitly provided so the
        # provider can apply its own defaults.
        if active is not None:
            pulumi.set(__self__, "active", active)
        if backup_period is not None:
            pulumi.set(__self__, "backup_period", backup_period)
        if retention_period is not None:
            pulumi.set(__self__, "retention_period", retention_period)
    # Each property pair below maps a snake_case Python attribute onto the
    # camelCase provider-schema name given in @pulumi.getter(name=...).
    @property
    @pulumi.getter(name="backupTime")
    def backup_time(self) -> pulumi.Input[str]:
        """
        The start time of the backup task each day. The time is displayed in UTC and denoted by Z.
        """
        return pulumi.get(self, "backup_time")
    @backup_time.setter
    def backup_time(self, value: pulumi.Input[str]):
        pulumi.set(self, "backup_time", value)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> pulumi.Input[str]:
        """
        The ID of the cluster for the backup.
        """
        return pulumi.get(self, "cluster_id")
    @cluster_id.setter
    def cluster_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_id", value)
    @property
    @pulumi.getter(name="dataCenterId")
    def data_center_id(self) -> pulumi.Input[str]:
        """
        The ID of the data center for the backup in the cluster.
        """
        return pulumi.get(self, "data_center_id")
    @data_center_id.setter
    def data_center_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "data_center_id", value)
    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to activate the backup plan. Valid values: `True`, `False`. Default value: `True`.
        """
        return pulumi.get(self, "active")
    @active.setter
    def active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "active", value)
    @property
    @pulumi.getter(name="backupPeriod")
    def backup_period(self) -> Optional[pulumi.Input[str]]:
        """
        The backup cycle. Valid values: `Friday`, `Monday`, `Saturday`, `Sunday`, `Thursday`, `Tuesday`, `Wednesday`.
        """
        return pulumi.get(self, "backup_period")
    @backup_period.setter
    def backup_period(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_period", value)
    @property
    @pulumi.getter(name="retentionPeriod")
    def retention_period(self) -> Optional[pulumi.Input[int]]:
        """
        The duration for which you want to retain the backup. Valid values: 1 to 30. Unit: days. Default value: `30`.
        """
        return pulumi.get(self, "retention_period")
    @retention_period.setter
    def retention_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retention_period", value)
# Auto-generated state type (tfgen): every field is optional because it is
# used for looking up/importing existing BackupPlan resources, where any
# subset of properties may be known.
@pulumi.input_type
class _BackupPlanState:
    def __init__(__self__, *,
                 active: Optional[pulumi.Input[bool]] = None,
                 backup_period: Optional[pulumi.Input[str]] = None,
                 backup_time: Optional[pulumi.Input[str]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 data_center_id: Optional[pulumi.Input[str]] = None,
                 retention_period: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering BackupPlan resources.
        :param pulumi.Input[bool] active: Specifies whether to activate the backup plan. Valid values: `True`, `False`. Default value: `True`.
        :param pulumi.Input[str] backup_period: The backup cycle. Valid values: `Friday`, `Monday`, `Saturday`, `Sunday`, `Thursday`, `Tuesday`, `Wednesday`.
        :param pulumi.Input[str] backup_time: The start time of the backup task each day. The time is displayed in UTC and denoted by Z.
        :param pulumi.Input[str] cluster_id: The ID of the cluster for the backup.
        :param pulumi.Input[str] data_center_id: The ID of the data center for the backup in the cluster.
        :param pulumi.Input[int] retention_period: The duration for which you want to retain the backup. Valid values: 1 to 30. Unit: days. Default value: `30`.
        """
        # Only explicitly-provided values are recorded.
        if active is not None:
            pulumi.set(__self__, "active", active)
        if backup_period is not None:
            pulumi.set(__self__, "backup_period", backup_period)
        if backup_time is not None:
            pulumi.set(__self__, "backup_time", backup_time)
        if cluster_id is not None:
            pulumi.set(__self__, "cluster_id", cluster_id)
        if data_center_id is not None:
            pulumi.set(__self__, "data_center_id", data_center_id)
        if retention_period is not None:
            pulumi.set(__self__, "retention_period", retention_period)
    # Property pairs map snake_case attributes onto camelCase schema names.
    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to activate the backup plan. Valid values: `True`, `False`. Default value: `True`.
        """
        return pulumi.get(self, "active")
    @active.setter
    def active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "active", value)
    @property
    @pulumi.getter(name="backupPeriod")
    def backup_period(self) -> Optional[pulumi.Input[str]]:
        """
        The backup cycle. Valid values: `Friday`, `Monday`, `Saturday`, `Sunday`, `Thursday`, `Tuesday`, `Wednesday`.
        """
        return pulumi.get(self, "backup_period")
    @backup_period.setter
    def backup_period(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_period", value)
    @property
    @pulumi.getter(name="backupTime")
    def backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        The start time of the backup task each day. The time is displayed in UTC and denoted by Z.
        """
        return pulumi.get(self, "backup_time")
    @backup_time.setter
    def backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_time", value)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the cluster for the backup.
        """
        return pulumi.get(self, "cluster_id")
    @cluster_id.setter
    def cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_id", value)
    @property
    @pulumi.getter(name="dataCenterId")
    def data_center_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the data center for the backup in the cluster.
        """
        return pulumi.get(self, "data_center_id")
    @data_center_id.setter
    def data_center_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_center_id", value)
    @property
    @pulumi.getter(name="retentionPeriod")
    def retention_period(self) -> Optional[pulumi.Input[int]]:
        """
        The duration for which you want to retain the backup. Valid values: 1 to 30. Unit: days. Default value: `30`.
        """
        return pulumi.get(self, "retention_period")
    @retention_period.setter
    def retention_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retention_period", value)
class BackupPlan(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
backup_period: Optional[pulumi.Input[str]] = None,
backup_time: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
data_center_id: Optional[pulumi.Input[str]] = None,
retention_period: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Provides a Cassandra Backup Plan resource.
For information about Cassandra Backup Plan and how to use it, see [What is Backup Plan](https://www.alibabacloud.com/help/doc-detail/157522.htm).
> **NOTE:** Available in v1.128.0+.
## Import
Cassandra Backup Plan can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cassandra/backupPlan:BackupPlan example <cluster_id>:<data_center_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] active: Specifies whether to activate the backup plan. Valid values: `True`, `False`. Default value: `True`.
:param pulumi.Input[str] backup_period: The backup cycle. Valid values: `Friday`, `Monday`, `Saturday`, `Sunday`, `Thursday`, `Tuesday`, `Wednesday`.
:param pulumi.Input[str] backup_time: The start time of the backup task each day. The time is displayed in UTC and denoted by Z.
:param pulumi.Input[str] cluster_id: The ID of the cluster for the backup.
:param pulumi.Input[str] data_center_id: The ID of the data center for the backup in the cluster.
:param pulumi.Input[int] retention_period: The duration for which you want to retain the backup. Valid values: 1 to 30. Unit: days. Default value: `30`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BackupPlanArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Cassandra Backup Plan resource.
For information about Cassandra Backup Plan and how to use it, see [What is Backup Plan](https://www.alibabacloud.com/help/doc-detail/157522.htm).
> **NOTE:** Available in v1.128.0+.
## Import
Cassandra Backup Plan can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cassandra/backupPlan:BackupPlan example <cluster_id>:<data_center_id>
```
:param str resource_name: The name of the resource.
:param BackupPlanArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BackupPlanArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
backup_period: Optional[pulumi.Input[str]] = None,
backup_time: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
data_center_id: Optional[pulumi.Input[str]] = None,
retention_period: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BackupPlanArgs.__new__(BackupPlanArgs)
__props__.__dict__["active"] = active
__props__.__dict__["backup_period"] = backup_period
if backup_time is None and not opts.urn:
raise TypeError("Missing required property 'backup_time'")
__props__.__dict__["backup_time"] = backup_time
if cluster_id is None and not opts.urn:
raise TypeError("Missing required property 'cluster_id'")
__props__.__dict__["cluster_id"] = cluster_id
if data_center_id is None and not opts.urn:
raise TypeError("Missing required property 'data_center_id'")
__props__.__dict__["data_center_id"] = data_center_id
__props__.__dict__["retention_period"] = retention_period
super(BackupPlan, __self__).__init__(
'alicloud:cassandra/backupPlan:BackupPlan',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: | |
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cb u0 {1,S}
5 Cs u0 {1,S}
6 C u0 {2,D}
7 C u0 {3,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)CbCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 617,
label = "Cs-(Cds-Cds)(Cds-Cds)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cb u0 {1,S}
5 Cs u0 {1,S}
6 Cd u0 {2,D}
7 Cd u0 {3,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 618,
label = "Cs-(Cds-Cdd)(Cds-Cds)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cb u0 {1,S}
5 Cs u0 {1,S}
6 Cdd u0 {2,D}
7 Cd u0 {3,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)(Cds-Cds)CbCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 619,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cds)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cb u0 {1,S}
6 Cs u0 {1,S}
7 Cd u0 {3,D}
8 O2d u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cds)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cb u0 {1,S}
6 Cs u0 {1,S}
7 Cd u0 {3,D}
8 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 620,
label = "Cs-(Cds-Cdd-Cd)(Cds-Cds)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cb u0 {1,S}
6 Cs u0 {1,S}
7 Cd u0 {3,D}
8 C u0 {4,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)CbCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 621,
label = "Cs-(Cds-Cdd)(Cds-Cdd)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cb u0 {1,S}
5 Cs u0 {1,S}
6 Cdd u0 {2,D}
7 Cdd u0 {3,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CbCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 622,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Cb u0 {1,S}
7 Cs u0 {1,S}
8 O2d u0 {4,D}
9 O2d u0 {5,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 623,
label = "Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Cb u0 {1,S}
7 Cs u0 {1,S}
8 O2d u0 {4,D}
9 C u0 {5,D}
""",
thermo = u'Cs-(Cds-Cdd-O2d)(Cds-Cds)CbCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Cb u0 {1,S}
7 Cs u0 {1,S}
8 S2d u0 {4,D}
9 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Cb u0 {1,S}
7 Cs u0 {1,S}
8 S2d u0 {4,D}
9 C u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 624,
label = "Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CbCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {6,S} {7,S}
2 Cd u0 {1,S} {4,D}
3 Cd u0 {1,S} {5,D}
4 Cdd u0 {2,D} {8,D}
5 Cdd u0 {3,D} {9,D}
6 Cb u0 {1,S}
7 Cs u0 {1,S}
8 C u0 {4,D}
9 C u0 {5,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)CbCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 625,
label = "Cs-CtCtCdsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Ct u0 {1,S}
3 Ct u0 {1,S}
4 [Cd,CO] u0 {1,S}
5 Cs u0 {1,S}
""",
thermo = u'Cs-(Cds-Cds)CtCtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 626,
label = "Cs-(Cds-O2d)CtCtCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {6,D}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 O2d u0 {2,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 627,
label = "Cs-(Cds-Cd)CtCtCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 C u0 {2,D}
""",
thermo = u'Cs-(Cds-Cds)CtCtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
    index = 628,
    label = "Cs-(Cds-Cds)CtCtCs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 Cd u0 {2,D}
""",
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([3.99,7.36,8.89,9.58,9.76,9.16,7.25],'cal/(mol*K)','+|-',[0.13,0.13,0.13,0.13,0.13,0.13,0.13]),
        H298 = (5.1,'kcal/mol','+|-',0.26),
        S298 = (-34.88,'cal/(mol*K)','+|-',0.13),
    ),
    # NOTE(review): decoded quoted-printable artifact "=3D" back to "=" (RFC 2045).
    shortDesc = u"""Cs-CtCtCdCs BOZZELLI = Cs/Cd2/Cs2 + (Cs/Cs3/Ct - Cs/Cs4)""",
    longDesc =
u"""
""",
)
entry(
index = 629,
label = "Cs-(Cds-Cdd)CtCtCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 Cdd u0 {2,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)CtCtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 630,
label = "Cs-(Cds-Cdd-O2d)CtCtCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 Cs u0 {1,S}
7 O2d u0 {3,D}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)CtCtCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 Cs u0 {1,S}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 631,
label = "Cs-(Cds-Cdd-Cd)CtCtCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 Cs u0 {1,S}
7 C u0 {3,D}
""",
thermo = u'Cs-(Cds-Cds)CtCtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 632,
label = "Cs-CbCtCdsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Ct u0 {1,S}
4 [Cd,CO] u0 {1,S}
5 Cs u0 {1,S}
""",
thermo = u'Cs-(Cds-Cds)CbCtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 633,
label = "Cs-(Cds-O2d)CbCtCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 O2d u0 {2,D}
""",
thermo = u'Cs-(Cds-O2d)(Cds-Cds)CtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 634,
label = "Cs-(Cds-Cd)CbCtCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 C u0 {2,D}
""",
thermo = u'Cs-(Cds-Cds)CbCtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
    index = 635,
    label = "Cs-(Cds-Cds)CbCtCs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 Cd u0 {2,D}
""",
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([3.99,7.36,8.89,9.58,9.76,9.16,7.25],'cal/(mol*K)','+|-',[0.13,0.13,0.13,0.13,0.13,0.13,0.13]),
        H298 = (5.1,'kcal/mol','+|-',0.26),
        S298 = (-34.88,'cal/(mol*K)','+|-',0.13),
    ),
    # NOTE(review): decoded quoted-printable artifact "=3D" back to "=" (RFC 2045).
    shortDesc = u"""Cs-CbCtCdCs BOZZELLI = Cs/Cb/Cd/Cs2 + (Cs/Cs3/Ct - Cs/Cs4)""",
    longDesc =
u"""
""",
)
entry(
index = 636,
label = "Cs-(Cds-Cdd)CbCtCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 Cdd u0 {2,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)CbCtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 637,
label = "Cs-(Cds-Cdd-O2d)CbCtCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 Cs u0 {1,S}
7 O2d u0 {3,D}
""",
thermo = u'Cs-(Cds-Cdd-O2d)(Cds-Cds)CtCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)CbCtCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 Cs u0 {1,S}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 638,
label = "Cs-(Cds-Cdd-Cd)CbCtCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 | |
#!/usr/bin/env python3
# Script to control LimitlessLED DualWhite, RGB+W and RGBWW/CW bulbs through v6 WiFi Bridge.
#
# <NAME>'s work for Domoticz has been a huge influence while making this script,
# which is tailored to be a CLI tool for integration with HA-bridge and Amazon Echo (Alexa).
#
# Requires Python 3.
#
# Used http://www.limitlessled.com/dev/ as reference and for examples, to extract
# UDP codes valid for API v6. Also, since the official API description is not complete,
# I had to hunt for the info and found it here at:
# https://github.com/mwittig/node-milight-promise/blob/master/src/commandsV6.js
# which enabled me to implement DualWhite bulbs support, thanks!
import socket
import sys
import time
import getopt
import binascii
import ipaddress
import select
__version__ = str(0.7)
__changed__ = str(20170113)
__author__ = "<NAME>"
__email__ = "<EMAIL>"
logfile = "limitlessled-cli.log" # Filename where some debug messages are written to
# CLI help text shown by usage().
# NOTE(review): the name "help" shadows the builtin; kept because usage()
# below refers to it by this name.
# Fix: removed a stray '")' left inside the Usage line (leftover from a
# print(...) call that was converted into this string).
help = """LimitlessLED-CLI v""" + str(__version__) + """ (""" + str(__changed__) + """)
Copyright (c) """ + __changed__[:4] + """ by """ + __author__ + """ <""" + __email__ + """>
Usage: """ + sys.argv[0] + """ -c <command> -t <device_type> -b <bridge_ip> -z <zone> -p <extra param>
Options:
  -c,--command    provide a command to be sent to bridge [mandatory]:
                  - Universal: ON, OFF, NIGHT
                  - DualWhite: BRIGHTUP, BRIGHTDOWN, BRIGHTMAX,
                    WARMER, COOLER
                  - RGBW & RGBW/WW/CW: BRIGHT, KELVIN, MODE, MODEFASTER,
                    MODESLOWER, WHITE, COLOR
                  - RGBWWCW: SATURATION
                  - iBox Bridge: WHITE, BRIGHT, MODE, COLOR
                  NOTE: There are predefined color sets than can be called with
                  the following commands:
                  LAVENDER, BLUE, AQUA, GREEN, LIME, YELLOW, ORANGE
  -t,--type       type of bulb the command is being sent to [mandatory]:
                  - WHITE (DualWhite) - white only bulbs
                  - RGBW (RGBWW or RGBCW) - previous generation
                  - RGBWW (RGBW/WW/CW) - latest generation
  -b,--bridge_ip  provide an IP address of the bridge [mandatory]
  -f,--force      force repeated attempts when device type is WHITE
                  INFO: This is a hack to allow HA-bridge to first send a BRIGHTMAX
                  command and then send a correct number of BRIGHTDOWN messages
                  to set the brightness of the bulb based on the percentage
                  value of the optional parameter. So far this is my best
                  attempt at making it possible. If you have a better idea
                  for such functionality, please let me know!
  -h,--help       print this help screen
  -o,--override   override the command (for debug purposes, use discouraged)
  -p,--parameter  optional parameter depending on selected command and type:
                  - Percentage (0-100) for brightness, examples:
                    0% = 2700K (Warm White)
                    25% = 3650K
                    50% = 4600K
                    75% = 5550K
                    100% = 6500K (Cool White)
                  - Percentage (0-100) for saturation [RGBW/WW/CW only]
                  - Range 1-9 for "disco" modes
                  - Range 0-255 for color
  -v,--verbose    verbose output on screen
  -z,--zone       provide zone/group of lamps in range 1-4 [defaults to 0 (all)]
Examples:
  1. Turn on RGBWW zone 2:
     """ + sys.argv[0] + """ -c ON -t RGBWW -b 192.168.1.120 -z 2
  2. Set RGBWW zone 2 to white:
     """ + sys.argv[0] + """ -c WHITE -t RGBWW -b 192.168.1.120 -z 2
  3. Set RGBWW zone 2 to disco mode 6:
     """ + sys.argv[0] + """ -c MODE -t RGBWW -b 192.168.1.120 -z 2 -p 6
  4. Set all WHITE zones to max brightness:
     """ + sys.argv[0] + """ -c BRIGHTMAX -t WHITE -b 192.168.1.120
  5. Increase brightness for all WHITE zones by 20%:
     """ + sys.argv[0] + """ -c BRIGHTUP -t WHITE -b 192.168.1.120 -p 20
  6. Decrease brightness for all WHITE zones by 20%:
     """ + sys.argv[0] + """ -c BRIGHTDOWN -t WHITE -b 192.168.1.120 -p 20
  7. Turn night mode on iBox Bridge LED lamp:
     """ + sys.argv[0] + """ -c NIGHT -t IBOX -b 192.168.1.120 -v
Debug usage:
  Set iBox Bridge LED lamp to color red:
     """ + sys.argv[0] + """ -t IBOX -b 192.168.1.120 -v -o -p "31 00 00 00 01 FF FF FF FF 00 00"
"""
def usage():
    """Print the module-level ``help`` text (CLI usage screen) to stdout."""
    print(help)
def log(pref, message):
    """Append a timestamped message to the module-level log file.

    The message is also echoed to stdout when the global ``verbose`` flag
    is set. ``pref`` is a severity tag such as "ERROR" or "INFO".
    """
    debug_message = time.ctime() + " [" + pref + "] " + message
    if verbose:
        print(debug_message)
    # Context manager guarantees the handle is closed even if the write
    # fails (the original opened/closed the file manually and could leak
    # the handle on an exception between open() and close()).
    with open(logfile, "a") as log_file:
        log_file.write(debug_message + "\n")
def get_command(usercommand, device_type, zone):
command_bridgeled = {
"ON" : "31 00 00 XX 03 03 00 00 00 00 00",
"OFF" : "31 00 00 XX 03 04 00 00 00 00 00",
"WHITE" : "31 00 00 XX 03 05 00 00 00 00 00",
"NIGHT" : "31 00 00 XX 03 06 00 00 00 00 00",
"BRIGHT" : "31 00 00 XX 02 00 00 00 00 00 00",
"MODE" : "31 00 00 XX 04 00 00 00 00 00 00",
"COLOR" : "31 00 00 XX 01 00 00 00 00 00 00",
"RED" : "31 00 00 XX 01 FF FF FF FF 00 00",
"LAVENDER" : "31 00 00 XX 01 D9 D9 D9 D9 00 00",
"BLUE" : "31 00 00 XX 01 BA BA BA BA 00 00",
"AQUA" : "31 00 00 XX 01 85 85 85 85 00 00",
"GREEN" : "31 00 00 XX 01 7A 7A 7A 7A 00 00",
"LIME" : "31 00 00 XX 01 54 54 54 54 00 00",
"YELLOW" : "31 00 00 XX 01 3B 3B 3B 3B 00 00",
"ORANGE" : "31 00 00 XX 01 1E 1E 1E 1E 00 00",
}
command_dualwhite = {
"ON" : "31 00 00 XX 01 07 00 00 00 00 00",
"OFF" : "31 00 00 XX 01 08 00 00 00 00 00",
"BRIGHTUP" : "31 00 00 XX 01 01 00 00 00 00 00",
"BRIGHTDOWN" : "31 00 00 XX 01 02 00 00 00 00 00",
"BRIGHTMAX" : "31 00 00 XX 81 07 00 00 00 00 00",
"NIGHT" : "31 00 00 XX 01 06 00 00 00 00 00",
"WARMER" : "31 00 00 XX 01 03 00 00 00 00 00",
"COOLER" : "31 00 00 XX 01 04 00 00 00 00 00",
}
command_colour = {
"ON" : "31 00 00 XX 04 01 00 00 00 00 00",
"OFF" : "31 00 00 XX 04 02 00 00 00 00 00",
"NIGHT" : "31 00 00 XX 04 05 00 00 00 00 00",
"BRIGHT" : "31 00 00 XX 03 00 00 00 00 00 00",
"WHITE" : "31 00 00 XX 05 64 00 00 00 00 00",
"KELVIN" : "31 00 00 XX 05 00 00 00 00 00 00",
"SATURATION" : "31 00 00 XX 02 00 00 00 00 00 00",
"MODE" : "31 00 00 XX 04 00 00 00 00 00 00",
"MODEFASTER" : "31 00 00 XX 04 03 00 00 00 00 00",
"MODESLOWER" : "31 00 00 XX 04 04 00 00 00 00 00",
"RED" : "31 00 00 XX 01 00 00 00 FF 00 00",
"LAVENDER" : "31 00 00 XX 01 00 00 00 D9 00 00",
"BLUE" : "31 00 00 XX 01 00 00 00 BA 00 00",
"AQUA" : "31 00 00 XX 01 00 00 00 85 00 00",
"GREEN" : "31 00 00 XX 01 00 00 00 7A 00 00",
"LIME" : "31 00 00 XX 01 00 00 00 54 00 00",
"YELLOW" : "31 00 00 XX 01 00 00 00 3B 00 00",
"ORANGE" : "31 00 00 XX 01 00 00 00 1E 00 00",
}
if override:
command = str(usercommand)
elif device_type == "IBOX":
device = "00"
try:
command = command_bridgeled.get(usercommand).replace("XX", device)
except:
log("ERROR", "Command not found!")
sys.exit(1)
elif device_type == "WHITE":
device = "01"
try:
command = command_dualwhite.get(usercommand).replace("XX", device)
except:
log("ERROR", "Command not found!")
sys.exit(1)
elif device_type == "RGBW":
device = "07"
try:
command = command_colour.get(usercommand).replace("XX", device)
command = command[:12] + "03" + command[14:]
except:
log("ERROR", "Command not found!")
sys.exit(1)
elif device_type == "RGBWW":
device = "08"
try:
command = command_colour.get(usercommand).replace("XX", device)
except:
log("ERROR", "Command not found!")
sys.exit(1)
else:
log("ERROR", "Unknown device type!")
sys.exit(1)
if usercommand == "BRIGHT" or usercommand == "KELVIN":
if device_type == "RGBW":
command = command[:12] + "02" + command[14:]
try:
percent = int(param)
percent = hex(percent)[2:].zfill(2).upper()
command = command[:15] + percent + command[17:]
except:
log("ERROR", "No extra parameter provided, aborting!")
sys.exit(1)
if usercommand == "MODE":
try:
mode = int(param)
if mode >= 1 and mode <= 9:
mode = hex(mode)[2:].zfill(2).upper()
command = command[:15] + mode + command[17:]
else:
log("ERROR", "Parameter out of range (1-9).")
sys.exit(1)
except:
log("ERROR", "No extra parameter provided, aborting!")
sys.exit(1)
if usercommand == "SATURATION":
try:
saturation = int(param)
if saturation >= 0 and saturation <= 100:
saturation = hex(saturation)[2:].zfill(2).upper()
command = command[:15] + saturation + command[17:]
else:
log("ERROR", "Parameter out of range (0-100).")
sys.exit(1)
except:
log("ERROR", "No extra parameter provided, aborting!")
sys.exit(1)
if usercommand == "COLOR":
try:
color = int(param)
if color >= 0 and color <= 255:
color = hex(color)[2:].zfill(2).upper()
color_msg = color + " "
for count in range(0, 3):
color_msg += str(color) | |
# tests/test_render_meshes_clipped.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Checks for mesh rasterization in the case where the camera enters the
inside of the mesh and some mesh faces are partially
behind the image plane. These faces are clipped and then rasterized.
See pytorch3d/renderer/mesh/clip.py for more details about the
clipping process.
"""
import unittest
import imageio
import numpy as np
import torch
from common_testing import TestCaseMixin, get_tests_dir, load_rgb_image
from pytorch3d.io import save_obj
from pytorch3d.renderer.cameras import (
FoVPerspectiveCameras,
PerspectiveCameras,
look_at_view_transform,
)
from pytorch3d.renderer.lighting import PointLights
from pytorch3d.renderer.mesh import (
ClipFrustum,
TexturesUV,
clip_faces,
convert_clipped_rasterization_to_original_faces,
)
from pytorch3d.renderer.mesh.rasterize_meshes import _RasterizeFaceVerts
from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
from pytorch3d.renderer.mesh.renderer import MeshRenderer
from pytorch3d.renderer.mesh.shader import SoftPhongShader
from pytorch3d.renderer.mesh.textures import TexturesVertex
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils import torus
# If DEBUG=True, save out images generated in the tests for debugging.
# All saved images have prefix DEBUG_
DEBUG = False
DATA_DIR = get_tests_dir() / "data"
class TestRenderMeshesClipping(TestCaseMixin, unittest.TestCase):
def load_cube_mesh_with_texture(self, device="cpu", with_grad: bool = False):
verts = torch.tensor(
[
[-1, 1, 1],
[1, 1, 1],
[1, -1, 1],
[-1, -1, 1],
[-1, 1, -1],
[1, 1, -1],
[1, -1, -1],
[-1, -1, -1],
],
device=device,
dtype=torch.float32,
requires_grad=with_grad,
)
# all faces correctly wound
faces = torch.tensor(
[
[0, 1, 4],
[4, 1, 5],
[1, 2, 5],
[5, 2, 6],
[2, 7, 6],
[2, 3, 7],
[3, 4, 7],
[0, 4, 3],
[4, 5, 6],
[4, 6, 7],
],
device=device,
dtype=torch.int64,
)
verts_uvs = torch.tensor(
[
[
[0, 1],
[1, 1],
[1, 0],
[0, 0],
[0.204, 0.743],
[0.781, 0.743],
[0.781, 0.154],
[0.204, 0.154],
]
],
device=device,
dtype=torch.float,
)
texture_map = load_rgb_image("room.jpg", DATA_DIR).to(device)
textures = TexturesUV(
maps=[texture_map], faces_uvs=faces.unsqueeze(0), verts_uvs=verts_uvs
)
mesh = Meshes([verts], [faces], textures=textures)
if with_grad:
return mesh, verts
return mesh
    def debug_cube_mesh_render(self):
        """
        End-End debug run of rendering a cube mesh with texture
        from decreasing camera distances. The camera starts
        outside the cube and enters the inside of the cube.
        """
        device = torch.device("cuda:0")
        mesh = self.load_cube_mesh_with_texture(device)
        # z_clip_value enables near-plane clipping once the camera is inside.
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=1e-8,
            faces_per_pixel=5,
            z_clip_value=1e-2,
            perspective_correct=True,
            bin_size=0,
        )
        # Only ambient, no diffuse or specular
        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0),),
            diffuse_color=((0.0, 0.0, 0.0),),
            specular_color=((0.0, 0.0, 0.0),),
            location=[[0.0, 0.0, -3.0]],
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(raster_settings=raster_settings),
            shader=SoftPhongShader(device=device, lights=lights),
        )
        # Render the cube by decreasing the distance from the camera until
        # the camera enters the cube. Check the output looks correct.
        images_list = []
        # Sweep from far (2.5) down to near (0.1); the cube has half-extent 1.
        dists = np.linspace(0.1, 2.5, 20)[::-1]
        for d in dists:
            R, T = look_at_view_transform(d, 0, 0)
            T[0, 1] -= 0.1  # move down in the y axis
            cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)
            images = renderer(mesh, cameras=cameras)
            rgb = images[0, ..., :3].cpu().detach()
            im = (rgb.numpy() * 255).astype(np.uint8)
            images_list.append(im)
        # Save a gif of the output - this should show
        # the camera moving inside the cube.
        if DEBUG:
            gif_filename = (
                "room_original.gif"
                if raster_settings.z_clip_value is None
                else "room_clipped.gif"
            )
            imageio.mimsave(DATA_DIR / gif_filename, images_list, fps=2)
            save_obj(
                f=DATA_DIR / "cube.obj",
                verts=mesh.verts_packed().cpu(),
                faces=mesh.faces_packed().cpu(),
            )
@staticmethod
def clip_faces(meshes):
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
face_verts = verts_packed[faces_packed]
mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
num_faces_per_mesh = meshes.num_faces_per_mesh()
frustum = ClipFrustum(
left=-1,
right=1,
top=-1,
bottom=1,
# In the unit tests for each case below the triangles are asummed
# to have already been projected onto the image plane.
perspective_correct=False,
z_clip_value=1e-2,
cull=True, # Cull to frustrum
)
clipped_faces = clip_faces(
face_verts, mesh_to_face_first_idx, num_faces_per_mesh, frustum
)
return clipped_faces
def test_grad(self):
"""
Check that gradient flow is unaffected when the camera is inside the mesh
"""
device = torch.device("cuda:0")
mesh, verts = self.load_cube_mesh_with_texture(device=device, with_grad=True)
raster_settings = RasterizationSettings(
image_size=512,
blur_radius=1e-5,
faces_per_pixel=5,
z_clip_value=1e-2,
perspective_correct=True,
bin_size=0,
)
renderer = MeshRenderer(
rasterizer=MeshRasterizer(raster_settings=raster_settings),
shader=SoftPhongShader(device=device),
)
dist = 0.4 # Camera is inside the cube
R, T = look_at_view_transform(dist, 0, 0)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)
images = renderer(mesh, cameras=cameras)
images.sum().backward()
# Check gradients exist
self.assertIsNotNone(verts.grad)
    def test_case_1(self):
        """
        Case 1: Single triangle fully in front of the image plane (z=0)
        Triangle is not clipped or culled. The triangle is assumed to have
        already been projected onto the image plane so no perspective
        correction is needed.
        """
        device = "cuda:0"
        verts = torch.tensor(
            [[0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0]],
            dtype=torch.float32,
            device=device,
        )
        faces = torch.tensor(
            [
                [0, 1, 2],
            ],
            dtype=torch.int64,
            device=device,
        )
        meshes = Meshes(verts=[verts], faces=[faces])
        clipped_faces = self.clip_faces(meshes)
        # Nothing was clipped: faces pass through unchanged and all the
        # clipping bookkeeping tensors stay unset (None).
        self.assertClose(clipped_faces.face_verts, verts[faces])
        self.assertEqual(clipped_faces.mesh_to_face_first_idx.item(), 0)
        self.assertEqual(clipped_faces.num_faces_per_mesh.item(), 1)
        self.assertIsNone(clipped_faces.faces_clipped_to_unclipped_idx)
        self.assertIsNone(clipped_faces.faces_clipped_to_conversion_idx)
        self.assertIsNone(clipped_faces.clipped_faces_neighbor_idx)
        self.assertIsNone(clipped_faces.barycentric_conversion)
    def test_case_2(self):
        """
        Case 2 triangles are fully behind the image plane (z=0) so are completely culled.
        Test with a single triangle behind the image plane.
        """
        device = "cuda:0"
        verts = torch.tensor(
            [[-1.0, 0.0, -1.0], [0.0, 1.0, -1.0], [1.0, 0.0, -1.0]],
            dtype=torch.float32,
            device=device,
        )
        faces = torch.tensor(
            [
                [0, 1, 2],
            ],
            dtype=torch.int64,
            device=device,
        )
        meshes = Meshes(verts=[verts], faces=[faces])
        clipped_faces = self.clip_faces(meshes)
        zero_t = torch.zeros(size=(1,), dtype=torch.int64, device=device)
        # Everything was culled: the clipped face list is empty and the
        # per-mesh bookkeeping collapses to zeros / empty tensors.
        self.assertClose(
            clipped_faces.face_verts, torch.empty(device=device, size=(0, 3, 3))
        )
        self.assertClose(clipped_faces.mesh_to_face_first_idx, zero_t)
        self.assertClose(clipped_faces.num_faces_per_mesh, zero_t)
        self.assertClose(
            clipped_faces.faces_clipped_to_unclipped_idx,
            torch.empty(device=device, dtype=torch.int64, size=(0,)),
        )
        self.assertIsNone(clipped_faces.faces_clipped_to_conversion_idx)
        self.assertIsNone(clipped_faces.clipped_faces_neighbor_idx)
        self.assertIsNone(clipped_faces.barycentric_conversion)
    def test_case_3(self):
        """
        Case 3 triangles have exactly two vertices behind the clipping plane (z=0) so are
        clipped into a smaller triangle.
        Test with a single triangle parallel to the z axis which intersects with
        the image plane.
        """
        device = "cuda:0"
        verts = torch.tensor(
            [[-1.0, 0.0, -1.0], [0.0, 0.0, 1.0], [1.0, 0.0, -1.0]],
            dtype=torch.float32,
            device=device,
        )
        faces = torch.tensor(
            [
                [0, 1, 2],
            ],
            dtype=torch.int64,
            device=device,
        )
        meshes = Meshes(verts=[verts], faces=[faces])
        clipped_faces = self.clip_faces(meshes)
        zero_t = torch.zeros(size=(1,), dtype=torch.int64, device=device)
        # Expected result: one smaller triangle whose base lies on the
        # z = z_clip_value (0.01) plane.
        clipped_face_verts = torch.tensor(
            [
                [
                    [0.4950, 0.0000, 0.0100],
                    [-0.4950, 0.0000, 0.0100],
                    [0.0000, 0.0000, 1.0000],
                ]
            ],
            device=device,
            dtype=torch.float32,
        )
        # barycentric_conversion[i, :, k] stores the barycentric weights
        # in terms of the world coordinates of the original
        # (big) triangle for the kth vertex in the clipped (small) triangle.
        barycentric_conversion = torch.tensor(
            [
                [
                    [0.0000, 0.4950, 0.0000],
                    [0.5050, 0.5050, 1.0000],
                    [0.4950, 0.0000, 0.0000],
                ]
            ],
            device=device,
            dtype=torch.float32,
        )
        self.assertClose(clipped_faces.face_verts, clipped_face_verts)
        self.assertEqual(clipped_faces.mesh_to_face_first_idx.item(), 0)
        self.assertEqual(clipped_faces.num_faces_per_mesh.item(), 1)
        self.assertClose(clipped_faces.faces_clipped_to_unclipped_idx, zero_t)
        self.assertClose(clipped_faces.faces_clipped_to_conversion_idx, zero_t)
        self.assertClose(
            clipped_faces.clipped_faces_neighbor_idx,
            zero_t - 1,  # default is -1
        )
        self.assertClose(clipped_faces.barycentric_conversion, barycentric_conversion)
    def test_case_4(self):
        """
        Case 4 triangles have exactly 1 vertex behind the clipping plane (z=0) so
        are clipped into a smaller quadrilateral and then divided into two triangles.
        Test with a single triangle parallel to the z axis which intersects with
        the image plane.
        """
        device = "cuda:0"
        verts = torch.tensor(
            [[0.0, 0.0, -1.0], [-1.0, 0.0, 1.0], [1.0, 0.0, 1.0]],
            dtype=torch.float32,
            device=device,
        )
        faces = torch.tensor(
            [
                [0, 1, 2],
            ],
            dtype=torch.int64,
            device=device,
        )
        meshes = Meshes(verts=[verts], faces=[faces])
        clipped_faces = self.clip_faces(meshes)
        # The quadrilateral left after clipping is split into two
        # triangles t1 and t2 sharing the edge at z = z_clip_value (0.01).
        clipped_face_verts = torch.tensor(
            [
                # t1
                [
                    [-0.5050, 0.0000, 0.0100],
                    [-1.0000, 0.0000, 1.0000],
                    [0.5050, 0.0000, 0.0100],
                ],
                # t2
                [
                    [0.5050, 0.0000, 0.0100],
                    [-1.0000, 0.0000, 1.0000],
                    [1.0000, 0.0000, 1.0000],
                ],
            ],
            device=device,
            dtype=torch.float32,
        )
        # Barycentric weights of each sub-triangle vertex in terms of the
        # original (unclipped) triangle's vertices.
        barycentric_conversion = torch.tensor(
            [
                [
                    [0.4950, 0.0000, 0.4950],
                    [0.5050, 1.0000, 0.0000],
                    [0.0000, 0.0000, 0.5050],
                ],
                [
                    [0.4950, 0.0000, 0.0000],
                    [0.0000, 1.0000, 0.0000],
                    [0.5050, 0.0000, 1.0000],
                ],
            ],
            device=device,
            dtype=torch.float32,
        )
        self.assertClose(clipped_faces.face_verts, clipped_face_verts)
        self.assertEqual(clipped_faces.mesh_to_face_first_idx.item(), 0)
        self.assertEqual(
            clipped_faces.num_faces_per_mesh.item(), 2
        )  # now two faces instead of 1
        self.assertClose(
            clipped_faces.faces_clipped_to_unclipped_idx,
            torch.tensor([0, 0], device=device, dtype=torch.int64),
        )
        # Neighboring face for each of the sub triangles e.g. for t1, neighbor is t2,
        # and for t2, neighbor is t1
        self.assertClose(
            clipped_faces.clipped_faces_neighbor_idx,
            torch.tensor([1, 0], device=device, dtype=torch.int64),
        )
        # barycentric_conversion is of shape (F_clipped)
        self.assertEqual(clipped_faces.barycentric_conversion.shape[0], 2)
        self.assertClose(clipped_faces.barycentric_conversion, barycentric_conversion)
        # Index into barycentric_conversion for each clipped face.
        self.assertClose(
            clipped_faces.faces_clipped_to_conversion_idx,
            torch.tensor([0, 1], device=device, dtype=torch.int64),
        )
def test_mixture_of_cases(self):
"""
Test with two meshes composed of different cases to check all the
indexing is correct.
Case 4 faces are subdivided into two faces which are referred
to as t1 and t2.
"""
device = "cuda:0"
# fmt: off
verts = [
torch.tensor(
[
[-1.0, 0.0, -1.0], # noqa: E241, E201
[ 0.0, 1.0, -1.0], # noqa: E241, E201
[ 1.0, 0.0, -1.0], # noqa: E241, E201
[ 0.0, -1.0, -1.0], # noqa: E241, E201
[-1.0, 0.5, 0.5], # noqa: E241, E201
[ 1.0, 1.0, 1.0], # noqa: E241, E201
[ 0.0, -1.0, 1.0], # noqa: E241, E201
[-1.0, 0.5, -0.5], | |
= new_name
else:
module.fail_json(changed=False, msg=str(err))
new_keys = []
if key_state == 'create':
try:
while key_count > key_qty:
new_keys.append(iam.create_access_key(
user_name=name).create_access_key_response.create_access_key_result.access_key)
key_qty += 1
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
if keys and key_state:
for access_key in keys:
if key_state in ('active', 'inactive'):
if access_key in current_keys:
for current_key, current_key_state in zip(current_keys, status):
if key_state != current_key_state.lower():
try:
iam.update_access_key(access_key, key_state.capitalize(), user_name=name)
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
module.fail_json(msg="Supplied keys not found for %s. "
"Current keys: %s. "
"Supplied key(s): %s" %
(name, current_keys, keys)
)
if key_state == 'remove':
if access_key in current_keys:
try:
iam.delete_access_key(access_key, user_name=name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
try:
final_keys, final_key_status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata]
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
for fk, fks in zip(final_keys, final_key_status):
updated_key_list.update({fk: fks})
return name_change, updated_key_list, changed, new_keys
def set_users_groups(module, iam, name, groups, updated=None, new_name=None):
    """Synchronise an IAM user's group memberships with ``groups``.

    Groups not listed are purged; pre-existing groups that are also in the
    new list are retained. Returns a (groups, changed) tuple.
    """
    changed = False
    if updated:
        name = new_name
    try:
        response = iam.get_groups_for_user(name)
        current_groups = [
            membership['group_name']
            for membership in response.list_groups_for_user_result.groups
        ]
        groups_to_remove = list(frozenset(current_groups).difference(groups))
        groups_to_add = list(frozenset(groups).difference(current_groups))
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    else:
        if current_groups:
            for group_name in groups_to_add:
                iam.add_user_to_group(group_name, name)
            for group_name in groups_to_remove:
                iam.remove_user_from_group(group_name, name)
        else:
            # User currently has no groups: add each requested group,
            # reporting a clear error when a group does not exist.
            for group_name in groups:
                try:
                    iam.add_user_to_group(group_name, name)
                except boto.exception.BotoServerError as err:
                    error_msg = boto_exception(err)
                    if ('The group with name %s cannot be found.' % group_name) in error_msg:
                        module.fail_json(changed=False, msg="Group %s doesn't exist" % group_name)
        if groups_to_remove or groups_to_add:
            changed = True
    return (groups, changed)
def create_group(module=None, iam=None, name=None, path=None):
    """Create an IAM group at ``path``; returns a (name, changed) tuple."""
    changed = False
    try:
        response = iam.create_group(name, path)
        # Walk the boto response chain so a malformed reply surfaces here.
        response.create_group_response.create_group_result.group
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    else:
        changed = True
    return name, changed
def delete_group(module=None, iam=None, name=None):
    """Delete an IAM group, removing its inline policies first if needed.

    Returns a (changed, name) tuple. Fails the module when the group still
    cannot be deleted (e.g. managed policies are attached, which boto
    cannot detach).
    """
    changed = False
    try:
        iam.delete_group(name)
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        if ('must delete policies first') in error_msg:
            # Inline policies block deletion; remove them and retry once.
            for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
                iam.delete_group_policy(name, policy)
            try:
                iam.delete_group(name)
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if ('must delete policies first') in error_msg:
                    # Fix: original message was missing a space between the
                    # concatenated parts and misspelled "Policies".
                    module.fail_json(changed=changed,
                                     msg="All inline policies have been removed. "
                                         "Though it appears that %s has Managed Policies. "
                                         "This is not currently supported by boto. Please "
                                         "detach the policies through the console and try "
                                         "again." % name)
                else:
                    module.fail_json(changed=changed, msg=str(error_msg))
            else:
                changed = True
        else:
            module.fail_json(changed=changed, msg=str(error_msg))
    else:
        changed = True
    return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
    """Rename and/or move an IAM group.

    Returns (changed, name, new_path, current_group_path) where ``name`` is
    the group's final name and ``current_group_path`` its path before any
    change.
    """
    changed = False
    try:
        group_info = iam.get_group(name)
        current_group_path = group_info.get_group_response.get_group_result.group['path']
        if new_path and current_group_path != new_path:
            iam.update_group(name, new_path=new_path)
            changed = True
        if new_name and name != new_name:
            iam.update_group(name, new_group_name=new_name, new_path=new_path)
            changed = True
            name = new_name
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    return changed, name, new_path, current_group_path
def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc):
    """Create an IAM role (and a same-named instance profile) if missing.

    No-op when ``name`` already appears in ``role_list``. Returns
    (changed, updated_role_list, iam_role_result, instance_profile_result).
    """
    changed = False
    iam_role_result = None
    instance_profile_result = None
    try:
        if name not in role_list:
            changed = True
            iam_role_result = iam.create_role(name,
                                              assume_role_policy_document=trust_policy_doc,
                                              path=path).create_role_response.create_role_result.role
            if name not in prof_list:
                # New roles get a companion instance profile with the role attached.
                instance_profile_result = iam.create_instance_profile(name, path=path) \
                    .create_instance_profile_response.create_instance_profile_result.instance_profile
                iam.add_role_to_instance_profile(name, name)
            else:
                instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    else:
        # list_all_roles is a helper defined elsewhere in this module.
        updated_role_list = list_all_roles(iam)
        iam_role_result = iam.get_role(name).get_role_response.get_role_result.role
    return changed, updated_role_list, iam_role_result, instance_profile_result
def delete_role(module, iam, name, role_list, prof_list):
    """Delete an IAM role and its matching instance profile.

    Detaches the role from any instance profiles, deletes inline policies
    if they block deletion, then deletes any instance profile sharing the
    role's name. Returns (changed, updated_role_list, iam_role_result,
    instance_profile_result).
    """
    changed = False
    iam_role_result = None
    instance_profile_result = None
    try:
        if name in role_list:
            cur_ins_prof = [rp['instance_profile_name'] for rp in
                            iam.list_instance_profiles_for_role(name).
                            list_instance_profiles_for_role_result.
                            instance_profiles]
            # A role cannot be deleted while attached to an instance profile.
            for profile in cur_ins_prof:
                iam.remove_role_from_instance_profile(profile, name)
            try:
                iam.delete_role(name)
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if ('must detach all policies first') in error_msg:
                    for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
                        iam.delete_role_policy(name, policy)
                    try:
                        iam_role_result = iam.delete_role(name)
                    except boto.exception.BotoServerError as err:
                        error_msg = boto_exception(err)
                        if ('must detach all policies first') in error_msg:
                            # Fix: original message was missing a space between
                            # concatenated parts and misspelled "Policies".
                            module.fail_json(changed=changed,
                                             msg="All inline policies have been removed. "
                                                 "Though it appears that %s has Managed Policies. "
                                                 "This is not currently supported by boto. Please "
                                                 "detach the policies through the console and try "
                                                 "again." % name)
                        else:
                            module.fail_json(changed=changed, msg=str(err))
                    else:
                        changed = True
                # NOTE(review): a BotoServerError whose message does not
                # mention detaching policies is silently swallowed here
                # (changed stays False) — preserved as-is; confirm intended.
            else:
                changed = True
            for prof in prof_list:
                if name == prof:
                    instance_profile_result = iam.delete_instance_profile(name)
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    else:
        # list_all_roles is a helper defined elsewhere in this module.
        updated_role_list = list_all_roles(iam)
    return changed, updated_role_list, iam_role_result, instance_profile_result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(required=True, choices=['user', 'group', 'role']),
groups=dict(type='list', default=None, required=False),
state=dict(required=True, choices=['present', 'absent', 'update']),
password=dict(default=None, required=False, no_log=True),
update_password=dict(default='<PASSWORD>', required=False, choices=['always', 'on_create']),
access_key_state=dict(default=None, required=False, choices=[
'active', 'inactive', 'create', 'remove',
'Active', 'Inactive', 'Create', 'Remove']),
access_key_ids=dict(type='list', default=None, required=False),
key_count=dict(type='int', default=1, required=False),
name=dict(default=None, required=False),
trust_policy_filepath=dict(default=None, required=False),
trust_policy=dict(type='dict', default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['trust_policy', 'trust_policy_filepath']],
)
if not HAS_BOTO:
module.fail_json(msg='This module requires boto, please install it')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
groups = module.params.get('groups')
name = module.params.get('name')
new_name = module.params.get('new_name')
password = module.params.get('password')
update_pw = module.params.get('update_password')
path = module.params.get('path')
new_path = module.params.get('new_path')
key_count = module.params.get('key_count')
key_state = module.params.get('access_key_state')
trust_policy = module.params.get('trust_policy')
trust_policy_filepath = module.params.get('trust_policy_filepath')
key_ids = module.params.get('access_key_ids')
if key_state:
key_state = key_state.lower()
if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
module.fail_json(changed=False, msg="At least one access key has to be defined in order"
" to use 'active' or 'inactive'")
if iam_type == 'user' and module.params.get('password') is not None:
pwd = module.params.get('password')
elif iam_type != 'user' and module.params.get('password') is not None:
module.fail_json(msg="a password is being specified when the iam_type "
"is not user. Check parameters")
else:
pwd = None
if iam_type != 'user' and (module.params.get('access_key_state') is not None or
module.params.get('access_key_id') is not None):
module.fail_json(msg="the IAM type must be user, when IAM access keys "
"are being modified. Check parameters")
if iam_type == 'role' and state == 'update':
module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
"please specify present or absent")
# check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
if trust_policy_filepath:
try:
with open(trust_policy_filepath, 'r') as json_data:
trust_policy_doc = json.dumps(json.load(json_data))
except Exception as e:
module.fail_json(msg=str(e) + ': ' + trust_policy_filepath)
elif trust_policy:
try:
trust_policy_doc = json.dumps(trust_policy)
except Exception as e:
module.fail_json(msg=str(e) + ': ' + trust_policy)
else:
trust_policy_doc = None
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
if region:
iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
result = {}
changed = False
try:
orig_group_list = list_all_groups(iam)
orig_user_list = list_all_users(iam)
orig_role_list = list_all_roles(iam)
orig_prof_list = list_all_instance_profiles(iam)
except boto.exception.BotoServerError as err:
module.fail_json(msg=err.message)
if iam_type == 'user':
been_updated = False
user_groups = None
user_exists = any([n in [name, new_name] for n in orig_user_list])
if user_exists:
current_path = iam.get_user(name).get_user_result.user['path']
if not new_path and current_path != path:
new_path = path
path = current_path
if state == 'present' and not user_exists and not new_name:
(meta, changed) = create_user(
module, iam, name, password, path, key_state, key_count)
keys = iam.get_all_access_keys(name).list_access_keys_result.\
access_key_metadata
if groups:
(user_groups, changed) = set_users_groups(
module, iam, name, groups, been_updated, new_name)
module.exit_json(
user_meta=meta, groups=user_groups, keys=keys, changed=changed)
elif state in ['present', 'update'] and user_exists:
if update_pw == 'on_create':
password = <PASSWORD>
if name not in orig_user_list and new_name in orig_user_list:
been_updated = True
name_change, key_list, user_changed, new_key = update_user(
module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
if new_key:
user_meta = {'access_keys': list(new_key)}
user_meta['access_keys'].extend(
[{'access_key_id': key, 'status': value} for key, value in key_list.items() if
key not in [it['access_key_id'] for it in new_key]])
else:
user_meta = {
'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]}
if name_change and new_name:
orig_name = name
name = new_name
if isinstance(groups, list):
user_groups, groups_changed = set_users_groups(
module, iam, name, groups, been_updated, new_name)
if groups_changed == user_changed:
changed = groups_changed
else:
changed = True
else:
changed = user_changed
if new_name and new_path:
module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list,
created_keys=new_key, user_meta=user_meta)
elif new_name and not new_path and not been_updated:
module.exit_json(
changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list,
created_keys=new_key, user_meta=user_meta)
elif new_name and not new_path and been_updated:
module.exit_json(
changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state,
created_keys=new_key, user_meta=user_meta)
elif not new_name and new_path:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path,
keys=key_list, created_keys=new_key, user_meta=user_meta)
else:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key,
| |
# Amii2.py
# -*- coding: utf-8 -*-
import LINETCR
import urllib
import subprocess
import profile
import requests
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re
from bs4 import BeautifulSoup
from threading import Thread
import urllib3
# LINE client instances: one main account plus two helper bots.
# SECURITY NOTE(review): the login tokens below are hard-coded credentials
# committed to source; treat them as compromised and rotate them.
cl = LINETCR.LINE()
cl.login(token="EnPqPA1Rcg83o27naIC1.TA54dxsS3sWfT1dVF7sUmq.G7j9VasepEbrLG6ar8N+ejqTMbLVHETvPBpQa9YwFRc=")
cl.loginResult()
ki = LINETCR.LINE()  # first helper bot
ki.login(token="EnVAKi465IsrNb5WP1T1.JfCtHOqQfDhIFCfpOIzoeq.gGXino/PQk7O6lsP9BJi6L5sB3QHyithHedHiK/Rq54=")
ki.loginResult()
ki2 = LINETCR.LINE()  # second helper bot
ki2.login(token="En76eiS877UDyJYaYTLb.Cu584iN8cI2QN/belsU9sW.fTKNvsAzhWWz5kHG4EJwWIawvUBktTIof77MqxpTuZI=")
ki2.loginResult()
print u"login success"
# Python 2 only: reload(sys) restores setdefaultencoding() (removed by
# site.py) so the process default string encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
# Help text sent verbatim to chats; runtime string, left byte-identical
# (including the mojibake '��' sequences, which appear to be mis-decoded
# emoji from the original file — TODO confirm intended glyphs).
helpMessage ="""||===== F O R A D M I N =====||
||��✒ Glist - List Group
||��✒ Cancel - Cancel All Pending Grup
||��✒ Mid @ - Get MID
||��✒ Invite:on - Invite Via Send Contact
||��✒ Bans:on - Ban Via Send Contact
||��✒ Unbans:on - Unban Via Send Contact
||��✒ Unban @ - Unban Via Tag
||��✒ Ban @ - Ban Via Tag
||��✒ Clearban - Delete All Blacklist
||��✒ Link on - Open QR
||��✒ Link off - Close QR
||��✒ Gurl - Open QR And Get Link
||��✒ Url - Get QR Link
||��✒ Gn - Ganti Nama Group
||��✒ Banlist - Cek Tersangka Kriminal
||��✒ Ginfo - Info Group
||===== F O R K I C K E R =====||
||��✒ @left - Left Dari Group
||��✒ @masuk - Semua Bot Join
||��✒ @bye - Keluarkan Semua Bot
||��✒ /sikat - Kick Via Tag
||��✒ /balik @ - Kick Via Ghost Kicker
||��✒ /pancal - Asist Kick
||��✒ #Bubar - Command Player
||===== F O R P L A Y E R =====||
||��✒ Cctv - Cek Sider
||��✒ Cilubba - Liat Sidder
||��✒ Spam:add: - Tambah Kata Spam
||��✒ Spam: - Isi Angka Spam
||��✒ Allbio: - Ganti Semua Bio Bot
||��✒ Allname: - Ganti Semua Nama Bot
||��✒ Myname: - Ganti Nama Sendiri
||��✒ Mybio: - Ganti Bio Sendiri
||��✒ Mybot - Kontak Semua Bot
||=========================||
||===== S E T T I G S =====||
|| [Auto add on/off]
|| [Auto join on/off]
|| [Contact on/off]
|| [Leave on/off]
|| [Share on/off]
||===== P R O T E C T =====||
|| [Mode On/Off]
|| [Protect on]
|| [Qrprotect on/off]
|| [Inviteprotect on/off]
|| [Cancelprotect on/off]
||======= FOR ADMIN =======||
✯==== ✯TeaM✯BotS✯BaKeKoK✯ ====✯
✯==== Creator ====✯
http://line.me/ti/p/~amiiqila_
"""
helo=""  # appears unused in the visible code — TODO confirm before removal
# All clients and their MIDs (LINE user ids), resolved at startup.
KAC=[cl,ki,ki2]
mid = cl.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
Bots=[mid,kimid,ki2mid]
# Administrator MIDs.  NOTE(review): '<PASSWORD>' below looks like a
# redaction placeholder, not a real MID — restore the real ids before
# running; membership tests like `msg.from_ in admin` will not match.
admsa = "ub5ae780d74acdd2c05b750ef7fb4ae31"
admin = "ub<PASSWORD>"
Creator = "u<PASSWORD>"
# Global runtime state for the bot.  Flags whose names start with 'w'
# ("waiting") arm a one-shot action that is consumed by the next contact
# message received (see bot()).
wait = {
    'contact':False,
    'autoJoin':True,
    'autoCancel':{"on":False,"members":50},
    'leaveRoom':True,
    'timeline':False,
    'autoAdd':True,
    'message':"✯✯✯✯✯✯✯✯✯✯\n✯Thanks For Add Me✯\n✯✯✯✯✯✯✯✯✯✯\n\n✯==== Creator ====✯\n\nhttp://line.me/ti/p/~amiiqila_",
    "lang":"JP",
    "comment":"✯✯✯✯✯✯✯✯✯✯\n✯Thanks For Add Me✯\n✯✯✯✯✯✯✯✯✯✯\n\n✯==== Creator ====✯\n\nhttp://line.me/ti/p/~amiiqila_",
    "commentOn":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":False,
    # fix: the original listed "cNames" twice; the duplicate key was
    # silently overwritten, so one entry is kept.
    "cNames":"",
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":False,
    # fix: bot() reads wait["winvite"] on every contact message before any
    # command ever sets it, raising KeyError; initialize it here.
    "winvite":False,
    "protect":True,
    "cancelprotect":False,
    "inviteprotect":False,
    "linkprotect":False,
    }
# Read-receipt bookkeeping used by the sider-checking features.
wait2 = {
    'readPoint':{},
    'readMember':{},
    'setTime':{},
    'ROM':{}
    }
# Alias into wait2 so both names refer to the same dict.  (The original
# first bound setTime to a throwaway {} that was immediately replaced —
# dead assignment removed.)
setTime = wait2['setTime']
# Snapshot the main account's profile so it can be restored later.
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
# Snapshot the first helper bot's profile likewise.
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def cms(string, commands):
    """Return True if *string* matches any entry of *commands*, either
    bare or prefixed by a recognized command sigil — /XXX, >XXX, ;XXX,
    ^XXX, %XXX, $XXX, +XXX, @XXX, or the legacy 'サテラ:' prefix.

    Fix: the original iterated over the sigil list but never used the
    loop variable, so only exact matches were ever recognized; prefixed
    forms now match as the header comment intended.  Exact matches still
    match, so existing callers are unaffected.
    """
    prefixes = ("+", "@", "/", ">", ";", "^", "%", "$", "サテラ:")
    for command in commands:
        if string == command:
            return True
        for prefix in prefixes:
            if string == prefix + command:
                return True
    return False
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace(" ",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "u57a54b914085fea6f04c19f6fe589057":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"sudah masuk daftar hitam👈")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Itu tidak berkomentar👈")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"Tidak ada dalam daftar hitam👈")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done👈")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done👈")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Done👈")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Myhelp"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpMessage)
elif msg.text in ["Invite:on"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Invite1:on"]:
if msg.from_ in admin:
wait["winvite"] = True
ki.sendText(msg.to,"send contact")
elif ("Gn:" in msg.text):
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.name = msg.text.replace("Gn:","")
ki.updateGroup(group)
else:
cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok👈")
elif ("Gn " in msg.text):
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.name = msg.text.replace("Gn ","")
cl.updateGroup(group)
else:
cl.sendText(msg.to,"Can not be used for groups other than")
elif "Kick:" in msg.text:
midd = msg.text.replace("Kick:","")
cl.kickoutFromGroup(msg.to,[midd])
elif "Invite:" in msg.text:
midd = msg.text.replace("Invite:","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text.lower() == 'mybot':
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki7mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki8mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki9mid}
cl.sendMessage(msg)
msg.contentType = 13
elif "Bot1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif "Bot2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif msg.text in ["Bot1 Gift","Bot1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Bot2 Gift","Bot2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["B Cancel","Cancel dong","B cancel"]:
if msg.toType == 2:
group = ki.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
ki.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No invites👈")
else:
cl.sendText(msg.to,"Invite people inside not👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"invitan tidak ada")
elif msg.text in ["Cancel","cancel"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No invites👈")
else:
cl.sendText(msg.to,"Invite people inside not👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan👈")
else:
cl.sendText(msg.to,"invitan tidak ada")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Link on"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL open")
else:
cl.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group ô€œ ô€„‰👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œ ô€„‰")
elif msg.text in | |
# gh_stars: 1-10
# -*- coding: utf-8 -*-
"""
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import time
import cv2
import sys
import os
import queue
__dir__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, __dir__)
from WrappedDeviceAPI import *
def sample():
    """End-to-end tour of the IDeviceAPI surface.

    Initializes a device, grabs a frame, then demonstrates every TouchCMD
    action type and every DeviceCMD action type before calling Finish().
    Sample/demo code only: the device serial, package names and file
    paths are hard-coded and every call targets real hardware.
    """
    deviceAPI = IDeviceAPI('Android')
    '''
    describe: initialization.
    param[0], str:  device serial; default None.  May be omitted when a
                    single device is attached; required for multiple.
    param[1], bool: orientation — True for portrait, False for landscape.
    param[2], int:  length of the screen's long edge.
    param[3], str:  log directory; default /tmp/LogDeviceAPI.
    param[4], enum: log level, one of [LOG_DEBUG, LOG_INFO, LOG_WARNING,
                    LOG_ERROR, LOG_CRITICAL]; default LOG_DEBUG.
    param[5], bool: whether to display the captured frames.
    param[5], dict: extra component parameters (e.g. port numbers).
    return, bool:   True on success, False on failure.
    NOTE(review): the call below passes two sizes (720, 1280) while the
    list above documents only one edge length — confirm against the
    actual Initialize() signature.
    '''
    if not deviceAPI.Initialize('908fedc0', False, 720, 1280, '/tmp/LogDeviceAPI', LOG_DEBUG):
        return False
    '''
    describe: fetch the current image frame.
    return: the frame as an OpenCV Mat, or None on failure.
    '''
    frame = deviceAPI.GetFrame()
    if frame is None:
        return False
    '''
    ==========================================================================================================
    ============================================TouchCMD==================================================
    ==========================================================================================================
    describe: make the phone perform a touch action.
    aType: action type [TOUCH_CLICK, TOUCH_DOWN, TOUCH_UP, TOUCH_SWIPE, TOUCH_MOVE].
    sx: x coordinate — the press point for [TOUCH_CLICK, TOUCH_DOWN],
        the start point for [TOUCH_SWIPE, TOUCH_MOVE].
    sy: y coordinate, same semantics as sx.
    ex: end-point x coordinate for [TOUCH_SWIPE, TOUCH_MOVE].
    ey: end-point y coordinate for [TOUCH_SWIPE, TOUCH_MOVE].
    DaType: backend used to perform the action, minitouch or adb
        [DACT_TOUCH, DACT_ADB]; default DACT_TOUCH.
    contact: touch-contact (finger) id; default 0.
    durationMS: duration of one action, used by [TOUCH_CLICK, TOUCH_SWIPE];
        default -1 for TOUCH_CLICK, 50 for TOUCH_SWIPE.
    needUp: TOUCH_SWIPE only — lift the finger afterwards; default True.
    return: True or False
    '''
    # deviceAPI.TouchCMD(aType=[TOUCH_CLICK, TOUCH_DOWN, TOUCH_UP, TOUCH_SWIPE, TOUCH_MOVE],
    #                    sx=int,
    #                    sy=int,
    #                    ex=int,
    #                    ey=int,
    #                    contact=0,
    #                    durationMS=50,
    #                    needUp=True,
    #                    wait_time=0)
    '''
    describe: perform a click.
    sx/sy: click point, relative to the coordinate system given at init.
    contact: touch-contact id; default 0.
    durantionMS: action duration; default -1.
    wait_time: post-action wait on the phone side, in milliseconds; default 0.
    NOTE(review): 'durantionMS' (sic) here vs 'durationMS' in other calls —
    confirm which keyword spelling TouchCMD actually accepts.
    '''
    if not deviceAPI.TouchCMD(aType=TOUCH_CLICK, sx=300, sy=300, contact=0, durantionMS=-1, wait_time=0):
        return False
    '''
    describe: press down (touch-down only, no release).
    sx/sy: press point, relative to the init coordinate system.
    contact: touch-contact id; default 0.
    wait_time: post-action wait on the phone side, in milliseconds; default 0.
    '''
    if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=300, sy=300, contact=0, wait_time=0):
        return False
    '''
    describe: lift the finger (touch-up).
    wait_time: post-action wait on the phone side; default 0.  The original
    doc said seconds here but milliseconds everywhere else — presumably
    milliseconds; verify.
    '''
    if not deviceAPI.TouchCMD(aType=TOUCH_UP, contact=1, wait_time=0):
        return False
    '''
    describe: perform a swipe.
    sx, sy: start point.  ex, ey: end point.
    DaType: backend, minitouch or adb [DACT_TOUCH, DACT_ADB]; default DACT_TOUCH.
    contact: touch-contact id; default 0.
    durantionMS: action duration; default 50.
    needUp: whether to lift the finger after the swipe; default True.
    wait_time: post-action wait on the phone side, in milliseconds; default 0.
    '''
    if not deviceAPI.TouchCMD(aType=TOUCH_SWIPE,
                              sx=500,
                              sy=500,
                              ex=600,
                              ey=600,
                              contact=0,
                              durationMS=500,
                              needUp=False,
                              wait_time=0):
        return False
    '''
    describe: move the contact point.  Unlike swipe, it only has an end
    point; several moves can be chained to build one swipe gesture.
    sx/sy: target point, relative to the init coordinate system.
    contact: touch-contact id; default 0.
    wait_time: post-action wait on the phone side, in milliseconds; default 0.
    '''
    if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=300, sy=300, contact=0, wait_time=0):
        return False
    '''
    describe: interpolated move — like move, but intermediate points are
    filled in along the path.
    sx/sy: target point, relative to the init coordinate system.
    contact: touch-contact id; default 0.
    wait_time: post-action wait on the phone side, in milliseconds; default 0.
    '''
    if not deviceAPI.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=300, sy=300, durationMS=50, contact=0, wait_time=0):
        return False
    '''
    describe: reset the touch state.
    wait_time: post-action wait on the phone side, in milliseconds; default 0.
    '''
    if not deviceAPI.TouchCMD(aType=TOUCH_RESET, wait_time=0):
        return False
    '''
    ==========================================================================================================
    ============================================DeviceCMD=================================================
    ==========================================================================================================
    describe: run device-level operations.
    aType: operation type [DEVICE_INSTALL, DEVICE_START, DEVICE_EXIT, DEVICE_CURAPP, DEVICE_CLEARAPP, DEVICE_KEY,
           DEVICE_TEXT, DEVICE_SLEEP, DEVICE_WAKE, DEVICE_WMSIZE, DEVICE_BINDRO, DEVICE_SCREENSHOT,
           DEVICE_SCREENORI]
    APKPath: path of the APK package on the PC.
    PKGName: package name.
    ActivityName: the package's launch activity.
    key:
    '''
    # deviceAPI.DeviceCMD(aType=[DEVICE_INSTALL, DEVICE_START, DEVICE_EXIT, DEVICE_CURAPP, DEVICE_CLEARAPP, DEVICE_KEY,
    #                            DEVICE_TEXT, DEVICE_SLEEP, DEVICE_WAKE, DEVICE_WMSIZE, DEVICE_BINDRO, DEVICE_SCREENSHOT,
    #                            DEVICE_SCREENORI],
    #                     APKPath=str,
    #                     PKGName=str,
    #                     ActivityName=str,
    #                     key=str,
    #                     text=str,
    #                     rotation=str,
    #                     targetPath=str)
    '''
    aType DEVICE_INSTALL installs an app.
    APKPath (required): path of the APK on the PC.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_INSTALL, APKPath='/home/ting/kidting/game_ai_sdk/data/qqspeed/game.apk'):
        return False
    '''
    aType DEVICE_START launches an app.
    PKGName: the app's package name.
    ActivityName: the activity to launch.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_START, PKGName='com.tencent.tmgp.speedmobile',
                               ActivityName='com.tencent.tmgp.speedmobile.speedmobile'):
        return False
    '''
    aType DEVICE_CURAPP returns the current foreground app.
    return dict: currentAPP = {'package': str(), 'activity': str()}
    '''
    currentAPP = deviceAPI.DeviceCMD(aType=DEVICE_CURAPP)
    '''
    aType DEVICE_PARAM returns the phone's performance stats while the
    app is running.
    PKGName (required): the app's package name.
    return dict deviceParam holding CPU, memory, temperature and battery:
    deviceParam = {
        'cpu': float,
        'mem': float,
        'temperature': float,
        'battery': int
    }
    '''
    deviceParam = deviceAPI.DeviceCMD(aType=DEVICE_PARAM, PKGName='com.tencent.tmgp.speedmobile')
    '''
    aType DEVICE_CLEARAPP clears the app's data.
    PKGName (required): the app's package name.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_CLEARAPP, PKGName='com.tencent.tmgp.speedmobile'):
        return False
    '''
    aType DEVICE_EXIT quits the app.
    PKGName (required): the app's package name.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_EXIT, PKGName='com.tencent.tmgp.speedmobile'):
        return False
    '''
    aType DEVICE_KEY sends a phone keyboard key.
    key (required), str: the concrete key to send.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_KEY, key='cmd'):
        return False
    '''
    aType DEVICE_TEXT types a string.
    text (required), str: the text to type.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_TEXT, text='abc'):
        return False
    '''
    aType DEVICE_SLEEP locks the screen.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_SLEEP):
        return False
    '''
    aType DEVICE_WAKE wakes / unlocks the device.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_WAKE):
        return False
    '''
    aType DEVICE_WMSIZE returns the device resolution.
    return height, width
    '''
    height, width = deviceAPI.DeviceCMD(aType=DEVICE_WMSIZE)
    if height == -1 or width == -1:
        return False
    # '''
    # aType DEVICE_BINDRO locks the device orientation.
    # return height, width
    # '''
    # height, width = deviceAPI.DeviceCMD(aType=DEVICE_BINDRO)
    # if height == -1 or width == -1:
    #     return False
    '''
    aType DEVICE_SCREENSHOT takes a screenshot.
    targetPath: where to store it on the PC.
    return True or False
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_SCREENSHOT, targetPath='./test.png'):
        return False
    '''
    aType DEVICE_SCREENORI reports whether the device is currently in
    portrait or landscape orientation.
    return UI_SCREEN_ORI_PORTRAIT or UI_SCREEN_ORI_LANDSCAPE
    '''
    res = deviceAPI.DeviceCMD(aType=DEVICE_SCREENORI)
    if res == UI_SCREEN_ORI_PORTRAIT:
        print('竖屏')
    elif res == UI_SCREEN_ORI_LANDSCAPE:
        print('横屏')
    else:
        return False
    '''
    describe: query the maximum number of touch contacts.
    return int
    '''
    maxContact = deviceAPI.DeviceCMD(aType=DEVICE_MAXCONTACT)
    if maxContact < 0:
        return False
    '''
    describe: click via an ADB command.
    return int
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_CLICK, px=300, py=300):
        return False
    '''
    describe: swipe via an ADB command (a click must be performed first
    for the swipe to be visible; it jumps instantly to the coordinates).
    return int
    '''
    if not deviceAPI.DeviceCMD(aType=DEVICE_SWIPE, sx=300, sy=300, ex=500, ey=500, durationMS=50):
        return False
    '''
    describe: wait for all pending commands to reach the phone; call this
    before the program exits.
    '''
    deviceAPI.Finish()
    '''
    ==========================================================================================================
    ==========================================================================================================
    ==========================================================================================================
    '''
def demo1():
    """Two-device demo: initialize two phones on separate minitouch /
    minicap ports, press down on each, chain interpolated SWIPEMOVE
    gestures, then poll and display frames from both with OpenCV.

    NOTE(review): device serials and port numbers are hard-coded and the
    trailing display loop runs ~100000 iterations — demo code only.
    """
    # deviceAPI1 = IDeviceAPI('Android', 'PlatformWeTest')
    # deviceAPI2 = IDeviceAPI('Android', 'PlatformWeTest')
    deviceAPI1 = IDeviceAPI('Android')
    deviceAPI2 = IDeviceAPI('Android')
    # Each device gets its own minitouch/minicap port pair so the two
    # capture/control sessions do not collide.
    deviceAPI1.Initialize(deviceSerial='4da2dea3', height=200, width=1280, logDir='./log', minitouchPort=1122, minicapPort=1133)
    deviceAPI2.Initialize(deviceSerial='9889db384258523633', height=200, width=1280, logDir='./log', minitouchPort=1144, minicapPort=1155)
    # maxContact = deviceAPI.DeviceCMD(aType=DEVICE_MAXCONTACT)
    # begin = time.time()
    # for i in range(10):
    #     if not deviceAPI1.TouchCMD(aType=TOUCH_CLICK, sx=300, sy=300, durationMS=1000, wait_time=1000):
    #         print('click failed')
    # end = time.time()
    # print(end - begin)
    #
    # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=100, sy=100, wait_time=1000):
    #     print('click failed')
    # # if not deviceAPI.TouchCMD(aType=TOUCH_UP):
    #     print('up failed')
    # if not deviceAPI.TouchCMD(aType=TOUCH_CLICK, sx=500, sy=500, contact=0, durantionMS=50, wait_time=1000):
    #     return False
    # if not deviceAPI1.DeviceCMD(aType=DEVICE_SWIPE, sx=640, sy=100, ex=640, ey=300, durationMS=1000):
    #     print('click failed')
    # time.sleep(100000)
    # return None
    # Press and hold on both devices, then drag each contact through a
    # sequence of interpolated moves.
    if not deviceAPI1.TouchCMD(aType=TOUCH_DOWN, sx=640, sy=100, wait_time=1000):
        print('click failed')
    if not deviceAPI2.TouchCMD(aType=TOUCH_DOWN, sx=200, sy=200, wait_time=50):
        print('click failed')
    if not deviceAPI1.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=640, sy=300, durationMS=1000, contact=0, wait_time=1000):
        return False
    if not deviceAPI2.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=100, sy=100, durationMS=1000, contact=0, wait_time=1000):
        return False
    if not deviceAPI1.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=100, sy=100, durationMS=1000, contact=0, wait_time=1000):
        return False
    if not deviceAPI2.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=200, sy=200, durationMS=1000, contact=0, wait_time=1000):
        return False
    # print(maxContact)
    # if not deviceAPI.TouchCMD(aType=TOUCH_SWIPE, sx=200, sy=200, ex=400, ey=400, wait_time=1000, durationMS=500):
    #     print('swipe failed')
    #     return False
    # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=300, sy=300, wait_time=1000):
    #     print('click failed')
    #     return False
    # if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=500, sy=500, contact=1, wait_time=1000):
    #     print('click failed')
    #     return False
    # for i in range(10):
    #     if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=500, sy=500, wait_time=1000):
    #         print('click failed')
    #         return False
    #     if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=400, sy=400, contact=1, wait_time=1000):
    #         print('click failed')
    #         return False
    #     if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=400, sy=400, wait_time=1000):
    #         print('click failed')
    #         return False
    #     if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=500, sy=500, contact=1, wait_time=1000):
    #         print('click failed')
    #         return False
    #     # time.sleep(1)
    #
    # if not deviceAPI.TouchCMD(aType=TOUCH_UP, contact=1, wait_time=1000):
    #     print('click failed')
    #     return False
    #
    # if not deviceAPI.TouchCMD(aType=TOUCH_RESET):
    #     print('reset failed')
    #     return False
    time.sleep(5)
    # Poll and display frames from both devices; waitKey(1) keeps the
    # OpenCV windows responsive.
    for i in range(100000):
        frame1 = deviceAPI1.GetFrame()
        frame2 = deviceAPI2.GetFrame()
        if frame1 is not None:
            cv2.imshow('test1', frame1)
            cv2.waitKey(1)
        if frame2 is not None:
            cv2.imshow('test2', frame2)
            cv2.waitKey(1)
        # #time.sleep(1)
def demo():
# deviceAPI1 = IDeviceAPI('Android', 'PlatformWeTest')
deviceAPI1 = IDeviceAPI('Android')
flag, strerror = deviceAPI1.Initialize(isPortrait=False, long_edge=1280, logDir='./log', level=LOG_INFO, showRawScreen=False)
print(flag)
print(strerror)
# maxContact = deviceAPI.DeviceCMD(aType=DEVICE_MAXCONTACT)
# begin = time.time()
# for i in range(10):
# if not deviceAPI1.TouchCMD(aType=TOUCH_CLICK, sx=300, sy=300, durationMS=1000, wait_time=1000):
# print('click failed')
# end = time.time()
# print(end - begin)
#
# if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=100, sy=100, wait_time=1000):
# print('click failed')
# # if not deviceAPI.TouchCMD(aType=TOUCH_UP):
# # print('up failed')
# pkgName = deviceAPI1.DeviceCMD(aType=DEVICE_CURAPP)
# parameter= deviceAPI1.DeviceCMD(aType=DEVICE_PARAM, PKGName=pkgName['package'])
# print(parameter)
# exit(0)
if not deviceAPI1.TouchCMD(aType=TOUCH_CLICK, sx=1130, sy=442, contact=0, durationMS=5000, wait_time=1000):
return False
if not deviceAPI1.DeviceCMD(aType=DEVICE_SWIPE, sx=640, sy=100, ex=640, ey=300, durationMS=1000):
print('click failed')
# time.sleep(100000)
# return None
# if not deviceAPI1.TouchCMD(aType=TOUCH_DOWN, sx=100, sy=100, wait_time=5000):
# print('click failed')
# if not deviceAPI1.TouchCMD(aType=TOUCH_UP):
# print('up failed')
# begin = time.time()
if not deviceAPI1.TouchCMD(aType=TOUCH_CLICK, sx=1270, sy=300, durationMS=5000, wait_time=1000):
print('click failed')
# end = time.time()
# print("action:{}".format(end - begin))
begin = time.time()
if not deviceAPI1.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=100, sy=300, durationMS=1000, contact=0, wait_time=1000):
return False
if not deviceAPI1.TouchCMD(aType=TOUCH_SWIPEMOVE, sx=100, sy=100, durationMS=1000, contact=0, wait_time=1000):
return False
end = time.time()
# print("action:{}".format(end - begin))
# print(maxContact)
# if not deviceAPI.TouchCMD(aType=TOUCH_SWIPE, sx=200, sy=200, ex=400, ey=400, wait_time=1000, durationMS=500):
# print('swipe failed')
# return False
# if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=300, sy=300, wait_time=1000):
# print('click failed')
# return False
# if not deviceAPI.TouchCMD(aType=TOUCH_DOWN, sx=500, sy=500, contact=1, wait_time=1000):
# print('click failed')
# return False
# for i in range(10):
# if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=500, sy=500, wait_time=1000):
# print('click failed')
# return False
# if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=400, sy=400, contact=1, wait_time=1000):
# print('click failed')
# return False
# if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=400, sy=400, wait_time=1000):
# print('click failed')
# return False
# if not deviceAPI.TouchCMD(aType=TOUCH_MOVE, sx=500, sy=500, contact=1, wait_time=1000):
# print('click failed')
# return False
# # time.sleep(1)
#
# if not deviceAPI.TouchCMD(aType=TOUCH_UP, contact=1, wait_time=1000):
# print('click failed')
# return False
#
# if | |
# Analyze jsonl files from cause/effect sentence entity labeling (semantic role
# labeling): compare each annotator's spans/relations against a gold answer file.
import srsly
import csv
import sys
import datetime
# Annotators' labelled sentences (Prodigy-style JSONL export).
file_name = "checkin_four_all_labels"
file_path = "C://Users//buchh//OneDrive/Desktop//cm//cm_jsonl//"+file_name+".jsonl"
# Gold-standard answers to compare against.
file_name_answers = "answers2_answers"
file_path_answers = "C://Users//buchh//OneDrive/Desktop//cm//cm_jsonl//"+file_name_answers+".jsonl"
# NOTE(review): srsly.read_jsonl returns generators, so each stream can only be
# iterated once -- confirm downstream call order respects that.
data = srsly.read_jsonl(file_path)
data_answers = srsly.read_jsonl(file_path_answers)
answer_username = "answers2"  # default; overwritten by get_answer_username()
username_extra = "checkin_four_all_labels-"  # session-id prefix stripped from usernames
# CSV header row for the per-sentence comparison output.
csv_columns_sub = [
    "user",
    "text",
    "entity result",
    "concept rel. result",
    "contrib. rel. result",
    "priority status",
    "entity label",
    "user word/phrase",
    "correct word/phrase",
    "user concept rel. TARGET(S)",
    "correct concept rel. TARGET(S)",
    "user contrib. rel. TARGET(S)",
    "correct contrib. rel. TARGET(S)"
]
# Accumulators mutated by the functions below.
base_entity_dict = {}          # annotator -> list of span records
base_entity_dict_answers = {}  # gold: text -> {label: [span texts]}
all_users = []                 # every annotator username seen
tmp_line = []                  # CSV rows, seeded with the header
tmp_line.append(csv_columns_sub)
count_dict = {}                # gold per-label span counts
count_dict_user = {}           # per-user per-label counts
relation_dict = {}             # annotator -> text -> list of relation records
def create_dict(datasource, dict_name, user=None):
    """Accumulate per-annotator span/relation records from a JSONL stream.

    For every entry this extracts the annotator name, the labelled relation
    arcs and the labelled entity spans, appending one record per span to
    ``dict_name[username]``.  Rejected entries (answer != "accept") get
    placeholder "No <label>" records instead.

    Mutates the module-level ``all_users`` and ``relation_dict`` globals and
    reads ``username_extra``.

    :param datasource: iterable of annotation dicts (as read by srsly.read_jsonl)
    :param dict_name: dict to fill, keyed by annotator username
    :param user: unused; kept for backward compatibility
    :raises ValueError: when an entry has no "text" field
    """
    # Entity labels in the record order used throughout this script ("base"
    # is handled separately because it leads the record).
    span_labels = ["type_of", "change_direction", "aspect_changing", "to_whom",
                   "effect_size", "confidence", "where", "when", "predicate"]
    for entry in datasource:
        if "text" not in entry:
            # BUG FIX: the original called the undefined name ``throw`` here,
            # which died with a NameError; raise an explicit error instead.
            raise ValueError("NO 'text' field encountered! This field is necessary for the rest "
                             "of the script to work! Please fix this and then run this script.")
        text = entry["text"]
        username = entry.get("_session_id", "").replace(username_extra, "")
        if username not in all_users:
            all_users.append(username)
        try:
            if entry['answer'] == "accept":
                # Record every relation arc as {'label': ..., head_text: [child_text]}.
                for conn in entry['relations']:
                    head = text[conn["head_span"]["start"]:conn["head_span"]["end"]]
                    child_text = text[conn["child_span"]["start"]:conn["child_span"]["end"]]
                    child = [child_text] if child_text else []
                    rec = {'label': conn['label'], head: child}
                    relation_dict.setdefault(username, {}).setdefault(text, []).append(rec)
                # One record per labelled span: the matching label gets the
                # span text, every other label stays "".
                for relation in entry['spans']:
                    if ("label" in relation) and ("start" in relation) and ("end" in relation):
                        span_text = text[relation["start"]:relation["end"]]
                        record = {
                            "base": span_text if relation["label"] == "base" else "",
                            "text": text,
                            "username": username,
                        }
                        for label in span_labels:
                            record[label] = span_text if relation["label"] == label else ""
                        # A KeyError here (no relations recorded for this text)
                        # is swallowed below, exactly as in the original code.
                        record["relation"] = relation_dict[username][text]
                        dict_name.setdefault(username, []).append(record)
            else:
                # Rejected/skipped entries keep a placeholder record (no
                # "relation" key, mirroring the original behaviour).
                record = {"base": "No base", "text": text, "username": username}
                for label in span_labels:
                    # Historical quirk preserved: aspect_changing's placeholder
                    # is "No aspect_change", not "No aspect_changing".
                    record[label] = "No aspect_change" if label == "aspect_changing" else "No " + label
                dict_name.setdefault(username, []).append(record)
        except KeyError:
            # Entries missing 'answer'/'relations'/etc. are silently skipped.
            continue
def create_count_dict(entity):
    """Bump the global per-label counter for *entity* (seeding it at 1)."""
    count_dict[entity] = count_dict.get(entity, 0) + 1
def create_count_dict_user(entity, user):
    """Bump *user*'s counter for *entity* in the global count_dict_user."""
    per_user = count_dict_user.setdefault(user, {})
    per_user[entity] = per_user.get(entity, 0) + 1
def get_answer_username():
    """Set the module-level answer_username from the first answers entry.

    Only the first record of data_answers is inspected; the session-id
    prefix (username_extra) is stripped off.  Leaves the default untouched
    when the stream is empty.
    """
    global answer_username
    for entry in data_answers:
        session_id = entry.get("_session_id", "")
        answer_username = session_id.replace(username_extra, "")
        break
def get_answer_dict(datasource, dict_name):
    """Build the gold-answer lookup: text -> {label: [span texts]}.

    Every labelled span in *datasource* contributes its surface text to the
    matching label's list for that sentence, and the global per-label
    counters are bumped via create_count_dict().  Non-matching labels get
    (or keep) an empty list, so every sentence entry carries all labels.
    """
    labels = ["base", "type_of", "change_direction", "aspect_changing",
              "to_whom", "effect_size", "confidence", "where", "when",
              "predicate"]
    for entry in datasource:
        text = entry["text"]
        for span in entry["spans"]:
            if ("label" in span) and ("start" in span) and ("end" in span):
                fragment = text[span["start"]:span["end"]]
                # One-hot extraction: only the span's own label gets the text.
                extracted = {}
                for label in labels:
                    if span["label"] == label:
                        extracted[label] = [fragment]
                        create_count_dict(label)
                    else:
                        extracted[label] = []
                if text not in dict_name:
                    dict_name[text] = extracted
                else:
                    for label in labels:
                        dict_name[text][label] = dict_name[text][label] + extracted[label]
def get_tmp_dict(user):
    """Collect *user*'s span answers, grouped by sentence text.

    Only sentences also present in the gold base_entity_dict_answers are
    kept.  Returns {text: {label: [values]}}.  Quirk preserved from the
    original: the first record for a sentence seeds every label list with
    its (possibly empty) value, while later records only append truthy
    values.
    """
    fields = ["base", "type_of", "change_direction", "aspect_changing",
              "to_whom", "effect_size", "confidence", "where", "when",
              "predicate"]
    collected = {}
    for annotator in base_entity_dict:
        for record in base_entity_dict[annotator]:
            if record['username'] != user or record['text'] not in base_entity_dict_answers:
                continue
            sentence = record['text']
            if sentence in collected:
                for field in fields:
                    if record[field]:
                        collected[sentence][field] = collected[sentence][field] + [record[field]]
            else:
                collected[sentence] = {field: [record[field]] for field in fields}
    return collected
def gen_rel_dicts(word, tmp_rel_dict):
    """Merge the relation records that mention *word* into one dict.

    *tmp_rel_dict* is a list of {'label': ..., head: [targets]} records.
    All target lists keyed by *word* are concatenated; 'label' is taken
    from the first matching record only (later labels are ignored, as in
    the original implementation).  Returns {} when nothing matches.
    """
    merged = {}
    for record in tmp_rel_dict:
        if word not in record:
            continue
        if word in merged:
            merged[word] = merged[word] + record[word]
        else:
            merged[word] = record[word]
            merged['label'] = record['label']
    return merged
def create_arr_res(user_ans, right_ans):
    """Score user answers against gold answers.

    Returns a list of markers: "found" for an exact match, "partial" for a
    word-level / substring overlap (may be emitted more than once for the
    same answer, as in the original), then "missing" for each unmatched
    gold answer and "incorrect" for each unmatched user answer.  When
    *right_ans* is empty, the result is always [] -- user answers are not
    flagged incorrect (original behaviour, preserved).
    """
    remaining_right = list(right_ans) if right_ans else []
    remaining_user = list(user_ans) if right_ans else []
    results = []
    right_to_drop = []
    user_to_drop = []

    def cross_word_partials(answer):
        # Shared-word scan against multi-word gold answers; drops are
        # deferred (and de-duplicated) so iteration stays safe.
        for target in remaining_right:
            if len(target.split(" ")) > 1:
                for word in answer.split(" "):
                    if word in target.split(" "):
                        results.append("partial")
                        if target not in right_to_drop:
                            right_to_drop.append(target)
                        if answer not in user_to_drop:
                            user_to_drop.append(answer)

    for answer in user_ans:
        if answer in remaining_right:
            results.append("found")
            remaining_right.remove(answer)
            remaining_user.remove(answer)
        elif len(answer.split(" ")) > 1:
            containing = [target for target in remaining_right if answer in target]
            if containing:
                # Whole answer is a substring of a gold answer.
                results.append("partial")
                if containing[0] not in right_to_drop:
                    right_to_drop.append(containing[0])
                if answer not in user_to_drop:
                    user_to_drop.append(answer)
            else:
                for word in answer.split(" "):
                    if word in remaining_right:
                        # A single word of the answer exactly matches a gold answer.
                        results.append("partial")
                        if word not in right_to_drop:
                            right_to_drop.append(word)
                        if answer not in user_to_drop:
                            user_to_drop.append(answer)
                    else:
                        cross_word_partials(answer)
        else:
            cross_word_partials(answer)

    # NOTE(review): like the original, a gold answer removed by the "found"
    # branch can still sit in right_to_drop and raise ValueError here.
    for stale in user_to_drop:
        remaining_user.remove(stale)
    for stale in right_to_drop:
        remaining_right.remove(stale)
    for _ in remaining_right:
        results.append("missing")
    for _ in remaining_user:
        results.append("incorrect")
    return results
def check_rel(word, right_word, user_relation, right_relation):
user_rel_dict = gen_rel_dicts(word, user_relation)
ans_rel_dict = gen_rel_dicts(right_word, right_relation)
ans_arr_final = []
user_label = ""
ans_label = ""
if word:
if word in user_rel_dict:
user_rel = user_rel_dict[word]
#print(user_rel_dict['label'])
user_label = user_rel_dict['label']
else:
user_rel = []
else:
user_rel = []
if right_word:
if right_word in ans_rel_dict:
answer_rel = | |
self.canvas.bind('<Shift-Down>', lambda event: self.pan_down(fraction = 1. / self.axes_height))
self.canvas.bind('<Shift-Left>', lambda event: self.pan_left(fraction = 1. / self.axes_width))
self.canvas.bind('<Shift-Right>', lambda event: self.pan_right(fraction = 1. / self.axes_width))
self.canvas.bind('=', lambda event: self.zoom_in())
self.canvas.bind('-', lambda event: self.zoom_out())
self.canvas.bind('<Control-equal>', lambda event: self.zoom_in(factor = 2.))
self.canvas.bind('<Control-minus>', lambda event: self.zoom_out(factor = 2.))
self.canvas.bind('+', lambda event: self.zoom_in(factor = math.sqrt(math.sqrt(2.))))
self.canvas.bind('_', lambda event: self.zoom_out(factor = math.sqrt(math.sqrt(2.))))
self.canvas.bind('h', lambda event: self.zoom_to_fit())
self.canvas.bind('<Home>', lambda event: self.zoom_to_fit())
self.canvas.bind('g', lambda event: self.grid('off') if self.grid() == 'on' else self.grid('on'))
self.canvas.bind('x', lambda event: self.xaxis('log') if self.xaxis() == 'linear' else self.xaxis('linear'))
self.canvas.bind('y', lambda event: self.yaxis('log') if self.yaxis() == 'linear' else self.yaxis('linear'))
self.canvas.bind('l', lambda event: self.yaxis('log') if self.yaxis() == 'linear' else self.yaxis('linear'))
self.canvas.bind('r', lambda event: self.yaxis('log', side = 'right') if self.yaxis(side = 'right') == 'linear' else self.yaxis('linear', side = 'right'))
def mouse_bindings(self):
self.marker_color = tk.StringVar()
self.marker_color.set('b')
self.marker = tk.StringVar()
self.marker.set('')
self.curve_color = tk.StringVar()
self.curve_color.set('b')
self.curve_style = tk.StringVar()
self.marker.set('')
self.curve_name = ''
self.curve_menu = tk.Menu(self.canvas, tearoff = 0)
marker_menu = tk.Menu(self.curve_menu, tearoff = 0)
for [val, name] in self.marker_names:
marker_menu.add_radiobutton(label = name, variable = self.marker, value = val, command = self.configure_curve_callback)
self.curve_menu.add_cascade(label = 'Marker', menu = marker_menu)
marker_color_menu = tk.Menu(self.curve_menu, tearoff = 0)
for [val, name] in self.color_names:
marker_color_menu.add_radiobutton(label = name, variable = self.marker_color, value = val, command = self.configure_curve_callback)
self.curve_menu.add_cascade(label = 'Marker color', menu = marker_color_menu)
curve_style_menu = tk.Menu(self.curve_menu, tearoff = 0)
for [val, name] in self.linestyle_names:
curve_style_menu.add_radiobutton(label = name, variable = self.curve_style, value = val, command = self.configure_curve_callback)
self.curve_menu.add_cascade(label = 'Curve style', menu = curve_style_menu)
curve_color_menu = tk.Menu(self.curve_menu, tearoff = 0)
for [val, name] in self.color_names:
curve_color_menu.add_radiobutton(label = name, variable = self.curve_color, value = val, command = self.configure_curve_callback)
self.curve_menu.add_cascade(label = 'Curve color', menu = curve_color_menu)
self.curve_menu.add_separator()
self.curve_menu.add_command(label = 'Delete', command = lambda: self.delete_curve(self.curve_name))
windowing_system = self.root.tk.call('tk', 'windowingsystem')
self.arrow = 'arrow'
# if windowing_system=='x11':
# self.zoom = ('@cursors/zoom.xbm', 'cursors/zoom.xbm', 'black', 'white')
# self.zoomin = ('@cursors/zoomin.xbm', 'cursors/zoommask.xbm', 'black', 'white')
# self.zoomout = ('@cursors/zoomout.xbm', 'cursors/zoommask.xbm', 'black', 'white')
# self.openhand = ('@cursors/openhand.xbm', 'cursors/openhandmask.xbm', 'black', 'white')
# self.closedhand = ('@cursors/closedhand.xbm', 'cursors/closedhandmask.xbm', 'black', 'white')
# elif windowing_system=='win32':
# self.zoom = '@cursors/zoom.cur'
# self.zoomin = '@cursors/zoomin.cur'
# self.zoomout = '@cursors/zoomout.cur'
# self.openhand = '@cursors/openhand.cur'
# self.closedhand = '@cursors/closedhand.cur'
# elif windowing_system=='aqua':
# self.zoom = 'arrow'
# self.zoomin = 'arrow'
# self.zoomout = 'arrow'
# self.openhand = 'openhand'
# self.closedhand = 'closedhand'
# else:
self.zoom = 'arrow'
self.zoomin = 'arrow'
self.zoomout = 'arrow'
self.openhand = 'arrow'
self.closedhand = 'arrow'
self.canvas.bind('<Control-Button-1>', self.curve_context_menu)
self.canvas.bind('<Button-3>', self.curve_context_menu)
self.canvas.bind('<Escape>', self.cancel_mouse_zoom_pan)
self.canvas.bind('z', self.setup_mouse_zoom)
self.canvas.bind('b', self.setup_mouse_box_zoom)
self.canvas.bind('p', self.setup_mouse_pan)
def curve_context_menu(self, event):
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
if (x > self.axes_left) and (x < self.axes_right) and (y > self.axes_top) and (y < self.axes_bottom):
items = self.canvas.find_overlapping(x - 2., y - 2., x + 2., y + 2.)
name = ''
for item in items:
tags = self.canvas.gettags(item)
if (tags != ()) and (tags[0] != 'current'):
name = tags[0]
if name != '':
if name in self.curves:
self.curve_name = name
self.marker_color.set(self.curves[name].marker_color)
self.marker.set(self.curves[name].marker)
self.curve_color.set(self.curves[name].curve_color)
self.curve_style.set(self.curves[name].curve_style)
else:
raise NameError('no curve exists with name = {0!r}'.format(name))
self.curve_menu.post(event.x_root, event.y_root)
def configure_curve_callback(self):
marker = self.marker.get()
if marker == ' ':
marker = ''
curve_style = self.curve_style.get()
if curve_style == ' ':
curve_style = ''
if self.curve_name in self.curves:
if (marker == '') and (curve_style == ''):
self.delete_curve(self.curve_name)
else:
self.curves[self.curve_name].marker_color = self.marker_color.get()
self.curves[self.curve_name].marker = marker
self.curves[self.curve_name].curve_color = self.curve_color.get()
self.curves[self.curve_name].curve_style = curve_style
else:
raise NameError('no curve exists with name = {0!r}'.format(name))
self.refresh_plot()
def cancel_mouse_zoom_pan(self, event):
self.canvas.bind('<Button-1>', lambda event: None)
self.canvas.bind('<Shift-Button-1>', lambda event: None)
self.canvas.bind('<Shift_L>', lambda event: None)
self.canvas.bind('<KeyRelease-Shift_L>', lambda event: None)
self.canvas.bind('<B1-Motion>', lambda event: None)
self.canvas.bind('<ButtonRelease-1>', lambda event: None)
self.canvas.configure(cursor = self.arrow)
def setup_mouse_zoom(self, event):
self.canvas.bind('<Button-1>', self.mouse_zoom_in)
self.canvas.bind('<Shift-Button-1>', self.mouse_zoom_out)
self.canvas.bind('<Shift_L>', lambda event: self.canvas.configure(cursor = self.zoomout))
self.canvas.bind('<KeyRelease-Shift_L>', lambda event: self.canvas.configure(cursor = self.zoomin))
self.canvas.bind('<B1-Motion>', lambda event: None)
self.canvas.bind('<ButtonRelease-1>', lambda event: None)
self.canvas.configure(cursor = self.zoomin)
def mouse_zoom_in(self, event):
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
if (x >= self.axes_left) and (x <= self.axes_right) and (y >= self.axes_top) and (y <= self.axes_bottom):
self.zoom_in(cx = x, cy = y)
def mouse_zoom_out(self, event):
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
if (x >= self.axes_left) and (x <= self.axes_right) and (y >= self.axes_top) and (y <= self.axes_bottom):
self.zoom_out(cx = x, cy = y)
def setup_mouse_box_zoom(self, event):
self.canvas.bind('<Button-1>', self.start_mouse_box_zoom)
self.canvas.bind('<Shift-Button-1>', lambda event: None)
self.canvas.bind('<Shift_L>', lambda event: None)
self.canvas.bind('<KeyRelease-Shift_L>', lambda event: None)
self.canvas.bind('<B1-Motion>', lambda event: None)
self.canvas.bind('<ButtonRelease-1>', lambda event: None)
self.canvas.configure(cursor = self.zoom)
def start_mouse_box_zoom(self, event):
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
if (x >= self.axes_left) and (x <= self.axes_right) and (y >= self.axes_top) and (y <= self.axes_bottom):
self.x0 = x
self.y0 = y
self.canvas.create_rectangle([self.x0, self.y0, self.x0, self.y0], outline = self.axes_color, fill = '', dash = (1, 4), tags = 'zoombox')
self.canvas.bind('<B1-Motion>', self.continue_mouse_box_zoom)
self.canvas.bind('<ButtonRelease-1>', self.finish_mouse_box_zoom)
def continue_mouse_box_zoom(self, event):
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
if x < self.axes_left:
x = self.axes_left
if x > self.axes_right:
x = self.axes_right
if y < self.axes_top:
y = self.axes_top
if y > self.axes_bottom:
y = self.axes_bottom
self.canvas.coords('zoombox', self.x0, self.y0, x, y)
def finish_mouse_box_zoom(self, event):
self.canvas.delete('zoombox')
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
if x < self.axes_left:
x = self.axes_left
if x > self.axes_right:
x = self.axes_right
if y < self.axes_top:
y = self.axes_top
if y > self.axes_bottom:
y = self.axes_bottom
if x < self.x0:
self.x0, x = x, self.x0
if y < self.y0:
self.y0, y = y, self.y0
self.zoom_rect([self.x0, self.y0, x, y])
self.canvas.bind('<B1-Motion>', lambda event: None)
self.canvas.bind('<ButtonRelease-1>', lambda event: None)
def setup_mouse_pan(self, event):
self.canvas.bind('<Button-1>', self.start_mouse_pan)
self.canvas.bind('<Shift-Button-1>', lambda event: None)
self.canvas.bind('<Shift_L>', lambda event: None)
self.canvas.bind('<KeyRelease-Shift_L>', lambda event: None)
self.canvas.bind('<B1-Motion>', lambda event: None)
self.canvas.bind('<ButtonRelease-1>', lambda event: None)
self.canvas.configure(cursor = self.openhand)
def start_mouse_pan(self, event):
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
if (x >= self.axes_left) and (x <= self.axes_right) and (y >= self.axes_top) and (y <= self.axes_bottom):
self.x0 = x
self.y0 = y
self.canvas.bind('<B1-Motion>', self.continue_mouse_pan)
self.canvas.bind('<ButtonRelease-1>', self.finish_mouse_pan)
self.canvas.configure(cursor = self.closedhand)
def continue_mouse_pan(self, event):
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
self.pan(dx = x - self.x0, dy = y - self.y0)
self.x0 = x
self.y0 = y
def finish_mouse_pan(self, event):
self.canvas.bind('<B1-Motion>', lambda event: None)
self.canvas.bind('<ButtonRelease-1>', lambda event: None)
self.canvas.configure(cursor = self.openhand)
def tk_backend(self):
self.erase_plot = self.tk_erase_plot
self.draw_rect = self.tk_draw_rect
self.draw_oval = self.tk_draw_oval
self.draw_line = self.tk_draw_line
self.draw_text = self.tk_draw_text
self.begin_group = self.tk_begin_group
self.end_group = self.tk_end_group
    def tk_erase_plot(self):
        """Clear the Tk canvas by deleting every canvas item."""
        self.canvas.delete('all')
def tk_draw_rect(self, **kwargs):
coords = kwargs.get('coords', [])
outline_color = kwargs.get('outline', '')
fill_color = kwargs.get('fill', '')
name = kwargs.get('name', '')
item = self.canvas.create_rectangle(coords, outline = outline_color, fill = fill_color)
if name != '':
self.canvas.itemconfig(item, tags = name)
def tk_draw_oval(self, **kwargs):
coords = kwargs.get('coords', [])
outline_color = kwargs.get('outline', '')
fill_color = kwargs.get('fill', '')
line_weight = kwargs.get('width', 1.)
name = kwargs.get('name', '')
item = self.canvas.create_oval(coords, outline = outline_color, fill = fill_color, width = line_weight)
if name != '':
self.canvas.itemconfig(item, tags = name)
def tk_draw_line(self, **kwargs):
coords = kwargs.get('coords', [])
fill = kwargs.get('fill', '')
line_style = kwargs.get('dash', ())
line_weight = kwargs.get('width', 1.)
name = kwargs.get('name', '')
item = self.canvas.create_line(coords, fill = fill, dash = line_style, width = line_weight)
if name != '':
self.canvas.itemconfig(item, tags = name)
def tk_draw_text(self, **kwargs):
text = kwargs.get('text', '')
coords = kwargs.get('coords', [])
fill = kwargs.get('fill', '')
font = kwargs.get('font', (self.label_font, self.label_fontsize))
anchor = kwargs.get('anchor', 'center')
justify = kwargs.get('justify', 'center')
name = kwargs.get('name', '')
if text != '':
item = self.canvas.create_text(coords, text = text, font = font, fill = fill, anchor = anchor, justify = justify)
if name != '':
self.canvas.itemconfig(item, tags = name)
    def tk_begin_group(self, **kwargs):
        """No-op placeholder so every backend exposes a begin_group hook."""
        pass
    def tk_end_group(self, **kwargs):
        """No-op placeholder so every backend exposes an end_group hook."""
        pass
def svg_backend(self):
self.erase_plot = self.svg_erase_plot
self.draw_rect = self.svg_draw_rect
self.draw_oval = self.svg_draw_oval
self.draw_line = self.svg_draw_line
self.draw_text = self.svg_draw_text
self.begin_group = self.svg_begin_group
self.end_group = self.svg_end_group
    def svg_erase_plot(self):
        """No-op: SVG output is written sequentially, so nothing to erase."""
        pass
def svg_draw_rect(self, **kwargs):
coords = kwargs.get('coords', [])
outline_color = kwargs.get('outline', 'none')
fill_color = kwargs.get('fill', 'none')
name = kwargs.get('name', '')
self.svg_file.write(u'{indent}<rect x="{x!s}" y="{y!s}" width="{width!s}" height="{height!s}" stroke="{outline_color}" fill="{fill_color}"/>\n'.format(indent = ' ' * self.svg_indent_level, x = coords[0], y = coords[1], width = coords[2] - coords[0], height = coords[3] - coords[1], outline_color = outline_color, fill_color = fill_color))
def svg_draw_oval(self, **kwargs):
coords = kwargs.get('coords', [])
outline_color = kwargs.get('outline', '')
fill_color = kwargs.get('fill', '')
line_weight = kwargs.get('width', 1.)
name = kwargs.get('name', '')
self.svg_file.write(u'{indent}<ellipse cx="{cx!s}" cy="{cy!s}" rx="{rx!s}" ry="{ry!s}" stroke="{outline_color}" stroke-width="{width!s}px" fill="{fill_color}"/>\n'.format(indent = ' ' * self.svg_indent_level, cx = 0.5 * (coords[0] + | |
self.columns = columns
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
    def _chk_truncate(self):
        '''
        Checks whether the frame should be truncated. If so, slices
        the frame up.

        Sets self.tr_frame (possibly a sliced copy), self.truncate_h /
        self.truncate_v / self.is_truncated, plus self.tr_col_num and
        self.tr_row_num (number of leading/trailing columns/rows kept),
        which are used later when inserting the '...' markers.
        '''
        # Column of which first element is used to determine width of a dot col
        self.tr_size_col = -1
        # Cut the data to the information actually printed
        max_cols = self.max_cols
        max_rows = self.max_rows
        if max_cols == 0 or max_rows == 0: # assume we are in the terminal (why else = 0)
            (w, h) = get_terminal_size()
            self.w = w
            self.h = h
            if self.max_rows == 0:
                dot_row = 1
                prompt_row = 1
                # NOTE(review): show_dimension_rows is only bound when
                # self.show_dimensions is truthy; the n_add_rows line below
                # would raise NameError otherwise -- confirm show_dimensions
                # is always set on this path.
                if self.show_dimensions:
                    show_dimension_rows = 3
                n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
                max_rows_adj = self.h - n_add_rows # rows available to fill with actual data
                self.max_rows_adj = max_rows_adj
            # Format only rows and columns that could potentially fit the screen
            if max_cols == 0 and len(self.frame.columns) > w:
                max_cols = w
            if max_rows == 0 and len(self.frame) > h:
                max_rows = h
        if not hasattr(self, 'max_rows_adj'):
            self.max_rows_adj = max_rows
        if not hasattr(self, 'max_cols_adj'):
            self.max_cols_adj = max_cols
        max_cols_adj = self.max_cols_adj
        max_rows_adj = self.max_rows_adj
        # Truthy limits smaller than the frame trigger truncation on that axis
        # (a 0 limit is falsy, so the == 0 branches below are effectively dead).
        truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
        truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
        frame = self.frame
        # NOTE(review): frame supports 2D slicing and a .concat method here --
        # this is not the stock pandas DataFrame API; verify against the
        # frame type actually used in this project.
        if truncate_h:
            if max_cols_adj == 0:
                col_num = len(frame.columns)
            elif max_cols_adj == 1:
                frame = frame[:, :max_cols]
                col_num = max_cols
            else:
                # Keep the first and last col_num columns.
                col_num = (max_cols_adj // 2)
                frame = frame[:, :col_num].concat(frame[:, -col_num:], axis=1)
            self.tr_col_num = col_num
        if truncate_v:
            if max_rows_adj == 0:
                row_num = len(frame)
            if max_rows_adj == 1:
                row_num = max_rows
                frame = frame[:max_rows, :]
            else:
                # Keep the first and last row_num rows.
                row_num = max_rows_adj // 2
                frame = frame[:row_num, :].concat(frame[-row_num:, :])
            self.tr_row_num = row_num
        self.tr_frame = frame
        self.truncate_h = truncate_h
        self.truncate_v = truncate_v
        self.is_truncated = self.truncate_h or self.truncate_v
    def _to_str_columns(self):
        """
        Render a DataFrame to a list of columns (as lists of strings).

        Operates on ``self.tr_frame`` (prepared by ``_chk_truncate``).
        Each returned column is a list of fixed-width strings; the index
        column (when shown) comes first, and '...' placeholder
        columns/rows are spliced in when the frame was truncated.
        """
        frame = self.tr_frame
        # may include levels names also
        str_index = self._get_formatted_index(frame)
        str_columns = self._get_formatted_column_labels(frame)
        if self.header:
            stringified = []
            for i, c in enumerate(frame.columns):
                cheader = str_columns[i]
                # each column must be at least as wide as its widest header line
                max_colwidth = max(self.col_space or 0,
                                   *(self.adj.len(x) for x in cheader))
                fmt_values = self._format_col(i)
                fmt_values = _make_fixed_width(fmt_values, self.justify,
                                               minimum=max_colwidth,
                                               adj=self.adj)
                max_len = max(max([self.adj.len(x) for x in fmt_values]),
                              max_colwidth)
                cheader = self.adj.justify(cheader, max_len, mode=self.justify)
                stringified.append(cheader + fmt_values)
        else:
            stringified = []
            for i, c in enumerate(frame):
                fmt_values = self._format_col(i)
                fmt_values = _make_fixed_width(fmt_values, self.justify,
                                               minimum=(self.col_space or 0),
                                               adj=self.adj)
                stringified.append(fmt_values)
        strcols = stringified
        if self.index:
            strcols.insert(0, str_index)
        # Add ... to signal truncated
        truncate_h = self.truncate_h
        truncate_v = self.truncate_v
        if truncate_h:
            col_num = self.tr_col_num
            col_width = self.adj.len(strcols[self.tr_size_col][0]) # infer from column header
            strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] * (len(str_index)))
        if truncate_v:
            # header rows = total index-column lines minus data rows
            n_header_rows = len(str_index) - len(frame)
            row_num = self.tr_row_num
            for ix, col in enumerate(strcols):
                cwidth = self.adj.len(strcols[ix][row_num]) # infer from above row
                is_dot_col = False
                if truncate_h:
                    is_dot_col = ix == col_num + 1
                # narrow columns get a shorter '..' marker
                if cwidth > 3 or is_dot_col:
                    my_str = '...'
                else:
                    my_str = '..'
                if ix == 0:
                    dot_mode = 'left'
                elif is_dot_col:
                    cwidth = self.adj.len(strcols[self.tr_size_col][0])
                    dot_mode = 'center'
                else:
                    dot_mode = 'right'
                dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
                strcols[ix].insert(row_num + n_header_rows, dot_str)
        return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
% (type(self.frame).__name__,
pprint_thing(frame.columns),
pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print the whole frame
text = self.adj.adjoin(1, *strcols)
elif not isinstance(self.max_cols, int) or self.max_cols > 0: # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split('\n')
row_lens = [len(it) for it in text]
max_len_col_ix = row_lens.index(max(row_lens))
max_len = row_lens[max_len_col_ix]
headers = [ele[0] for ele in strcols]
# Size of last col determines dot col size. See `self._to_str_columns
size_tr_col = len(headers[self.tr_size_col])
max_len += size_tr_col # Need to make space for largest row plus truncate dot col
dif = max_len - self.w
adj_dif = dif
col_lens = [max(len(it) for it in ele) for ele in strcols]
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
# mid_ix = col_lens.index[mid]
col_len = col_lens[mid]
adj_dif -= (col_len + 1) # adjoin adds one
col_lens = col_lens[:mid] + col_len[mid+1: ]
n_cols = len(col_lens)
max_cols_adj = n_cols - self.index # subtract index column
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[%d rows x %d columns]"
% (len(frame), len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= max(self.adj.len(x) for x in idx) + adjoin_width
col_widths = [max(self.adj.len(x) for x in col)
if len(col) > 0 else 0
for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
frame[:, i],
frame.dtypes[i],
formatter, float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space
)
def to_html(self, classes=None, notebook=False):
"""
Render a DataFrame to a html table.
Parameters
----------
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
"""
html_renderer = HTMLFormatter(self, classes=classes,
max_rows=self.max_rows,
max_cols=self.max_cols,
notebook=notebook)
if hasattr(self.buf, 'write'):
html_renderer.write_result(self.buf)
elif isinstance(self.buf, six.string_types):
with open(self.buf, 'w') as f:
html_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
' method')
def _get_formatted_column_labels(self, frame):
def is_numeric_dtype(dtype):
return is_number(dtype)
columns = frame.columns
fmt_columns = [col.name for col in columns]
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x
if not self._get_formatter(i) and need_leadsp[x]
else x]
for i, (col, x) in
enumerate(zip(columns, fmt_columns))]
if self.show_index_names and self.has_index_names:
for x in str_columns:
x.append('')
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by to_html().
index = frame.index
show_index_names = self.show_index_names and self.has_index_names
show_col_names = (self.show_index_names and self.has_column_names)
fmt = self._get_formatter('__index__')
fmt_index = [[str(i) for i in index]]
fmt_index = [tuple(_make_fixed_width(list(x), justify='left',
minimum=(self.col_space or 0),
adj=self.adj))
for x in fmt_index]
adjoined = self.adj.adjoin(1, *fmt_index).split('\n')
# empty space for columns
if show_col_names:
col_header = ['%s' % x for x in self._get_column_name_list()]
else:
col_header = ['']
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
names.append('' if columns.name is None else columns.name)
return names
class HTMLFormatter(TableFormatter):
indent_delta = 2
def __init__(self, formatter, classes=None, max_rows=None, max_cols=None,
notebook=False):
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements = []
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.escape = self.fmt.kwds.get('escape', True)
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
self.max_cols < len(self.fmt.columns))
self.notebook = notebook
def write(self, s, indent=0):
rs = pprint_thing(s)
self.elements.append(' ' * indent + rs)
def write_th(self, s, indent=0, tags=None):
if (self.fmt.col_space is not None
and self.fmt.col_space > 0):
tags = (tags or "")
tags += 'style="min-width: %s;"' % self.fmt.col_space
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def write_td(self, s, indent=0, tags=None):
return self._write_cell(s, kind='td', indent=indent, tags=tags)
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
start_tag = '<%s %s>' % (kind, tags)
else:
start_tag = '<%s>' % kind
if self.escape:
# escape & first to prevent double escaping of &
esc = OrderedDict(
[('&', r'&'), ('<', r'<'), ('>', r'>')]
)
else:
| |
import pprint
import copy
import random
import numpy as np
from sureal.tools.misc import empty_object, get_unique_sorted_list
from sureal.tools.decorator import memoized as persist
from sureal.tools.misc import get_unique_sorted_list
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
class DatasetReader(object):
    """Base reader for a subjective-test dataset object.

    The dataset must expose ``ref_videos`` and ``dis_videos`` lists of
    dicts; construction validates that the content ids are consistent.
    """

    def __init__(self, dataset, **kwargs):
        self.dataset = dataset
        self._assert_dataset()

    def _assert_dataset(self):
        # reference content ids must all fall in [0, max_content_id]
        content_ids = [ref_video['content_id']
                       for ref_video in self.dataset.ref_videos]
        expected_cids = range(np.max(content_ids) + 1)
        for cid in content_ids:
            assert cid in expected_cids, \
                'reference video content_ids must be in [0, {}), but is {}'.\
                format(self.num_ref_videos, cid)
        # each distorted video must reference a known content id
        for dis_video in self.dataset.dis_videos:
            assert dis_video['content_id'] in content_ids, \
                "dis_video of content_id {content_id}, asset_id {asset_id} must have content_id in {cids}".format(
                    content_id=dis_video['content_id'], asset_id=dis_video['asset_id'], cids=content_ids)

    @property
    def num_dis_videos(self):
        """Number of distorted videos in the dataset."""
        return len(self.dataset.dis_videos)

    @property
    def num_ref_videos(self):
        """Number of reference videos in the dataset."""
        return len(self.dataset.ref_videos)

    @property
    def max_content_id_of_ref_videos(self):
        """Largest content_id among the reference videos."""
        return max(ref_video['content_id']
                   for ref_video in self.dataset.ref_videos)

    @property
    def content_ids(self):
        """Distinct content ids of the reference videos (unordered)."""
        return list({ref_video['content_id']
                     for ref_video in self.dataset.ref_videos})

    @property
    def asset_ids(self):
        """Distinct asset ids of the distorted videos (unordered)."""
        return list({dis_video['asset_id']
                     for dis_video in self.dataset.dis_videos})

    @property
    def content_id_of_dis_videos(self):
        """content_id of each distorted video, in dataset order."""
        return [dis_video['content_id']
                for dis_video in self.dataset.dis_videos]

    @property
    def _contentid_to_refvideo_map(self):
        # content_id -> reference-video dict
        return {ref_video['content_id']: ref_video
                for ref_video in self.dataset.ref_videos}

    @property
    def disvideo_is_refvideo(self):
        """Per dis_video: True when its path equals its reference's path."""
        cid_to_ref = self._contentid_to_refvideo_map
        return [cid_to_ref[dis_video['content_id']]['path'] == dis_video['path']
                for dis_video in self.dataset.dis_videos]

    @property
    def ref_score(self):
        """Dataset-level reference score, or None when absent."""
        return getattr(self.dataset, 'ref_score', None)

    def to_dataset(self):
        """Return the wrapped dataset object."""
        return self.dataset

    @staticmethod
    def write_out_dataset(dataset, output_dataset_filepath):
        """Serialize *dataset* to a Python-syntax text file."""
        assert (hasattr(dataset, 'ref_videos'))
        assert (hasattr(dataset, 'dis_videos'))
        with open(output_dataset_filepath, 'wt') as output_file:
            # scalar/top-level fields first; the big lists are written below
            for key, value in dataset.__dict__.items():
                if key in ('ref_videos', 'dis_videos', 'subjects') \
                        or key.startswith('__'):
                    continue
                output_file.write('{} = '.format(key) + repr(value) + '\n')
            output_file.write('\n')
            output_file.write('ref_videos = ' + pprint.pformat(
                dataset.ref_videos) + '\n')
            output_file.write('\n')
            output_file.write('dis_videos = ' + pprint.pformat(
                dataset.dis_videos) + '\n')
            if 'subjects' in dataset.__dict__.keys():
                output_file.write('\n')
                output_file.write('subjects = ' + pprint.pformat(
                    dataset.subjects) + '\n')
class RawDatasetReader(DatasetReader):
"""
Reader for a subjective quality test dataset with raw scores (dis_video must
has key of 'os' (opinion score)).
"""
def _assert_dataset(self):
"""
Override DatasetReader._assert_dataset
"""
super(RawDatasetReader, self)._assert_dataset()
# assert each dis_video dict has key 'os' (opinion score), and must
# be iterable (list, tuple or dictionary)
for dis_video in self.dataset.dis_videos:
assert 'os' in dis_video, "dis_video must have key 'os' (opinion score)"
assert isinstance(dis_video['os'], (list, tuple, dict))
# make sure each dis video has equal number of observers
if (
isinstance(self.dataset.dis_videos[0]['os'], list) or
isinstance(self.dataset.dis_videos[0]['os'], tuple)
):
num_observers = len(self.dataset.dis_videos[0]['os'])
for dis_video in self.dataset.dis_videos[1:]:
assert num_observers == len(dis_video['os']), \
"expect number of observers {expected} but got {actual} for {dis_video}".format(
expected=num_observers, actual=len(dis_video['os']), dis_video=str(dis_video))
@property
def num_observers(self):
if (
isinstance(self.dataset.dis_videos[0]['os'], list) or
isinstance(self.dataset.dis_videos[0]['os'], tuple)
):
return len(self.dataset.dis_videos[0]['os'])
elif isinstance(self.dataset.dis_videos[0]['os'], dict):
list_observers = self._get_list_observers()
return len(list_observers)
else:
assert False, ''
def _get_list_observers(self):
for dis_video in self.dataset.dis_videos:
assert isinstance(dis_video['os'], dict)
list_observers = []
for dis_video in self.dataset.dis_videos:
list_observers += dis_video['os'].keys()
return get_unique_sorted_list(list_observers)
@property
def opinion_score_2darray(self):
"""
2darray storing raw opinion scores, with first dimension the number of
distorted videos, second dimension the number of observers
"""
score_mtx = float('NaN') * np.ones([self.num_dis_videos, self.num_observers])
if isinstance(self.dataset.dis_videos[0]['os'], list) \
or isinstance(self.dataset.dis_videos[0]['os'], tuple):
for i_dis_video, dis_video in enumerate(self.dataset.dis_videos):
score_mtx[i_dis_video, :] = dis_video['os']
elif isinstance(self.dataset.dis_videos[0]['os'], dict):
list_observers = self._get_list_observers()
for i_dis_video, dis_video in enumerate(self.dataset.dis_videos):
for i_observer, observer in enumerate(list_observers):
if observer in dis_video['os']:
score_mtx[i_dis_video, i_observer] = dis_video['os'][observer]
else:
assert False
return score_mtx
def to_aggregated_dataset(self, aggregate_scores, **kwargs):
newone = self._prepare_new_dataset(kwargs)
# ref_videos: deepcopy
newone.ref_videos = copy.deepcopy(self.dataset.ref_videos)
# dis_videos: use input aggregate scores
dis_videos = []
assert len(self.dataset.dis_videos) == len(aggregate_scores)
for dis_video, score in zip(self.dataset.dis_videos, aggregate_scores):
dis_video2 = copy.deepcopy(dis_video)
if 'os' in dis_video2: # remove 'os' - opinion score
del dis_video2['os']
dis_video2['groundtruth'] = score
dis_videos.append(dis_video2)
# add scores std if available
if 'scores_std' in kwargs and kwargs['scores_std'] is not None:
assert len(dis_videos) == len(kwargs['scores_std'])
for dis_video, score_std in zip(dis_videos, kwargs['scores_std']):
dis_video['groundtruth_std'] = score_std
if 'aggregate_content_ids' in kwargs and kwargs['aggregate_content_ids'] is not None:
dis_videos = list(filter(lambda dis_video: dis_video['content_id'] in kwargs['aggregate_content_ids'], dis_videos))
if 'aggregate_asset_ids' in kwargs and kwargs['aggregate_asset_ids'] is not None:
dis_videos = list(filter(lambda dis_video: dis_video['asset_id'] in kwargs['aggregate_asset_ids'], dis_videos))
newone.dis_videos = dis_videos
return newone
def _prepare_new_dataset(self, kwargs):
newone = empty_object()
# systematically copy fields, e.g. dataset_name, yuv_fmt, width, height, ...
for key in self.dataset.__dict__.keys():
if not key.startswith('__'): # filter out those e.g. __builtin__ ...
setattr(newone, key, getattr(self.dataset, key))
if 'quality_width' in kwargs and kwargs['quality_width'] is not None:
newone.quality_width = kwargs['quality_width']
elif hasattr(self.dataset, 'quality_width'):
newone.quality_width = self.dataset.quality_width
if 'quality_height' in kwargs and kwargs['quality_height'] is not None:
newone.quality_height = kwargs['quality_height']
elif hasattr(self.dataset, 'quality_height'):
newone.quality_height = self.dataset.quality_height
if 'resampling_type' in kwargs and kwargs['resampling_type'] is not None:
newone.resampling_type = kwargs['resampling_type']
elif hasattr(self.dataset, 'resampling_type'):
newone.resampling_type = self.dataset.resampling_type
return newone
def to_aggregated_dataset_file(self, dataset_filepath, aggregate_scores, **kwargs):
aggregate_dataset = self.to_aggregated_dataset(aggregate_scores, **kwargs)
self.write_out_dataset(aggregate_dataset, dataset_filepath)
def to_persubject_dataset(self, quality_scores, **kwargs):
import math
newone = self._prepare_new_dataset(kwargs)
# ref_videos: deepcopy
newone.ref_videos = copy.deepcopy(self.dataset.ref_videos)
# dis_videos: use input aggregate scores
dis_videos = []
for dis_video, quality_score in zip(self.dataset.dis_videos, quality_scores):
assert 'os' in dis_video
# quality_score should be a 1-D array with (processed) per-subject scores
assert hasattr(quality_score, '__len__')
# new style: opinion is specified as a dict: user -> score. In this
# case, quality_score may contain nan. In this case: filter them out
if isinstance(dis_video['os'], dict):
quality_score = list(filter(lambda x: not math.isnan(x), quality_score))
assert len(dis_video['os']) == len(quality_score)
for persubject_score in quality_score:
dis_video2 = copy.deepcopy(dis_video)
if 'os' in dis_video2: # remove 'os' - opinion score
del dis_video2['os']
dis_video2['groundtruth'] = persubject_score
dis_videos.append(dis_video2)
newone.dis_videos = dis_videos
return newone
def to_persubject_dataset_file(self, dataset_filepath, quality_scores, **kwargs):
persubject_dataset = self.to_persubject_dataset(quality_scores, **kwargs)
self.write_out_dataset(persubject_dataset, dataset_filepath)
def to_pc_dataset(self, **kwargs):
newone = self._prepare_new_dataset(kwargs)
# ref_videos: deepcopy
newone.ref_videos = copy.deepcopy(self.dataset.ref_videos)
pc_type = kwargs['pc_type'] if 'pc_type' in kwargs and kwargs['pc_type'] is not None else 'within_subject_within_content'
tiebreak_method = kwargs['tiebreak_method'] if 'tiebreak_method' in kwargs and kwargs['tiebreak_method'] is not None else 'even_split'
sampling_seed = kwargs['sampling_seed'] if 'sampling_seed' in kwargs and kwargs['sampling_seed'] is not None else None
sampling_rate = kwargs['sampling_rate'] if 'sampling_rate' in kwargs and kwargs['sampling_rate'] is not None else None
per_asset_sampling_rates = kwargs['per_asset_sampling_rates'] if 'per_asset_sampling_rates' in kwargs and kwargs['per_asset_sampling_rates'] is not None else None
cointoss_rate = kwargs['cointoss_rate'] if 'cointoss_rate' in kwargs and kwargs['cointoss_rate'] is not None else None
per_asset_cointoss_rates = kwargs['per_asset_cointoss_rates'] if 'per_asset_cointoss_rates' in kwargs and kwargs['per_asset_cointoss_rates'] is not None else None
noise_level = kwargs['noise_level'] if 'noise_level' in kwargs and kwargs['noise_level'] is not None else None
per_asset_noise_levels = kwargs['per_asset_noise_levels'] if 'per_asset_noise_levels' in kwargs and kwargs['per_asset_noise_levels'] is not None else None
per_asset_mean_scores = kwargs['per_asset_mean_scores'] if 'per_asset_mean_scores' in kwargs and kwargs['per_asset_mean_scores'] is not None else None
assert pc_type == 'within_subject_within_content' or pc_type == 'within_subject'
assert tiebreak_method == 'even_split' or tiebreak_method == 'coin_toss'
assert not (sampling_rate is not None and per_asset_sampling_rates is not None)
if sampling_rate is not None:
assert np.isscalar(sampling_rate) and 0.0 <= sampling_rate
if per_asset_sampling_rates is not None:
assert len(per_asset_sampling_rates) == len(self.dataset.dis_videos)
for per_asset_sampling_rate in per_asset_sampling_rates:
assert np.isscalar(per_asset_sampling_rate) and 0.0 <= per_asset_sampling_rate
assert not (cointoss_rate is not None and per_asset_cointoss_rates is not None)
if cointoss_rate is not None:
assert np.isscalar(cointoss_rate) and 0.0 <= cointoss_rate <= 1.0
if per_asset_cointoss_rates is not None:
assert len(per_asset_cointoss_rates) == len(self.dataset.dis_videos)
for cointoss_rate_ in per_asset_cointoss_rates:
assert np.isscalar(cointoss_rate_) and 0.0 <= cointoss_rate_ <= 1.0
assert not (noise_level is not None and per_asset_noise_levels is not None)
if noise_level is not None:
assert np.isscalar(noise_level) and 0.0 <= noise_level
if per_asset_noise_levels is not None:
assert len(per_asset_noise_levels) == len(self.dataset.dis_videos)
for noise_level_ in per_asset_noise_levels:
assert np.isscalar(noise_level_) and 0.0 <= noise_level_
if per_asset_mean_scores is not None:
assert len(per_asset_mean_scores) == len(self.dataset.dis_videos)
for mean_score_ in per_asset_mean_scores:
assert np.isscalar(mean_score_)
dis_videos = self.dataset.dis_videos
if isinstance(dis_videos[0]['os'], dict):
pass
elif isinstance(dis_videos[0]['os'], (list, tuple)):
# converting to dict_style
for dis_video in dis_videos:
scores = dis_video['os']
dis_video['os'] = dict(zip(map(lambda x: str(x), range(len(scores))), scores))
else:
assert False
# build nested subject-asset_id dict: subj -> (asset_id -> {'score': score, 'content_id': content_id, ...})
d_subj_assetid = dict()
for dis_video in dis_videos:
for subj in dis_video['os']:
if subj not in d_subj_assetid:
d_subj_assetid[subj] = dict()
assert dis_video['asset_id'] not in d_subj_assetid[subj] # assuming no repetition for single subject and a dis_video
d_subj_assetid[subj][dis_video['asset_id']] = {'score': dis_video['os'][subj], 'content_id': dis_video['content_id']}
# prepare new dis_videos, and create index from asset_id to dis_videos
new_dis_videos = copy.deepcopy(dis_videos)
d_assetid_disvideoidx = dict() # build dict: asset_id -> index of dis_videos
for i_dis_video, | |
+
cmp_meta[f"DS_{ds_pure_id}_Description"],
"RepairAction": cmp_meta[
f"DS_{ds_pure_id}_Repair_Description"]
}})
else:
ds_combo = [f'DS{_.start() + 1}'
for _ in re.finditer(
'1', ds_map[::-1])]
ls_meta.update({f"DS{ds_id}": {
"Description": 'Combination of ' +
' & '.join(ds_combo),
"RepairAction": 'Combination of pure DS '
'repair actions.'
}})
# adjust weights to respect the assumption that at least
# one DS will occur (i.e., the case with all DSs returning
# False is not part of the event space)
sim_weights = np.array(sim_weights) / np.sum(sim_weights)
weights = sim_weights
theta_0 = median_demands[0]
theta_1 = dispersions[0]
weights = ' | '.join([f"{w:.6f}" for w in weights])
df_db.loc[cmp.Index, f'LS{LS_i}-DamageStateWeights'] = weights
# then look at the sequential DS cases
elif LS_contents[0].startswith('DS'):
# this is straightforward, store the data in the table and dict
ds_id = LS_contents[0][2]
theta_0 = getattr(cmp, f"DS_{ds_id}_Median_Demand")
theta_1 = getattr(cmp, f"DS_{ds_id}_Total_Dispersion_Beta")
ls_meta.update({f"DS{ds_id}": {
"Description": cmp_meta[f"DS_{ds_id}_Description"],
"RepairAction": cmp_meta[f"DS_{ds_id}_Repair_Description"]
}})
# FEMA P58 assumes lognormal distribution for every fragility
df_db.loc[cmp.Index, f'LS{LS_i}-Family'] = 'lognormal'
# identify incomplete cases...
# where theta is missing
if theta_0 != 'By User':
df_db.loc[cmp.Index, f'LS{LS_i}-Theta_0'] = theta_0
else:
incomplete = True
# where beta is missing
if theta_1 != 'By User':
df_db.loc[cmp.Index, f'LS{LS_i}-Theta_1'] = theta_1
else:
incomplete = True
# store the collected metadata for this limit state
meta_data['LimitStates'].update({f"LS{LS_i}": ls_meta})
# store the incomplete flag for this component
df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)
# store the metadata for this component
meta_dict.update({cmp.Index: meta_data})
# rename the index
df_db.index.name = "ID"
# convert to optimal datatypes to reduce file size
df_db = df_db.convert_dtypes()
# save the fragility data
df_db.to_csv(target_data_file)
# save the metadata
with open(target_meta_file, 'w+') as f:
json.dump(meta_dict, f, indent=2)
print("Successfully parsed and saved the fragility data from FEMA P58")
def create_FEMA_P58_bldg_repair_db(
source_file,
target_data_file='bldg_repair_DB_FEMA_P58_2nd.csv',
target_meta_file='bldg_repair_DB_FEMA_P58_2nd.json'):
"""
Create a repair consequence parameter database based on the FEMA P58 data
The method was developed to process v3.1.2 of the FragilityDatabase xls
that is provided with FEMA P58 2nd edition.
Parameters
----------
source_file: string
Path to the fragility database file.
target_data_file: string
Path where the consequence data file should be saved. A csv file is
expected.
target_meta_file: string
Path where the consequence metadata should be saved. A json file is
expected.
"""
# parse the source file
df = pd.concat(
[pd.read_excel(source_file, sheet_name=sheet, header=2, index_col=1)
for sheet in ['Summary', 'Cost Summary']], axis=1)
# remove duplicate columns
# (there are such because we joined two tables that were read separately)
df = df.loc[:, ~df.columns.duplicated()]
# remove empty rows and columns
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
# filter the columns we need for the repair database
cols_to_db = [
"Fragility Unit of Measure",
'DS Hierarchy',
]
for DS_i in range(1, 6):
cols_to_db += [
f"Best Fit, DS{DS_i}",
f"Lower Qty Mean, DS{DS_i}",
f"Upper Qty Mean, DS{DS_i}",
f"Lower Qty Cutoff, DS{DS_i}",
f"Upper Qty Cutoff, DS{DS_i}",
f"CV / Dispersion, DS{DS_i}",
f"Best Fit, DS{DS_i}.1",
f"Lower Qty Mean, DS{DS_i}.1",
f"Upper Qty Mean, DS{DS_i}.1",
f"Lower Qty Cutoff, DS{DS_i}.1",
f"Upper Qty Cutoff, DS{DS_i}.1",
f"CV / Dispersion, DS{DS_i}.2",
f"DS {DS_i}, Long Lead Time",
f'Repair Cost, p10, DS{DS_i}',
f'Repair Cost, p50, DS{DS_i}',
f'Repair Cost, p90, DS{DS_i}',
f'Time, p10, DS{DS_i}',
f'Time, p50, DS{DS_i}',
f'Time, p90, DS{DS_i}',
f'Mean Value, DS{DS_i}',
f'Mean Value, DS{DS_i}.1',
]
# filter the columns that we need for the metadata
cols_to_meta = [
"Component Name",
"Component Description",
"Construction Quality:",
"Seismic Installation Conditions:",
"Comments / Notes",
"Author",
"Fragility Unit of Measure",
"Round to Integer Unit?",
"DS 1, Description",
"DS 1, Repair Description",
"DS 2, Description",
"DS 2, Repair Description",
"DS 3, Description",
"DS 3, Repair Description",
"DS 4, Description",
"DS 4, Repair Description",
"DS 5, Description",
"DS 5, Repair Description",
]
# remove special characters to make it easier to work with column names
str_map = {
ord(' '): "_",
ord('.'): "_",
ord(':'): None,
ord('('): None,
ord(')'): None,
ord('?'): None,
ord('/'): None,
ord(','): None,
}
df_db_source = df.loc[:, cols_to_db]
df_db_source.columns = [s.translate(str_map) for s in cols_to_db]
df_db_source.sort_index(inplace=True)
df_meta = df.loc[:, cols_to_meta]
df_meta.columns = [s.translate(str_map) for s in cols_to_meta]
df_db_source.replace('BY USER', np.nan, inplace=True)
# initialize the output loss table
# define the columns
out_cols = [
"Incomplete",
"Quantity-Unit",
"DV-Unit",
]
for DS_i in range(1, 16):
out_cols += [
f"DS{DS_i}-Family",
f"DS{DS_i}-Theta_0",
f"DS{DS_i}-Theta_1",
f"DS{DS_i}-LongLeadTime",
]
# create the MultiIndex
comps = df_db_source.index.values
DVs = ['Cost', 'Time']
df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])
df_db = pd.DataFrame(
columns=out_cols,
index=df_MI,
dtype=float
)
# initialize the dictionary that stores the loss metadata
meta_dict = {}
convert_family = {
'LogNormal': 'lognormal',
'Normal': 'normal'
}
# for each component...
# (this approach is not efficient, but easy to follow which was considered
# more important than efficiency.)
for cmp in df_db_source.itertuples():
# assume the component information is complete
incomplete_cost = False
incomplete_time = False
# store units
df_db.loc[cmp.Index, 'Quantity-Unit'] = (
' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())
df_db.loc[(cmp.Index, 'Cost'), 'DV-Unit'] = "US$_2011"
df_db.loc[(cmp.Index, 'Time'), 'DV-Unit'] = "worker_day"
# get the raw metadata for the component
cmp_meta = df_meta.loc[cmp.Index, :]
# store the global (i.e., not DS-specific) metadata
# every component is assumed to have a comp. description
comments = cmp_meta['Component_Description']
# the additional fields are added to the description if they exist
if cmp_meta['Construction_Quality'] != 'Not Specified':
comments += f'\nConstruction Quality: ' \
f'{cmp_meta["Construction_Quality"]}'
if cmp_meta['Seismic_Installation_Conditions'] not in [
'Not Specified', 'Not applicable', 'Unknown', 'Any']:
comments += f'\nSeismic Installation Conditions: ' \
f'{cmp_meta["Seismic_Installation_Conditions"]}'
if cmp_meta['Comments__Notes'] != 'None':
comments += f'\nNotes: {cmp_meta["Comments__Notes"]}'
if cmp_meta['Author'] not in ['Not Given', 'By User']:
comments += f'\nAuthor: {cmp_meta["Author"]}'
# get the suggested block size and replace
# the misleading values with ea
block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]
meta_data = {
"Description": cmp_meta['Component_Name'],
"Comments": comments,
"SuggestedComponentBlockSize": ' '.join(block_size),
"RoundUpToIntegerQuantity": cmp_meta['Round_to_Integer_Unit'],
"ControllingDemand": "Damage Quantity",
"DamageStates": {}
}
# Handle components with simultaneous damage states separately
if 'Simul' in cmp.DS_Hierarchy:
# Note that we are assuming that all damage states are triggered by
# a single limit state in these components.
# This assumption holds for the second edition of FEMA P58, but it
# might need to be revisited in future editions.
cost_est = {}
time_est = {}
# get the p10, p50, and p90 estimates for all damage states
for DS_i in range(1, 6):
if not pd.isna(getattr(cmp, f'Repair_Cost_p10_DS{DS_i}')):
cost_est.update({f'DS{DS_i}': np.array([
getattr(cmp, f'Repair_Cost_p10_DS{DS_i}'),
getattr(cmp, f'Repair_Cost_p50_DS{DS_i}'),
getattr(cmp, f'Repair_Cost_p90_DS{DS_i}'),
getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}'),
getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')
])})
time_est.update({f'DS{DS_i}': np.array([
getattr(cmp, f'Time_p10_DS{DS_i}'),
getattr(cmp, f'Time_p50_DS{DS_i}'),
getattr(cmp, f'Time_p90_DS{DS_i}'),
getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1'),
getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1'),
int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES')
])})
# now prepare the equivalent mutex damage states
sim_ds_count = len(cost_est.keys())
# NOTE(review): the indentation of this extract appears flattened; the code
# below is reproduced verbatim. Logical structure: enumerate every non-empty
# combination of the sim_ds_count simulated damage states. DS_i acts as a
# bitmask (ds_map): the i-th bit from the right set means pure DS(i+1) is
# active in this combination.
ds_count = 2 ** (sim_ds_count) - 1
for DS_i in range(1, ds_count + 1):
# zero-padded binary string of length sim_ds_count, e.g. '011' = DS1+DS2
ds_map = format(DS_i, f'0{sim_ds_count}b')
# Aggregate the cost estimates of all active pure damage states.
# cost_est rows seem to hold 3 percentile values plus 2 quantity anchors
# (see cost_vals[:3] and cost_vals[3:5] below) — TODO confirm upstream.
cost_vals = np.sum([cost_est[f'DS{ds_i + 1}']
                    if ds_map[-ds_i - 1] == '1'
                    else np.zeros(5)
                    for ds_i in range(sim_ds_count)],
                   axis=0)
# Same aggregation for repair time; the 6th entry feeds the
# long-lead-time flag stored further down.
time_vals = np.sum([time_est[f'DS{ds_i + 1}']
                    if ds_map[-ds_i - 1] == '1'
                    else np.zeros(6)
                    for ds_i in range(sim_ds_count)],
                   axis=0)
# fit a distribution to the 10/50/90 cost percentiles
family_hat, theta_hat = uq.fit_distribution_to_percentiles(
    cost_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])
cost_theta = theta_hat
if family_hat == 'normal':
# for normals, express dispersion as a coefficient of variation (sig/mu)
cost_theta[1] = cost_theta[1] / cost_theta[0]
# time dispersion combines cost dispersion with an extra 0.25 term
# (presumably an additional modeling uncertainty — TODO confirm source)
time_theta = [time_vals[1],
              np.sqrt(cost_theta[1] ** 2.0 + 0.25 ** 2.0)]
# Note that here we assume that the cutoff quantities are
# identical across damage states.
# This assumption holds for the second edition of FEMA P58, but
# it might need to be revisited in future editions.
cost_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1')
cost_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1')
time_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1_1')
time_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1_1')
# store the results: Theta_0 encodes "max_val,min_val|q_low,q_up"
df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = family_hat
df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (
    f"{cost_vals[3]:g},{cost_vals[4]:g}|"
    f"{cost_qnt_low:g},{cost_qnt_up:g}")
df_db.loc[(cmp.Index, 'Cost'),
          f'DS{DS_i}-Theta_1'] = f"{cost_theta[1]:g}"
df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = family_hat
df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (
    f"{time_vals[3]:g},{time_vals[4]:g}|"
    f"{time_qnt_low:g},{time_qnt_up:g}")
df_db.loc[(cmp.Index, 'Time'),
          f'DS{DS_i}-Theta_1'] = f"{time_theta[1]:g}"
df_db.loc[(cmp.Index, 'Time'),
          f'DS{DS_i}-LongLeadTime'] = int(time_vals[5] > 0)
# metadata: a single set bit is a "pure" damage state; otherwise a combo
if ds_map.count('1') == 1:
ds_pure_id = ds_map[::-1].find('1') + 1
meta_data['DamageStates'].update({f"DS{DS_i}": {
    "Description": f"Pure DS{ds_pure_id}. " +
    cmp_meta[
        f"DS_{ds_pure_id}_Description"],
    "RepairAction":
    cmp_meta[f"DS_{ds_pure_id}_Repair_Description"]
}})
else:
ds_combo = [f'DS{_.start() + 1}'
            for _ in re.finditer('1', ds_map[::-1])]
meta_data['DamageStates'].update({f"DS{DS_i}": {
    "Description": 'Combination of ' +
    ' & '.join(ds_combo),
    "RepairAction": 'Combination of pure DS repair '
    'actions.'
}})
# for every other component...
else:
# now look at each Damage State
for DS_i in range(1, | |
- m.x831 - m.x832 - m.x833 - m.x834 == 0)
m.c483 = Constraint(expr= m.x351 - m.x835 - m.x836 - m.x837 - m.x838 - m.x839 - m.x840 - m.x841 == 0)

# Upper-bound constraints c484..c528: five banks of nine consecutive
# variables, starting at x842 and spaced 11 apart (x842, x853, x864,
# x875, x886). Within each bank, the first three variables are capped
# at 50 and the remaining six at 100.
_cix = 484
for _base in (842, 853, 864, 875, 886):
    for _off in range(9):
        _cap = 50 if _off < 3 else 100
        setattr(m, f'c{_cix}',
                Constraint(expr= getattr(m, f'x{_base + _off}') <= _cap))
        _cix += 1

# Nonnegativity constraints c529..c733, one per variable x897..x1101.
for _vix in range(897, 1102):
    setattr(m, f'c{_cix}', Constraint(expr= getattr(m, f'x{_vix}') >= 0))
    _cix += 1
m.c734 = | |
QuantumCircuit
from qiskit.tools.visualization import circuit_drawer
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.measure(q, c)
circuit_drawer(qc)
"""
image = None
config = user_config.get_config()
# Get default from config file else use text
default_output = 'text'
if config:
default_output = config.get('circuit_drawer', 'text')
if default_output == 'auto':
if _matplotlib.HAS_MATPLOTLIB:
default_output = 'mpl'
else:
default_output = 'text'
if output is None:
output = default_output
if output == 'text':
return _text_circuit_drawer(circuit, filename=filename,
reverse_bits=reverse_bits,
plot_barriers=plot_barriers,
justify=justify,
vertical_compression=vertical_compression,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold,
initial_state=initial_state,
cregbundle=cregbundle)
elif output == 'latex':
image = _latex_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout,
initial_state=initial_state,
cregbundle=cregbundle)
elif output == 'latex_source':
return _generate_latex_source(circuit,
filename=filename, scale=scale,
style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout,
initial_state=initial_state,
cregbundle=cregbundle)
elif output == 'mpl':
image = _matplotlib_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold,
ax=ax)
else:
raise exceptions.VisualizationError(
'Invalid output type %s selected. The only valid choices '
'are latex, latex_source, text, and mpl' % output)
if image and interactive:
image.show()
return image
# -----------------------------------------------------------------------------
# Plot style sheet option
# -----------------------------------------------------------------------------
def qx_color_scheme():
    """Return the default matplotlib_circuit_drawer style (IBM QX Composer look).

    Deprecated since 0.11: calling this emits a ``DeprecationWarning``; it
    will be removed no earlier than 3 months after that release.
    """
    warn('The qx_color_scheme function is deprecated as of 0.11, and '
         'will be removed no earlier than 3 months after that release '
         'date.', DeprecationWarning, stacklevel=2)
    # Per-gate display labels (LaTeX-ish strings used by the drawers).
    gate_text = {
        "id": "id", "u0": "U_0", "u1": "U_1", "u2": "U_2", "u3": "U_3",
        "x": "X", "y": "Y", "z": "Z",
        "h": "H", "s": "S", "sdg": "S^\\dagger",
        "t": "T", "tdg": "T^\\dagger",
        "rx": "R_x", "ry": "R_y", "rz": "R_z",
        "reset": "\\left|0\\right\\rangle",
    }
    # Per-gate face colors, plus the CNOT target and measurement markers.
    gate_color = {
        "id": "#ffca64",
        "u0": "#f69458", "u1": "#f69458", "u2": "#f69458", "u3": "#f69458",
        "x": "#a6ce38", "y": "#a6ce38", "z": "#a6ce38",
        "h": "#00bff2", "s": "#00bff2", "sdg": "#00bff2",
        "t": "#ff6666", "tdg": "#ff6666",
        "rx": "#ffca64", "ry": "#ffca64", "rz": "#ffca64",
        "reset": "#d7ddda",
        "target": "#00bff2",
        "meas": "#f070aa",
    }
    return {
        "comment": "Style file for matplotlib_circuit_drawer (IBM QX Composer style)",
        "textcolor": "#000000",
        "gatetextcolor": "#000000",
        "subtextcolor": "#000000",
        "linecolor": "#000000",
        "creglinecolor": "#b9b9b9",
        "gatefacecolor": "#ffffff",
        "barrierfacecolor": "#bdbdbd",
        "backgroundcolor": "#ffffff",
        "fold": 20,
        "fontsize": 13,
        "subfontsize": 8,
        "figwidth": -1,
        "dpi": 150,
        "displaytext": gate_text,
        "displaycolor": gate_color,
        "latexdrawerstyle": True,
        "usepiformat": False,
        "cregbundle": False,
        "showindex": False,
        "compress": True,
        "margin": [2.0, 0.0, 0.0, 0.3],
        "creglinestyle": "solid",
        "reversebits": False,
    }
# -----------------------------------------------------------------------------
# _text_circuit_drawer
# -----------------------------------------------------------------------------
def _text_circuit_drawer(circuit, filename=None, reverse_bits=False,
                         plot_barriers=True, justify=None, vertical_compression='high',
                         idle_wires=True, with_layout=True, fold=None, initial_state=True,
                         cregbundle=False):
    """Render *circuit* as ASCII art.

    Args:
        circuit (QuantumCircuit): circuit to draw.
        filename (str): if given, the drawing is also written to this path.
        reverse_bits (bool): draw bits in reverse order.
        plot_barriers (bool): include barriers in the drawing.
        justify (str): ``left``, ``right`` or ``none`` (default ``left``).
        vertical_compression (str): ``high``, ``medium`` or ``low``; merges
            lines so the drawing takes less vertical room.
        idle_wires (bool): include wires with no operations.
        with_layout (bool): label wires with the circuit's physical layout,
            when one is attached.
        fold (int): break the drawing at this width; ``None`` guesses the
            terminal width, ``-1`` disables folding.
        initial_state (bool): prefix each wire with ``|0>``.
        cregbundle (bool): bundle classical registers into one wire.

    Returns:
        TextDrawing: object whose ``str()`` is the ASCII-art circuit.
    """
    qregs, cregs, ops = utils._get_layered_instructions(
        circuit, reverse_bits=reverse_bits, justify=justify,
        idle_wires=idle_wires)
    layout = circuit._layout if with_layout else None
    drawing = _text.TextDrawing(qregs, cregs, ops, layout=layout,
                                initial_state=initial_state,
                                cregbundle=cregbundle)
    drawing.plotbarriers = plot_barriers
    drawing.line_length = fold
    drawing.vertical_compression = vertical_compression
    if filename:
        drawing.dump(filename)
    return drawing
# -----------------------------------------------------------------------------
# latex_circuit_drawer
# -----------------------------------------------------------------------------
def _latex_circuit_drawer(circuit,
                          scale=0.7,
                          filename=None,
                          style=None,
                          plot_barriers=True,
                          reverse_bits=False,
                          justify=None,
                          idle_wires=True,
                          with_layout=True,
                          initial_state=False,
                          cregbundle=False):
    """Draw a quantum circuit based on latex (Qcircuit package)
    Requires version >=2.6.0 of the qcircuit LaTeX package.
    Args:
        circuit (QuantumCircuit): a quantum circuit
        scale (float): scaling factor
        filename (str): file path to save image to
        style (dict or str): dictionary of style or file name of style file
        reverse_bits (bool): When set to True reverse the bit order inside
            registers for the output visualization.
        plot_barriers (bool): Enable/disable drawing barriers in the output
            circuit. Defaults to True.
        justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
            the circuit should be justified.
        idle_wires (bool): Include idle wires. Default is True.
        with_layout (bool): Include layout information, with labels on the physical
            layout. Default: True
        initial_state (bool): Optional. Adds |0> in the beginning of the line. Default: `False`.
        cregbundle (bool): Optional. If set True bundle classical registers.
            Default: ``False``.
    Returns:
        PIL.Image: an in-memory representation of the circuit diagram
    Raises:
        OSError: usually indicates that ```pdflatex``` or ```pdftocairo``` is
            missing.
        CalledProcessError: usually points errors during diagram creation.
        ImportError: if pillow is not installed
    """
    # Pipeline: emit .tex -> compile with pdflatex -> rasterize the PDF to
    # PNG with pdftocairo (poppler) -> load and trim with pillow. All
    # intermediate files live in a temp dir that is removed on exit.
    tmpfilename = 'circuit'
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmppath = os.path.join(tmpdirname, tmpfilename + '.tex')
        _generate_latex_source(circuit, filename=tmppath,
                               scale=scale, style=style,
                               plot_barriers=plot_barriers,
                               reverse_bits=reverse_bits, justify=justify,
                               idle_wires=idle_wires, with_layout=with_layout,
                               initial_state=initial_state,
                               cregbundle=cregbundle)
        try:
            subprocess.run(["pdflatex", "-halt-on-error",
                            "-output-directory={}".format(tmpdirname),
                            "{}".format(tmpfilename + '.tex')],
                           stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
                           check=True)
        except OSError as ex:
            # ENOENT means the pdflatex binary itself is missing; warn with a
            # hint, but re-raise in every case — the warning is advisory only.
            if ex.errno == errno.ENOENT:
                logger.warning('WARNING: Unable to compile latex. '
                               'Is `pdflatex` installed? '
                               'Skipping latex circuit drawing...')
            raise
        except subprocess.CalledProcessError as ex:
            # pdflatex ran but failed: persist its stdout for debugging.
            # The temp dir is about to vanish, so write into the CWD.
            with open('latex_error.log', 'wb') as error_file:
                error_file.write(ex.stdout)
            logger.warning('WARNING Unable to compile latex. '
                           'The output from the pdflatex command can '
                           'be found in latex_error.log')
            raise
        else:
            if not HAS_PIL:
                raise ImportError('The latex drawer needs pillow installed. '
                                  'Run "pip install pillow" before using the '
                                  'latex drawer.')
            try:
                base = os.path.join(tmpdirname, tmpfilename)
                # Convert the compiled PDF to a single PNG, quietly.
                subprocess.run(["pdftocairo", "-singlefile", "-png", "-q",
                                base + '.pdf', base], check=True)
                image = Image.open(base + '.png')
                image = utils._trim(image)
                os.remove(base + '.png')
                if filename:
                    image.save(filename, 'PNG')
            except (OSError, subprocess.CalledProcessError) as ex:
                logger.warning('WARNING: Unable to convert pdf to image. '
                               'Is `poppler` installed? '
                               'Skipping circuit drawing...')
                raise
    return image
def _generate_latex_source(circuit, filename=None,
                           scale=0.7, style=None, reverse_bits=False,
                           plot_barriers=True, justify=None, idle_wires=True,
                           with_layout=True, initial_state=False, cregbundle=False):
    """Render *circuit* as LaTeX (qcircuit) source.

    Args:
        circuit (QuantumCircuit): circuit to render.
        filename (str): if given, the LaTeX is also written to this path.
        scale (float): image scaling factor.
        style (dict or str): style dictionary or path to a style file.
        reverse_bits (bool): draw register bits in reverse order.
        plot_barriers (bool): include barriers in the output.
        justify (str): ``left``, ``right`` or ``none`` (default ``left``).
        idle_wires (bool): include wires with no operations.
        with_layout (bool): label wires with the circuit's physical layout,
            when one is attached.
        initial_state (bool): prefix each wire with ``|0>``.
        cregbundle (bool): bundle classical registers into one wire.

    Returns:
        str: LaTeX source suitable for writing to a ``.tex`` file.
    """
    qregs, cregs, ops = utils._get_layered_instructions(
        circuit, reverse_bits=reverse_bits, justify=justify,
        idle_wires=idle_wires)
    layout = circuit._layout if with_layout else None
    image = _latex.QCircuitImage(qregs, cregs, ops, scale, style=style,
                                 plot_barriers=plot_barriers,
                                 reverse_bits=reverse_bits, layout=layout,
                                 initial_state=initial_state,
                                 cregbundle=cregbundle)
    latex = image.latex()
    if filename:
        with open(filename, 'w') as latex_file:
            latex_file.write(latex)
    return latex
# -----------------------------------------------------------------------------
# matplotlib_circuit_drawer
# -----------------------------------------------------------------------------
def _matplotlib_circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
plot_barriers=True,
reverse_bits=False,
justify=None,
idle_wires=True,
with_layout=True,
fold=None,
ax=None):
"""Draw a quantum circuit based on matplotlib.
If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a circuit inline.
We recommend `%config InlineBackend.figure_format = 'svg'` for the inline visualization.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str): `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
idle_wires (bool): Include idle wires. Default is True.
with_layout (bool): Include layout information, with labels on the physical
layout. Default: True.
fold (int): amount ops allowed before folding. Default is 25.
ax (matplotlib.axes.Axes): An optional Axes object to be used for
the visualization output. If none is specified a new matplotlib
Figure will be created and used. Additionally, if specified | |
# Repository: OdedH/CLIP-ViL
# coding=utf-8
# Copyleft 2019 project LXRT.
import collections
import os
import random
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from param import args
from pretrain.lxmert_data import InputExample, LXMERTDataset, LXMERTTorchDataset, LXMERTEvaluator
from lxrt.entry import set_visual_config
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTPretraining
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
from src.tasks.vision_helpers import GroupedBatchSampler, create_aspect_ratio_groups_cache
from lxrt.visual_transformers import adjust_learning_rate
from src.tools.load_stagte_dict import load_state_dict_flexible_with_fp16, load_state_dict_flexible
import gc
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
DataTuple = collections.namedtuple("DataTuple", 'dataset torchdset loader evaluator')
# One-time process/device setup, executed at import time.
if args.distributed:
    # NCCL backend for multi-GPU distributed pretraining; each process is
    # pinned to the GPU given by its launcher-assigned local_rank.
    dist.init_process_group(backend='nccl')
    torch.cuda.set_device(args.local_rank)
    args.gpus = torch.cuda.device_count()
    args.world_size = args.gpus * args.nodes
# NOTE(review): duplicates the assignment inside the branch above when
# distributed is on — harmless, but likely leftover from a refactor.
args.gpus = torch.cuda.device_count()
# Fall back to GPU 0 when not launched through a distributed launcher.
args.gpu = args.local_rank if args.local_rank != -1 else 0
args.device = torch.device("cuda", args.gpu)
def get_tuple(splits: str, bs: int, shuffle=False, drop_last=False, topk=-1,
              distributed=False, aspect_ratio_group_factor=-1) -> DataTuple:
    """Build the dataset / loader / evaluator bundle for one split list.

    Args:
        splits: comma-separated split names (passed to LXMERTDataset).
        bs: per-process batch size.
        shuffle: shuffle samples each epoch.
        drop_last: drop the final, smaller-than-bs batch.
        topk: keep only the first ``topk`` examples (-1 keeps all).
        distributed: shard the dataset across ranks with DistributedSampler.
        aspect_ratio_group_factor: when >= 0, batch images of similar aspect
            ratio together via the cached grouping helper.

    Returns:
        DataTuple: (dataset, torchdset, loader, evaluator).
    """
    # Decide which QA datasets would be used in pre-training.
    # Options: vqa, gqa, visual7w
    # Note: visual7w is a part of vgqa, we take the name here.
    qa_sets = args.qa_sets
    if qa_sets is not None:
        qa_sets = set(qa_set.lower().strip() for qa_set in qa_sets.split(","))
    # Build dataset, data loader, and evaluator.
    dset = LXMERTDataset(splits, qa_sets=qa_sets)
    tset = LXMERTTorchDataset(dset, topk)
    if distributed:
        train_sampler = DistributedSampler(
            tset,
            num_replicas=args.world_size,
            rank=args.local_rank,
            shuffle=shuffle,
        )
    elif shuffle:
        train_sampler = torch.utils.data.RandomSampler(tset)
    else:
        train_sampler = torch.utils.data.SequentialSampler(tset)
    if aspect_ratio_group_factor >= 0:
        group_ids = create_aspect_ratio_groups_cache(tset, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, bs)
    else:
        # BUG FIX: honor the caller's drop_last argument. Previously this was
        # hard-coded to True, so the validation split (called with
        # drop_last=False) silently lost its final partial batch.
        train_batch_sampler = torch.utils.data.BatchSampler(
            train_sampler, bs, drop_last=drop_last)
    data_loader = DataLoader(
        tset,
        batch_sampler=train_batch_sampler,
        num_workers=args.num_workers,
        collate_fn=tset.collate_fn,
        pin_memory=True
    )
    evaluator = LXMERTEvaluator(dset)
    print()
    return DataTuple(dataset=dset, torchdset=tset, loader=data_loader, evaluator=evaluator)
# Build the training and validation pipelines at import time.
train_tuple = get_tuple(args.train, args.batch_size, shuffle=True, drop_last=True, distributed=args.distributed, aspect_ratio_group_factor = args.aspect_ratio_group_factor)
# NOTE(review): both branches yield 16 — the multiGPU distinction is vacuous
# here; probably a leftover from an earlier per-GPU batch-size tweak.
valid_batch_size = 16 if args.multiGPU else 16
# Validation keeps only the first 5000 examples (topk) so eval stays fast.
valid_tuple = get_tuple(args.valid, valid_batch_size, shuffle=False, drop_last=False, topk=5000)
# Display order of the individual pretraining loss terms.
LOSSES_NAME = ('Mask_LM', 'Matched', 'Obj', 'Attr', 'Feat', 'QA')
def to_gpu(tensor, device=None):
    """Move a tensor to the GPU (or to *device* when given).

    Non-tensor values — including ``None`` — are returned unchanged, which
    lets callers map this over heterogeneous batch dictionaries.
    """
    if not isinstance(tensor, torch.Tensor):
        return tensor
    return tensor.to(device) if device is not None else tensor.cuda()
class LXMERT:
    def __init__(self, max_seq_length):
        """Set up the LXMERT pretraining harness.

        Builds the tokenizer and the ``LXRTPretraining`` model, optionally
        re-initializes or loads weights, moves the model to ``args.device``,
        and configures the optimizer/scheduler for either the distributed
        (apex fp16 / DDP) path or the plain single-/multi-GPU path.

        Args:
            max_seq_length (int): maximum token sequence length, kept on the
                instance for downstream feature conversion.
        """
        super().__init__()
        self.max_seq_length = max_seq_length
        # WordPiece tokenizer matching the BERT backbone.
        self.tokenizer = BertTokenizer.from_pretrained(
            "bert-base-uncased",
            do_lower_case=True
        )
        # Build model
        set_visual_config(args)
        self.model = LXRTPretraining.from_pretrained(
            "bert-base-uncased",
            task_mask_lm=args.task_mask_lm,
            task_obj_predict=args.task_obj_predict,
            task_matched=args.task_matched,
            task_qa=args.task_qa,
            visual_losses=args.visual_losses,
            num_answers=train_tuple.dataset.answer_table.num_answers
        )
        # Weight initialization and loading
        if args.from_scratch:
            print("Train from Scratch: re-initialize all BERT weights.")
            self.model.apply(self.model.init_bert_weights)
        if args.load_lxmert is not None:
            # Load lxmert would not load the answer head.
            self.load_lxmert(args.load_lxmert)
        #print(list(state_dict))
        self.model = self.model.to(args.device)
        if args.distributed:
            # Standard BERT practice: no weight decay for biases / LayerNorm.
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            from transformers import AdamW, get_linear_schedule_with_warmup
            if args.use_separate_optimizer_for_visual:
                from lxrt.visual_transformers import FusedOptimizer
                # Rebuild the groups excluding the visual backbone; it gets
                # its own optimizer (AdamW or SGD) below.
                optimizer_grouped_parameters = [
                    {
                        "params": [p for n, p in self.model.named_parameters() if ( (not any(nd in n for nd in no_decay)) and ("visual_model" not in n) ) ],
                        "weight_decay": args.weight_decay,
                    },
                    {
                        "params": [p for n, p in self.model.named_parameters() if ( (any(nd in n for nd in no_decay)) and ("visual_model" not in n ))],
                        "weight_decay": 0.0,
                    },
                ]
                optim = AdamW(optimizer_grouped_parameters,
                              lr=args.lr,
                              #betas=(0.9, 0.98),
                              eps=args.adam_epsilon)
                #sgd_parameters = self.model.bert.encoder.visual_model.parameters()
                if args.use_adam_for_visual:
                    # NOTE(review): the '"visual_model" not in n' filters look
                    # vacuous here since we iterate visual_model's own named
                    # parameters — confirm against lxrt.modeling naming.
                    optimizer_grouped_parameters = [
                        {
                            "params": [p for n, p in self.model.bert.encoder.visual_model.named_parameters() if ( (not any(nd in n for nd in no_decay)) and ("visual_model" not in n) ) ],
                            "weight_decay": args.weight_decay,
                        },
                        {
                            "params": [p for n, p in self.model.bert.encoder.visual_model.named_parameters() if ( (any(nd in n for nd in no_decay)) and ("visual_model" not in n ))],
                            "weight_decay": 0.0,
                        },
                    ]
                    sgd = AdamW(optimizer_grouped_parameters,
                                lr=args.sgd_lr,
                                #betas=(0.9, 0.98),
                                eps=args.adam_epsilon)
                else:
                    sgd = torch.optim.SGD(self.model.bert.encoder.visual_model.parameters(), args.sgd_lr,
                                          momentum=args.sgd_momentum,
                                          weight_decay=args.sgd_weight_decay)
                # Single facade that steps both optimizers together.
                self.optim = FusedOptimizer([optim, sgd])
                batch_per_epoch = len(train_tuple.loader)
                t_total = int(batch_per_epoch * args.epochs) // args.gradient_accumulation_steps
                # Warmup schedule is attached to the AdamW part only; the
                # visual optimizer's LR is adjusted per-epoch in train().
                self.scheduler = get_linear_schedule_with_warmup(
                    optim, num_warmup_steps=args.warmup_ratio * t_total, num_training_steps=t_total)
            else:
                self.optim = AdamW(optimizer_grouped_parameters,
                                   lr=args.lr,
                                   #betas=(0.9, 0.98),
                                   eps=args.adam_epsilon)
                batch_per_epoch = len(train_tuple.loader)
                t_total = int(batch_per_epoch * args.epochs) // args.gradient_accumulation_steps
                self.scheduler = get_linear_schedule_with_warmup(
                    self.optim, num_warmup_steps=args.warmup_ratio * t_total, num_training_steps=t_total
                )
            if args.fp16:
                # apex AMP wraps the model and optimizer(s); apex's DDP is
                # used alongside it, torch DDP otherwise.
                if args.use_separate_optimizer_for_visual:
                    self.model, [optim, sgd] = amp.initialize(self.model, self.optim.optimizers, enabled=args.fp16, opt_level=args.fp16_opt_level)
                    self.optim = FusedOptimizer([optim, sgd])
                else:
                    self.model, self.optim = amp.initialize(self.model, self.optim, enabled=args.fp16, opt_level=args.fp16_opt_level)
                from apex.parallel import DistributedDataParallel as DDP
                self.model = DDP(self.model)
            else:
                self.model = torch.nn.parallel.DistributedDataParallel(
                    self.model, device_ids=[args.gpu], find_unused_parameters=True
                )
        else:
            # GPU Options
            if args.multiGPU:
                self.model = nn.DataParallel(self.model)
            # Optimizer
            from lxrt.optimization import BertAdam
            batch_per_epoch = len(train_tuple.loader)
            t_total = int(batch_per_epoch * args.epochs)
            warmup_ratio = 0.05
            warmup_iters = int(t_total * warmup_ratio)
            print("Batch per epoch: %d" % batch_per_epoch)
            print("Total Iters: %d" % t_total)
            print("Warm up Iters: %d" % warmup_iters)
            self.optim = BertAdam(self.model.parameters(), lr=args.lr, warmup=warmup_ratio, t_total=t_total)
        if args.load is not None:
            self.load(args.load)
        # Reclaim any transient allocations from model construction/loading.
        torch.cuda.empty_cache()
        gc.collect()
def forward(self, examples):
'''train_features = [convert_example_to_features(example, self.max_seq_length, self.tokenizer)
for example in examples]
# language Inputs
input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()
input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()
segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()
# Visual Inputs
feats = torch.from_numpy(np.stack([f.visual_feats[0] for f in train_features])).cuda()
pos = torch.from_numpy(np.stack([f.visual_feats[1] for f in train_features])).cuda()
# Language Prediction
lm_labels = torch.tensor([f.lm_label_ids for f in train_features], dtype=torch.long).cuda()
# Visual Prediction
obj_labels = {}
for key in ('obj', 'attr', 'feat'):
visn_labels = torch.from_numpy(np.stack([f.obj_labels[key][0] for f in train_features])).cuda()
visn_mask = torch.from_numpy(np.stack([f.obj_labels[key][1] for f in train_features])).cuda()
assert visn_labels.size(0) == visn_mask.size(0) and visn_labels.size(1) == visn_mask.size(1)
obj_labels[key] = (visn_labels, visn_mask)
# Joint Prediction
matched_labels = torch.tensor([f.is_matched for f in train_features], dtype=torch.long).cuda()
ans = torch.from_numpy(np.stack([f.ans for f in train_features])).cuda() '''
"""
forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
visual_feats=None, pos=None, obj_labels=None, matched_label=None, ans=None):
"""
new_examples = {}
for key in list(examples.keys()):
if key != "uid":
new_examples[key] = to_gpu(examples[key])
loss, losses, ans_logit = self.model(
**new_examples
)
return loss, losses.detach().cpu(), ans_logit
def valid_batch(self, batch):
with torch.no_grad():
loss, losses, ans_logit = self.forward(batch)
if args.multiGPU:
loss = loss.mean()
losses = losses.mean(0)
return loss.item(), losses.cpu().numpy(), ans_logit
    def train(self, train_tuple: DataTuple, eval_tuple: DataTuple):
        """Run the pre-training loop over `train_tuple` for `args.epochs` epochs.

        Per step: forward, (optionally fp16-scaled) backward, and an optimizer
        step every `args.gradient_accumulation_steps` micro-batches. Progress is
        reported every `args.report_step` steps and checkpoints are written every
        `args.save_step` optimizer steps plus once per epoch (rank <= 0 only).
        `eval_tuple` is currently unused — the per-epoch eval call is commented
        out at the bottom.
        """
        train_ld = train_tuple.loader
        # Train
        # NOTE(review): best_eval_loss is never updated or read — the
        # evaluation call at the end of the epoch is commented out.
        best_eval_loss = 9595.
        for epoch in range(args.start_epoch, args.epochs):
            # Train
            self.model.train()
            total_loss = 0.
            total_losses = 0.
            uid2ans = {}
            # Imported lazily inside the loop (kept as-is; moving it would
            # be a code change).
            from utils import TrainingMeter
            train_meter = TrainingMeter()
            if args.use_separate_optimizer_for_visual:
                # The visual backbone has its own (last) optimizer with a
                # manually adjusted schedule.
                adjust_learning_rate(self.optim.optimizers[-1], epoch, args)
            for i, batch in enumerate(tqdm(train_ld, total=len(train_ld))):
                # Smoke-test mode: stop after a handful of batches.
                if args.skip_training and i == 4:
                    break
                loss, losses, ans_logit = self.forward(batch)
                if args.multiGPU:
                    loss = loss.mean()
                    losses = losses.squeeze(0)
                # Scale the loss so accumulated gradients average correctly.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    if args.use_separate_optimizer_for_visual:
                        with amp.scale_loss(loss, self.optim.optimizers) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        with amp.scale_loss(loss, self.optim) as scaled_loss:
                            scaled_loss.backward()
                else:
                    loss.backward()
                # Step the optimizer only on accumulation boundaries.
                if (i + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        total_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(self.optim), args.max_grad_norm)
                    else:
                        total_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), args.max_grad_norm)
                    self.optim.step()
                    if args.distributed:
                        self.scheduler.step()  # Update learning rate schedule
                    self.model.zero_grad()
                    #self.optim.step()
                loss = loss.item()
                losses = losses.cpu().numpy()
                logit = ans_logit
                total_loss += loss
                total_losses += losses
                if args.task_qa:
                    # Record the argmax answer for each example for QA eval.
                    score, label = logit.max(1)
                    for uid, l in zip(batch["uid"], label.cpu().numpy()):
                        ans = train_tuple.dataset.answer_table.id2ans(l)
                        uid2ans[uid] = ans
                # NOTE(review): 'totol_loss' (sic) is the key TrainingMeter
                # reports under; renaming it would change the logged output.
                train_meter.update(
                    {'totol_loss': loss*args.gradient_accumulation_steps,
                     "masked_lm": losses[0],
                     "matched": losses[1],
                     "qa_loss": losses[2] if len(losses) == 3 else 0.0,
                    }
                )
                if i != 0 and i % args.report_step == 0 and args.local_rank <= 0:
                    print("Epoch {}, Training Step {} of {}".format(epoch, i // args.gradient_accumulation_steps, len(train_ld) // args.gradient_accumulation_steps ))
                    train_meter.report()
                    train_meter.clean()
                # Periodic mid-epoch checkpoint (rank <= 0 only).
                if i != 0 and args.save_step != -1 and (i // args.gradient_accumulation_steps) % args.save_step == 0 and args.local_rank <= 0:
                    self.save("Epoch{}Step{}".format(epoch+1, i // args.gradient_accumulation_steps ))
            #if args.task_qa:
            #    train_tuple.evaluator.evaluate(uid2ans, pprint=True)
            # Save
            if args.local_rank <= 0:
                self.save("Epoch%02d" % (epoch+1))
            # Eval
            #avg_eval_loss = self.evaluate_epoch(eval_tuple, iters=-1)
def evaluate_epoch(self, eval_tuple: DataTuple, iters: int=-1):
self.model.eval()
eval_ld = eval_tuple.loader
total_loss = 0.
total_losses = 0.
uid2ans = {}
for i, batch in enumerate(tqdm(eval_ld)):
loss, losses, logit = self.valid_batch(batch)
total_loss += loss
total_losses += losses
if args.task_qa:
score, label = logit.max(1)
for uid, l in zip(batch["uid"], label.cpu().numpy()):
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
if i == iters:
break
if args.local_rank <= 0:
print("The valid loss is %0.4f" % (total_loss / len(eval_ld)))
losses_str = "The losses are "
total_losses = total_losses.squeeze(0)
for name, loss in zip(LOSSES_NAME, total_losses / len(eval_ld)):
losses_str += "%s: %0.4f " % (name, loss)
print(losses_str)
if args.task_qa:
eval_tuple.evaluator.evaluate(uid2ans, pprint=True)
return total_loss / len(eval_ld)
def save(self, name):
torch.save(self.model.state_dict(),
os.path.join(args.output, "%s_LXRT.pth" % name))
if args.use_separate_optimizer_for_visual:
torch.save(self.optim.optimizers[0].state_dict(), os.path.join(args.output, "%s_LXRT_AdamOptim.pth" % name))
torch.save(self.optim.optimizers[1].state_dict(), os.path.join(args.output, "%s_LXRT_SGDOptim.pth" % name))
else:
torch.save(self.optim.state_dict(), os.path.join(args.output, "%s_LXRT_AdamOptim.pth" | |
import pandas as pd
from Bio.PDB import Selection, PDBParser, Superimposer
# from Bio.PDB.vectors import rotmat, Vector
import numpy as np
import h5py
from tqdm import tqdm
import os
import multiprocessing as mp
"""
PDB file --> beads center DataFrame --> local structure --> rotated local structure
Functions in this version can handle PDB files containing multiple chains.
"""
def extract_beads(pdb_id, decoy, decoy_set='3DRobot_set'):
    """Parse a decoy PDB file and write one bead (CA + CB) per residue to CSV.

    Residues whose 3-letter code is not one of the 20 natural amino acids
    are skipped. Glycine has no CB atom, so its CA coordinate is re-used as
    the CB position. Output goes to ``{decoy}_bead.csv`` next to the PDB.
    """
    amino_acids = pd.read_csv('data/amino_acids.csv')
    vocab_aa = {x.upper() for x in amino_acids.AA3C}
    vocab_dict = {x.upper(): y for x, y in zip(amino_acids.AA3C, amino_acids.AA)}
    structure = PDBParser().get_structure(
        'X', f'data/decoys/{decoy_set}/{pdb_id}/{decoy}.pdb')
    chains, res_nums, res_names = [], [], []
    ca_coords, cb_coords = [], []
    for res in Selection.unfold_entities(structure, 'R'):
        name3 = res.get_resname()
        if name3 not in vocab_aa:
            # non-natural amino acid / hetero residue: skip silently
            continue
        chains.append(res.parent.id)
        res_names.append(vocab_dict[name3])
        res_nums.append(res.id[1])
        ca = res['CA'].get_coord()
        ca_coords.append(ca)
        # glycine has no CB; fall back to the CA position
        cb_coords.append(res['CB'].get_coord() if name3 != 'GLY' else res['CA'].get_coord())
    ca = np.vstack(ca_coords)
    cb = np.vstack(cb_coords)
    df = pd.DataFrame({'chain_id': chains,
                       'group_num': res_nums,
                       'group_name': res_names,
                       'x': ca[:, 0],
                       'y': ca[:, 1],
                       'z': ca[:, 2],
                       'xcb': cb[:, 0],
                       'ycb': cb[:, 1],
                       'zcb': cb[:, 2]})
    df.to_csv(f'data/decoys/{decoy_set}/{pdb_id}/{decoy}_bead.csv', index=False)
def _rotation_matrix(c1, c2):
z = np.cross(c1, c2)
x = c1
y = np.cross(z, x)
x = x / np.sqrt(np.sum(x ** 2))
y = y / np.sqrt(np.sum(y ** 2))
z = z / np.sqrt(np.sum(z ** 2))
R = np.vstack([x, y, z])
# Rinv = np.linalg.inv(R.T)
return R
def extract_one_topk(pdb_id, decoy, k=10,
                     decoy_set='3DRobot_set', profile_set='pdb_profile_validation',
                     reorder=True):
    """Build rotated local-structure records for every residue of a decoy.

    For each residue that has both +/-1 and +/-2 sequence neighbours, the
    5-residue central segment plus the *k* nearest other residues are
    translated to the residue's frame, rotated by `_rotation_matrix`, and
    collected into a DataFrame, which is written as an HDF5 file next to
    the decoy.

    Returns:
        int: 1 on success, 0 when the protein is too short (<20 residues)
        or the coordinates do not match the evolutionary profile.
    """
    df_profile = pd.read_csv(f'data/decoys/{decoy_set}/{profile_set}/{pdb_id}_profile.csv')
    df_coords = pd.read_csv(f'data/decoys/{decoy_set}/{pdb_id}/{decoy}_bead.csv')
    # group_num and evo_profile are un-masked: they trace the original
    # sequence (peptide-bond connection).
    group_num = np.arange(df_profile.shape[0])
    evo_profile = df_profile[[f'aa{i}' for i in range(20)]].values
    if len(group_num) < 20:
        return 0
    idx = df_profile['mask'] == 1
    if (decoy_set == 'casp11') & (decoy != f'{pdb_id}.native'):
        df_coords = df_coords[idx]
    # Sanity check: masked profile sequence must equal the bead sequence.
    seq = ''.join(df_profile[idx]['seq'])
    seq2 = ''.join(df_coords['group_name'])
    if (df_profile[idx].shape[0] != df_coords.shape[0]) | (seq != seq2):
        print('PDB and profile shape do not match')
        return 0
    group_num = group_num[idx]
    group_name = df_profile['seq'].values[idx]
    group_coords = df_coords[['x', 'y', 'z']].values  # coords may have missing residues.
    df_list = []
    for i, gc in enumerate(group_num):
        # need both sequence neighbours on each side to define the segment
        if (gc-1 not in group_num) | (gc+1 not in group_num) | (gc-2 not in group_num) | (gc+2 not in group_num):
            continue
        # coords of the previous 2 and next 2 groups in local peptide segment
        cen_i = (group_num == gc)
        pre_i = (group_num == gc-1)
        next_i = (group_num == gc+1)
        pre2_i = (group_num == gc-2)
        next2_i = (group_num == gc+2)
        coords = group_coords - group_coords[cen_i]  # center
        c1 = coords[pre_i]
        c2 = coords[next_i]
        # degenerate geometry (duplicate coordinates): abort this decoy
        if np.sum(c1**2) == 0:
            break
        if np.sum(c2**2) == 0:
            break
        rotate_mat = _rotation_matrix(c1, c2)
        # get central segment
        ind = (cen_i | pre_i | next_i | pre2_i | next2_i)
        gnum_seg = group_num[ind]
        gname_seg = group_name[ind]
        coords_seg = coords[ind]
        coords_seg = np.squeeze(np.matmul(rotate_mat[None, :, :], coords_seg[:, :, None]))
        # get nearest k residues from other residues
        gnum_others = group_num[~ind]
        gname_others = group_name[~ind]
        coords_others = coords[~ind]
        dist_i = np.sqrt((coords_others**2).sum(axis=1))
        dist_i_arg = np.argsort(dist_i)
        topk_arg = dist_i_arg[:k]
        # neighbour counts inside 8/10/12 Angstrom shells
        count_8a = dist_i[dist_i < 8].shape[0]
        count_10a = dist_i[dist_i < 10].shape[0]
        count_12a = dist_i[dist_i < 12].shape[0]
        gnum_topk = gnum_others[topk_arg]
        gname_topk = gname_others[topk_arg]
        coords_topk = coords_others[topk_arg]
        coords_topk = np.squeeze(np.matmul(rotate_mat[None, :, :], coords_topk[:, :, None]))
        # concat central segment and top_k
        gnum = np.append(gnum_seg, gnum_topk)
        gname = np.append(gname_seg, gname_topk)
        coords = np.vstack((coords_seg, coords_topk))
        distance = np.sqrt(np.sum(coords**2, axis=1))
        # segment label: 0..4 for the 5 central residues, 5 for the rest
        segment_info = np.ones(gnum.shape[0], dtype=int) * 5
        segment_info[gnum == gc] = 0
        segment_info[gnum == gc-1] = 1
        segment_info[gnum == gc+1] = 2
        segment_info[gnum == gc-2] = 3
        segment_info[gnum == gc+2] = 4
        df_g = pd.DataFrame({'center_num': gc,
                             'group_num': gnum,
                             'group_name': gname,
                             'x': coords[:, 0],
                             'y': coords[:, 1],
                             'z': coords[:, 2],
                             'distance': distance,
                             'segment': segment_info,
                             'count8a': count_8a,
                             'count10a': count_10a,
                             'count12a': count_12a})
        df_g = df_g.sort_values(by=['segment', 'distance'])
        if reorder:
            df_g = df_g.sort_values(by=['segment', 'group_num'])
            gnum = df_g['group_num'].values
            distance = df_g['distance'].values
            # set segment id
            # BUG FIX: dtype was np.int, an alias removed in NumPy 1.24;
            # the builtin int is the documented replacement.
            seg = np.ones(15, dtype=int)
            seg[5] = 2
            for i in range(6, 15):
                if gnum[i] == gnum[i - 1] + 1:
                    seg[i] = seg[i - 1]
                else:
                    seg[i] = seg[i - 1] + 1
            # calculate mean distance of segment
            seg_dist = np.zeros(15)
            for i in range(5, 15):
                seg_dist[i] = distance[seg == seg[i]].mean()
            df_g['seg'] = seg
            df_g['seg_dist'] = seg_dist
            df_g = df_g.sort_values(by=['segment', 'seg_dist', 'group_num'])
        df_list.append(df_g)
    if len(df_list) > 0:
        df = pd.concat(df_list, ignore_index=True)
        group_profile = evo_profile[df['group_num'].values]
        for i in range(20):
            df[f'aa{i}'] = group_profile[:, i]
        # df.to_csv(f'data/decoys/3DRobot_set/{pdb_id}/{decoy}_local_rot.csv', index=False, float_format='%.3f')
        amino_acids = pd.read_csv('data/amino_acids.csv')
        vocab = {x.upper(): y - 1 for x, y in zip(amino_acids.AA, amino_acids.idx)}
        # each local structure is 5 central + 10 nearest = 15 residues
        k = 15
        seq = df['group_name'].apply(lambda x: vocab[x])
        seq = seq.values.reshape((-1, k))
        coords = df[['x', 'y', 'z']].values.reshape((-1, k, 3))
        profile = df[[f'aa{i}' for i in range(20)]].values.reshape((-1, k, 20))
        res_counts = df[['count8a', 'count10a', 'count12a']].values.reshape(-1, 15, 3)[:, 0, :]
        if reorder:
            group_num = df['group_num'].values.reshape((-1, k))
            seg = df['seg'].values
            seg = seg.reshape(-1, k)
            # start_id marks residues continuing the previous one's segment
            start_id = np.zeros_like(seg)
            idx = (seg[:, 1:] - seg[:, :-1] == 0)
            start_id[:, 1:][idx] = 1
            decoy_file_name = f'{decoy}_local_rot_CA.h5'
        else:
            decoy_file_name = f'{decoy}_local_rot.h5'
        with h5py.File(f'data/decoys/{decoy_set}/{pdb_id}/{decoy_file_name}', 'w') as f:
            dset = f.create_dataset("seq", shape=seq.shape, data=seq, dtype='i')
            dset = f.create_dataset("coords", shape=coords.shape, data=coords, dtype='f4')
            dset = f.create_dataset("profile", shape=profile.shape, data=profile, dtype='f4')
            dset = f.create_dataset("res_counts", shape=res_counts.shape, data=res_counts, dtype='i')
            if reorder:
                dset = f.create_dataset("group_num", shape=group_num.shape, data=group_num, dtype='i')
                dset = f.create_dataset("start_id", shape=start_id.shape, data=start_id, dtype='i')
    else:
        # no usable local structures were produced; print the sequence for debugging
        print(group_name)
    return 1
def check_3drobot_bead_profile():
    """Report which 3DRobot PDBs have bead CSVs matching their profile length.

    For each PDB id in pdb_profile_diff.txt, every decoy's bead CSV must
    have the same number of residues as the evolutionary profile; ids that
    pass for all decoys are written to pdb_profile_diff_match.txt.
    """
    pdb_list = pd.read_csv('data/decoys/3DRobot_set/pdb_profile_diff.txt')['pdb'].values
    decoy_set = '3DRobot_set'
    profile_set = 'pdb_profile_training_100'
    # Alternative inputs for the casp11 set:
    # pdb_list = pd.read_csv(f'data/decoys/casp11/pdb_no_need_copy_native.txt')['pdb'].values
    # decoy_set = 'casp11'
    # profile_set = 'pdb_profile'
    pdb_list_good = []
    for pdb_id in tqdm(pdb_list):
        df_profile = pd.read_csv(f'data/decoys/{decoy_set}/{profile_set}/{pdb_id}_profile.csv')
        # BUG FIX: sep was '\s+' (invalid escape sequence, SyntaxWarning on
        # Python 3.12+); a raw string is the correct spelling.
        df = pd.read_csv(f'data/decoys/{decoy_set}/{pdb_id}/list.txt', sep=r'\s+')
        decoy_list = df['NAME'].values
        bad_count = 0
        for decoy in decoy_list:
            # decoy[:-4] strips the '.pdb' extension
            df_coords = pd.read_csv(f'data/decoys/{decoy_set}/{pdb_id}/{decoy[:-4]}_bead.csv')
            if df_profile.shape[0] != df_coords.shape[0]:
                bad_count += 1
        if bad_count == 0:
            pdb_list_good.append(pdb_id)
    pd.DataFrame({'pdb': pdb_list_good}).to_csv(f'data/decoys/{decoy_set}/pdb_profile_diff_match.txt', index=False)
def extract_decoy_set_3drobot(pdb_list):
    """Extract bead CSVs for every decoy of every PDB id in *pdb_list*.

    Intended to run as a multiprocessing worker (see
    extract_decoy_set_3drobot_all). Only the bead-extraction step runs;
    the local-structure step is left commented out below.
    """
    decoy_set = '3DRobot_set'
    profile_set = 'pdb_profile_training_100'
    for pdb_id in pdb_list:
        # Known issue: 1TJXA and 2J1VA contain residues without CA atoms.
        # BUG FIX: sep was '\s+' (invalid escape sequence, SyntaxWarning on
        # Python 3.12+); use a raw string.
        decoy_list = pd.read_csv(f'data/decoys/{decoy_set}/{pdb_id}/list.txt', sep=r'\s+')['NAME'].values
        for decoy in tqdm(decoy_list):
            decoy = decoy[:-4]  # strip the '.pdb' extension
            extract_beads(pdb_id, decoy, decoy_set=decoy_set)
            # Optional second stage (currently disabled):
            # result = extract_one_topk(pdb_id, decoy, decoy_set=decoy_set, profile_set=profile_set)
            # if result == 0:
            #     print(pdb_id, decoy)
def extract_decoy_set_3drobot_all():
    """Fan the per-PDB bead extraction out over a pool of worker processes."""
    pdb_list = pd.read_csv('data/decoys/3DRobot_set/pdb_profile_diff.txt')['pdb'].values
    num_cores = 40
    count = len(pdb_list)
    batch_size = count // num_cores + 1
    # Slice the PDB ids into one chunk per worker (the last may be short).
    batch_list = [pdb_list[start:start + batch_size]
                  for start in range(0, count, batch_size)]
    # setup the multi-processes
    with mp.Pool(processes=num_cores) as pool:
        pool.map(extract_decoy_set_3drobot, batch_list)
def check_h5_data_3drobot():
    """Verify every decoy h5 exists and matches the native structure count.

    For each PDB id in pdb_profile_diff.txt, compare the number of local
    structures ("seq" rows) of each decoy's h5 file against the native
    one, printing missing files and shape mismatches.
    """
    decoy_set = '3DRobot_set'
    pdb_list = pd.read_csv(f'data/decoys/{decoy_set}/pdb_profile_diff.txt')['pdb'].values
    for pdb_id in tqdm(pdb_list):
        if pdb_id == '1WDDS':
            # skipped entry (kept from original code) — presumably a known
            # bad PDB; TODO confirm why it is excluded
            continue
        # BUG FIX: sep was '\s+' (invalid escape sequence); use a raw string.
        df = pd.read_csv(f'data/decoys/{decoy_set}/{pdb_id}/list.txt', sep=r'\s+')
        decoy_list = df['NAME'].values
        native_data_path = f'data/decoys/{decoy_set}/{pdb_id}/native_local_rot.h5'
        # BUG FIX: the HDF5 handles were never closed (one leaked handle per
        # decoy); context managers close them promptly.
        with h5py.File(native_data_path, 'r') as native_data:
            native_shape = native_data['seq'].shape[0]
        for decoy in decoy_list:
            data_path = f'data/decoys/{decoy_set}/{pdb_id}/{decoy[:-4]}_local_rot.h5'
            if not os.path.exists(data_path):
                print(f'{pdb_id} {decoy} no h5')
                continue
            with h5py.File(data_path, 'r') as decoy_data:
                decoy_data_shape = decoy_data['seq'].shape[0]
            if native_shape != decoy_data_shape:
                print(f'{pdb_id} {decoy} different shape')
def extract_beads_batch_new(pdb_list):
# use all 200 proteins in the 3DRobot set
decoy_set = '3DRobot_set'
amino_acids = pd.read_csv('data/amino_acids.csv')
vocab_aa = [x.upper() for x in amino_acids.AA3C]
vocab_dict = {x.upper(): y for x, y in zip(amino_acids.AA3C, amino_acids.AA)}
# pdb_list = pd.read_csv(f'data/decoys/3DRobot_set/pdb_all.csv')['pdb'].values
# pdb_list = pd.read_csv(f'data/decoys/3DRobot_set/pdb_no_missing_residue.csv')['pdb'].values
# pdb_no_missing_list = []
for pdb_id in tqdm(pdb_list):
df_pdb = pd.read_csv(f'data/decoys/{decoy_set}/{pdb_id}/list.txt', sep='\s+')
decoy_list = df_pdb['NAME'].values
# decoy_list = ['native.pdb']
idx = np.zeros(decoy_list.shape[0])
for i, decoy in enumerate(decoy_list):
decoy = decoy[:-4]
try:
p = PDBParser()
structure = p.get_structure('X', f'data/decoys/{decoy_set}/{pdb_id}/{decoy}.pdb')
residue_list = Selection.unfold_entities(structure, 'R')
except:
continue
ca_center_list = []
cb_center_list = []
res_name_list = []
res_num_list = []
chain_list = []
for res in residue_list:
if res.get_resname() not in vocab_aa:
# raise ValueError('protein | |
h[1].data
h[1].data = None
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].data is None
assert h[1].header['NAXIS'] == 0
assert 'NAXIS1' not in h[1].header
assert 'NAXIS2' not in h[1].header
    def test_invalid_blank(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/2711
        If the BLANK keyword contains an invalid value it should be ignored for
        any calculations (though a warning should be issued).
        """
        data = np.arange(100, dtype=np.float64)
        hdu = fits.PrimaryHDU(data)
        # 'nan' is not a valid value for the BLANK keyword.
        hdu.header['BLANK'] = 'nan'
        with pytest.warns(fits.verify.VerifyWarning, match=r"Invalid value for "
                          r"'BLANK' keyword in header: 'nan'"):
            hdu.writeto(self.temp('test.fits'))
        with pytest.warns(AstropyUserWarning) as w:
            with fits.open(self.temp('test.fits')) as hdul:
                # Data must be returned unchanged despite the bad BLANK.
                assert np.all(hdul[0].data == data)
        # Exactly two warnings: the bad value itself, then ignoring BLANK.
        assert len(w) == 2
        msg = "Invalid value for 'BLANK' keyword in header"
        assert msg in str(w[0].message)
        msg = "Invalid 'BLANK' keyword"
        assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as (hdu,):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
def test_hdu_creation_with_scalar(self):
msg = r'data object array\(1\) should have at least one dimension'
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=1)
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=1)
class TestCompressedImage(FitsTestCase):
    def test_empty(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/2595
        """
        # A CompImageHDU created with no data must round-trip cleanly.
        hdu = fits.CompImageHDU()
        assert hdu.data is None
        hdu.writeto(self.temp('test.fits'))
        with fits.open(self.temp('test.fits'), mode='update') as hdul:
            assert len(hdul) == 2
            assert isinstance(hdul[1], fits.CompImageHDU)
            assert hdul[1].data is None
            # Now test replacing the empty data with an array and see what
            # happens
            hdul[1].data = np.arange(100, dtype=np.int32)
        # Reopen: the update-mode assignment must have been flushed to disk.
        with fits.open(self.temp('test.fits')) as hdul:
            assert len(hdul) == 2
            assert isinstance(hdul[1], fits.CompImageHDU)
            assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
    @pytest.mark.parametrize(
        ('data', 'compression_type', 'quantize_level'),
        [(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16),
         (np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01),
         (np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_2', -0.01),
         (np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16),
         (np.zeros((10, 10)), 'PLIO_1', 16)])
    @pytest.mark.parametrize('byte_order', ['<', '>'])
    def test_comp_image(self, data, compression_type, quantize_level,
                        byte_order):
        """Round-trip data through each compression type and byte order."""
        data = data.newbyteorder(byte_order)
        primary_hdu = fits.PrimaryHDU()
        ofd = fits.HDUList(primary_hdu)
        chdu = fits.CompImageHDU(data, name='SCI',
                                 compression_type=compression_type,
                                 quantize_level=quantize_level)
        ofd.append(chdu)
        ofd.writeto(self.temp('test_new.fits'), overwrite=True)
        ofd.close()
        with fits.open(self.temp('test_new.fits')) as fd:
            # Data and the core structural keywords must survive unchanged.
            assert (fd[1].data == data).all()
            assert fd[1].header['NAXIS'] == chdu.header['NAXIS']
            assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1']
            assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2']
            assert fd[1].header['BITPIX'] == chdu.header['BITPIX']
    @pytest.mark.skipif('not HAS_SCIPY')
    def test_comp_image_quantize_level(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/5969
        Test that quantize_level is used.
        """
        import scipy.misc
        np.random.seed(42)
        # Noisy image so quantization actually loses information.
        data = scipy.misc.ascent() + np.random.randn(512, 512)*10
        fits.ImageHDU(data).writeto(self.temp('im1.fits'))
        fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
                          quantize_level=-1, dither_seed=5)\
            .writeto(self.temp('im2.fits'))
        fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
                          quantize_level=-100, dither_seed=5)\
            .writeto(self.temp('im3.fits'))
        im1 = fits.getdata(self.temp('im1.fits'))
        im2 = fits.getdata(self.temp('im2.fits'))
        im3 = fits.getdata(self.temp('im3.fits'))
        # Different quantize levels must give different decompressed data...
        assert not np.array_equal(im2, im3)
        # ...with maximum error of +/-0.5 at level -1 and +/-50 at level -100.
        assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
        assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
        assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
        assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(ValueError, fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32), name='SCI',
compression_type='HCOMPRESS_1', quantize_level=16,
tile_size=[2, 10, 10])
    def test_comp_image_hcompress_image_stack(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
        Tests that data containing more than two dimensions can be
        compressed with HCOMPRESS_1 so long as the user-supplied tile size can
        be flattened to two dimensions.
        """
        cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
        # tile_size [5, 5, 1] flattens to a valid 2-D (5x5) tile.
        hdu = fits.CompImageHDU(data=cube, name='SCI',
                                compression_type='HCOMPRESS_1',
                                quantize_level=16, tile_size=[5, 5, 1])
        hdu.writeto(self.temp('test.fits'))
        with fits.open(self.temp('test.fits')) as hdul:
            # HCOMPRESSed images are allowed to deviate from the original by
            # about 1/quantize_level of the RMS in each tile.
            assert np.abs(hdul['SCI'].data - cube).max() < 1./15.
    def test_subtractive_dither_seed(self):
        """
        Regression test for https://github.com/spacetelescope/PyFITS/issues/32
        Ensure that when floating point data is compressed with the
        SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
        is added to the header, and that the data can be correctly
        decompressed.
        """
        array = np.arange(100.0).reshape(10, 10)
        # Expected checksum seed: byte-sum of the first row folded into
        # the range 1..10000.
        csum = (array[0].view('uint8').sum() % 10000) + 1
        hdu = fits.CompImageHDU(data=array,
                                quantize_method=SUBTRACTIVE_DITHER_1,
                                dither_seed=DITHER_SEED_CHECKSUM)
        hdu.writeto(self.temp('test.fits'))
        with fits.open(self.temp('test.fits')) as hdul:
            assert isinstance(hdul[1], fits.CompImageHDU)
            assert 'ZQUANTIZ' in hdul[1]._header
            assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1'
            assert 'ZDITHER0' in hdul[1]._header
            assert hdul[1]._header['ZDITHER0'] == csum
            # Round trip must still reproduce the original data exactly.
            assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with fits.open(self.data('comp.fits'),
disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data('comp.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
    def test_open_comp_image_in_update_mode(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
        Similar to test_open_scaled_in_update_mode(), but specifically for
        compressed images.
        """
        # Copy the original file before making any possible changes to it
        self.copy_file('comp.fits')
        mtime = os.stat(self.temp('comp.fits')).st_mtime
        # Sleep so any rewrite would show up as an mtime change.
        time.sleep(1)
        fits.open(self.temp('comp.fits'), mode='update').close()
        # Ensure that no changes were made to the file merely by immediately
        # opening and closing it.
        assert mtime == os.stat(self.temp('comp.fits')).st_mtime
    def test_open_scaled_in_update_mode_compressed(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
        Identical to test_open_scaled_in_update_mode() but with a compressed
        version of the scaled image.
        """
        # Copy+compress the original file before making any possible changes to
        # it
        with fits.open(self.data('scale.fits'),
                       do_not_scale_image_data=True) as hdul:
            chdu = fits.CompImageHDU(data=hdul[0].data,
                                     header=hdul[0].header)
            chdu.writeto(self.temp('scale.fits'))
        mtime = os.stat(self.temp('scale.fits')).st_mtime
        time.sleep(1)
        fits.open(self.temp('scale.fits'), mode='update').close()
        # Ensure that no changes were made to the file merely by immediately
        # opening and closing it.
        assert mtime == os.stat(self.temp('scale.fits')).st_mtime
        # Insert a slight delay to ensure the mtime does change when the file
        # is changed
        time.sleep(1)
        hdul = fits.open(self.temp('scale.fits'), 'update')
        # Accessing .data in update mode marks the HDU for rescaling on close.
        hdul[1].data
        hdul.close()
        # Now the file should be updated with the rescaled data
        assert mtime != os.stat(self.temp('scale.fits')).st_mtime
        hdul = fits.open(self.temp('scale.fits'), mode='update')
        assert hdul[1].data.dtype == np.dtype('float32')
        assert hdul[1].header['BITPIX'] == -32
        assert 'BZERO' not in hdul[1].header
        assert 'BSCALE' not in hdul[1].header
        # Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
        hdul[1].data.shape = (42, 10)
        hdul.close()
        hdul = fits.open(self.temp('scale.fits'))
        assert hdul[1].shape == (42, 10)
        assert hdul[1].data.dtype == np.dtype('float32')
        assert hdul[1].header['BITPIX'] == -32
        assert 'BZERO' not in hdul[1].header
        assert 'BSCALE' not in hdul[1].header
        hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(hdul1[1].compressed_data,
hdul2[1].compressed_data)
    def test_rewriting_large_scaled_image_compressed(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
        Identical to test_rewriting_large_scaled_image() but with a compressed
        image.
        """
        # Build a compressed copy of the scaled test image to work on.
        with fits.open(self.data('fixed-1890.fits'),
                       do_not_scale_image_data=True) as hdul:
            chdu = fits.CompImageHDU(data=hdul[0].data,
                                     header=hdul[0].header)
            chdu.writeto(self.temp('fixed-1890-z.fits'))
        hdul = fits.open(self.temp('fixed-1890-z.fits'))
        # Touch .data so the image is scaled before writing back out.
        orig_data = hdul[1].data
        hdul.writeto(self.temp('test_new.fits'), overwrite=True)
        hdul.close()
        hdul = fits.open(self.temp('test_new.fits'))
        assert (hdul[1].data == orig_data).all()
        hdul.close()
        # Just as before, but this time don't touch hdul[0].data before writing
        # back out--this is the case that failed in
        # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
        hdul = fits.open(self.temp('fixed-1890-z.fits'))
        hdul.writeto(self.temp('test_new.fits'), overwrite=True)
        hdul.close()
        hdul = fits.open(self.temp('test_new.fits'))
        assert (hdul[1].data == orig_data).all()
        hdul.close()
        # Test opening/closing/reopening a scaled file in update mode
        hdul = fits.open(self.temp('fixed-1890-z.fits'),
                         do_not_scale_image_data=True)
        hdul.writeto(self.temp('test_new.fits'), overwrite=True,
                     output_verify='silentfix')
        hdul.close()
        hdul = fits.open(self.temp('test_new.fits'))
        orig_data = hdul[1].data
        hdul.close()
        # Open/close in update mode must not corrupt the data.
        hdul = fits.open(self.temp('test_new.fits'), mode='update')
        hdul.close()
        hdul = fits.open(self.temp('test_new.fits'))
        assert (hdul[1].data == orig_data).all()
        hdul.close()
    def test_scale_back_compressed(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
        Identical to test_scale_back() but uses a compressed image.
        """
        # Create a compressed version of the scaled image
        with fits.open(self.data('scale.fits'),
                       do_not_scale_image_data=True) as hdul:
            chdu = fits.CompImageHDU(data=hdul[0].data,
                                     header=hdul[0].header)
            chdu.writeto(self.temp('scale.fits'))
        with fits.open(self.temp('scale.fits'), mode='update',
                       scale_back=True) as hdul:
            orig_bitpix = hdul[1].header['BITPIX']
            orig_bzero = hdul[1].header['BZERO']
            orig_bscale = hdul[1].header['BSCALE']
            orig_data = hdul[1].data.copy()
            # Modify one row; scale_back must write it back re-scaled to
            # the original BITPIX/BZERO/BSCALE.
            hdul[1].data[0] = 0
        with fits.open(self.temp('scale.fits'),
                       do_not_scale_image_data=True) as hdul:
            assert hdul[1].header['BITPIX'] == orig_bitpix
            assert hdul[1].header['BZERO'] == orig_bzero
            assert hdul[1].header['BSCALE'] == orig_bscale
            # A zeroed physical value maps back to this raw "zero point".
            zero_point = int(math.floor(-orig_bzero / orig_bscale))
            assert (hdul[1].data[0] == zero_point).all()
        with fits.open(self.temp('scale.fits')) as hdul:
            assert (hdul[1].data[1:] == orig_data[1:]).all()
            # Extra test to ensure that after everything the data is still the
            # same as in the original uncompressed version of the image
            with fits.open(self.data('scale.fits')) as hdul2:
                # Recall we made the same modification to the data in hdul
                # above
                hdul2[0].data[0] = 0
                assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
rng = np.random.default_rng(42)
noise = rng.normal(size=(20, 20))
chdu1 = fits.CompImageHDU(data=noise, compression_type='GZIP_1')
# First make a test image with lossy compression | |
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from petition.models import Organization, Petition, PytitionUser
from petition.helpers import get_update_form
from petition.forms import DeleteAccountForm
# Fixture data consumed by AccountSettingsViewTest.setUpTestData below.
# Usernames to create (each user's password is derived from the username;
# see login()).
users = ['julia', 'john', 'max', 'sarah']
# Organizations to create.
orgs = ['RAP', 'Greenpeace', 'Attac', 'Les Amis de la Terre']
# Number of published petitions owned by each user.
user_published_petitions = {
    'john': 0,
    'sarah': 0,
    'julia': 5,
    'max': 10
}
# Number of unpublished petitions owned by each user.
user_unpublished_petitions = {
    'john': 0,
    'sarah': 5,
    'julia': 0,
    'max': 10
}
# Number of published petitions owned by each organization.
org_published_petitions = {
    'RAP': 0,
    'Les Amis de la Terre': 0,
    'Greenpeace': 1,
    'Attac': 2
}
# Number of unpublished petitions owned by each organization.
org_unpublished_petitions = {
    'RAP': 0,
    'Les Amis de la Terre': 1,
    'Greenpeace': 0,
    'Attac': 2
}
# Organization membership: org name -> member usernames.
# 'Greenpeace' deliberately has no entry (no members).
org_members = {
    'RAP': ['julia'],
    'Les Amis de la Terre': ['julia', 'max'],
    'Attac': ['john'],
}
class AccountSettingsViewTest(TestCase):
"""Test index view"""
@classmethod
def setUpTestData(cls):
User = get_user_model()
for org in orgs:
o = Organization.objects.create(name=org)
for i in range(org_published_petitions[org]):
p = Petition.objects.create(published=True)
o.petitions.add(p)
p.save()
for i in range(org_unpublished_petitions[org]):
p = Petition.objects.create(published=False)
o.petitions.add(p)
p.save()
o.save()
for user in users:
u = User.objects.create_user(user, password=<PASSWORD>)
u.first_name = user
u.last_name = user + "Last"
u.save()
pu = PytitionUser.objects.get(user__username=user)
for i in range(user_published_petitions[user]):
p = Petition.objects.create(published=True)
pu.petitions.add(p)
p.save()
for i in range(user_unpublished_petitions[user]):
p = Petition.objects.create(published=False)
pu.petitions.add(p)
p.save()
for orgname in org_members:
org = Organization.objects.get(name=orgname)
for username in org_members[orgname]:
user = PytitionUser.objects.get(user__username=username)
org.add_member(user)
# give julia can_modify_petitions permission on "Les Amis de la Terre" organization
perm = PytitionUser.objects.get(user__username="julia").permissions\
.get(organization__name="Les Amis de la Terre")
perm.can_modify_petitions = True
perm.save()
def login(self, name, password=None):
self.client.login(username=name, password=password if password else name)
self.pu = PytitionUser.objects.get(user__username=name)
return self.pu
def logout(self):
self.client.logout()
def tearDown(self):
# Clean up run after every test method.
pass
def test_NotLoggedIn(self):
self.logout()
response = self.client.get(reverse("account_settings"), follow=True)
self.assertRedirects(response, reverse("login")+"?next="+reverse("account_settings"))
self.assertTemplateUsed(response, "registration/login.html")
self.assertTemplateUsed(response, "layouts/base.html")
def test_UserOK1(self):
john = self.login("john")
update_info_form = get_update_form(john.user)
response = self.client.get(reverse("account_settings"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], john)
self.assertEqual(response.context['update_info_form_submitted'], False)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], False)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), False)
self.assertEqual(response.context['password_change_form'].is_bound, False)
def test_UserOK2(self):
julia = self.login("julia")
response = self.client.get(reverse("account_settings"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], julia)
self.assertEqual(response.context['update_info_form_submitted'], False)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], False)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), False)
self.assertEqual(response.context['password_change_form'].is_bound, False)
def test_UserOK3(self):
max = self.login("max")
response = self.client.get(reverse("account_settings"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], max)
self.assertEqual(response.context['update_info_form_submitted'], False)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], False)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), False)
self.assertEqual(response.context['password_change_form'].is_bound, False)
def test_UserOK4(self):
sarah = self.login("sarah")
response = self.client.get(reverse("account_settings"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], sarah)
self.assertEqual(response.context['update_info_form_submitted'], False)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], False)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), False)
self.assertEqual(response.context['password_change_form'].is_bound, False)
def test_UserjohnPOSTUserInfoOK(self):
john = self.login("john")
update_info_form = get_update_form(john.user)
update_info_form.is_valid()
data = update_info_form.cleaned_data
data.update({
'update_info_form_submitted': 'yes',
})
response = self.client.post(reverse("account_settings"), data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], john)
self.assertEqual(response.context['update_info_form_submitted'], True)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], False)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), False)
self.assertEqual(response.context['password_change_form'].is_bound, False)
    def test_UserjohnPOSTPassChangeOK(self):
        """Changing john's password works; only the new password logs him
        in afterwards."""
        john = self.login("john")
        # NOTE(review): password literals were redacted to <PASSWORD>
        # placeholders in this source; originals are not recoverable here.
        new_pass = '<PASSWORD>&#'
        data = {
            'password_change_form_submitted': 'yes',
            'old_password': '<PASSWORD>',
            'new_password1': <PASSWORD>,
            'new_password2': <PASSWORD>,
        }
        response = self.client.post(reverse("account_settings"), data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "petition/account_settings.html")
        self.assertTemplateUsed(response, "layouts/base.html")
        self.assertEqual(response.context['user'], john)
        # Only the password-change form was submitted.
        self.assertEqual(response.context['update_info_form_submitted'], False)
        self.assertEqual(response.context['delete_account_form_submitted'], False)
        self.assertEqual(response.context['password_change_form_submitted'], True)
        self.assertEqual(response.context['update_info_form'].is_valid(), True)
        self.assertEqual(response.context['update_info_form'].is_bound, True)
        self.assertEqual(response.context['delete_account_form'].is_valid(), False)
        self.assertEqual(response.context['delete_account_form'].is_bound, False)
        self.assertEqual(response.context['password_change_form'].is_valid(), True)
        self.assertEqual(response.context['password_change_form'].is_bound, True)
        self.logout()
        # The new password must now authenticate john.
        self.login("john", password=<PASSWORD>)
        response2 = self.client.get(reverse("user_dashboard"))
        self.assertEqual(response2.status_code, 200)
        self.logout()
        # The old password (the username) must no longer work, so the
        # dashboard redirects to login.
        self.login("john")
        response3 = self.client.get(reverse("user_dashboard"), follow=True)
        self.assertRedirects(response3, reverse("login")+"?next="+reverse("user_dashboard"))
def test_UserjohnPOSTDeleteAccountOK(self):
# to avoid 404 error when index page redirects to deleted Organization profile page
with self.settings(INDEX_PAGE="ALL_PETITIONS"):
self.login("john")
data = {
'validation': "DROP MY ACCOUNT",
'delete_account_form_submitted': "yes",
}
f = DeleteAccountForm(data)
self.assertEqual(f.is_valid(), True)
response = self.client.post(reverse("account_settings"), data, follow=True)
self.assertRedirects(response, reverse("all_petitions"))
self.assertTemplateUsed(response, "layouts/base.html")
self.logout()
try:
self.login("john")
response2 = self.client.get(reverse("user_dashboard"))
self.assertRedirects(response2, reverse("login")+"?next="+reverse("user_dashboard"))
self.assertEqual(0, 1) # Should never be reached
except:
pass # I expected that!
pu = PytitionUser.objects.filter(user__username="john").count()
self.assertEqual(pu, 0)
User = get_user_model()
u = User.objects.filter(username="john").count()
self.assertEqual(u, 0)
def test_UsersarahPOSTUserInfoOK(self):
username = "sarah"
user = self.login(username)
update_info_form = get_update_form(user.user)
update_info_form.is_valid()
data = update_info_form.cleaned_data
data.update({
'update_info_form_submitted': 'yes',
})
response = self.client.post(reverse("account_settings"), data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], user)
self.assertEqual(response.context['update_info_form_submitted'], True)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], False)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), False)
self.assertEqual(response.context['password_change_form'].is_bound, False)
def test_UsersarahPOSTPassChangeOK(self):
username ="sarah"
user = self.login(username)
new_pass = 'eytks<PASSWORD>&#'
data = {
'password_change_form_submitted': 'yes',
'old_password': username,
'new_password1': <PASSWORD>,
'new_password2': <PASSWORD>,
}
response = self.client.post(reverse("account_settings"), data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], user)
self.assertEqual(response.context['update_info_form_submitted'], False)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], True)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), True)
self.assertEqual(response.context['password_change_form'].is_bound, True)
self.logout()
self.login(username, password=<PASSWORD>)
response2 = self.client.get(reverse("user_dashboard"))
self.assertEqual(response2.status_code, 200)
self.logout()
self.login(username)
response3 = self.client.get(reverse("user_dashboard"), follow=True)
self.assertRedirects(response3, reverse("login")+"?next="+reverse("user_dashboard"))
def test_UsersarahPOSTDeleteAccountOK(self):
# to avoid 404 error when index page redirects to deleted Organization profile page
with self.settings(INDEX_PAGE="ALL_PETITIONS"):
username = "sarah"
self.login(username)
data = {
'validation': "DROP MY ACCOUNT",
'delete_account_form_submitted': "yes",
}
f = DeleteAccountForm(data)
self.assertEqual(f.is_valid(), True)
response = self.client.post(reverse("account_settings"), data, follow=True)
self.assertRedirects(response, reverse("all_petitions"))
self.assertTemplateUsed(response, "layouts/base.html")
self.logout()
try:
self.login(username)
response2 = self.client.get(reverse("user_dashboard"))
self.assertRedirects(response2, reverse("login")+"?next="+reverse("user_dashboard"))
self.assertEqual(0, 1) # Should never be reached
except:
pass # I expected that!
pu = PytitionUser.objects.filter(user__username=username).count()
self.assertEqual(pu, 0)
User = get_user_model()
u = User.objects.filter(username=username).count()
self.assertEqual(u, 0)
def test_UserjuliaPOSTUserInfoOK(self):
username = "julia"
user = self.login(username)
update_info_form = get_update_form(user.user)
update_info_form.is_valid()
data = update_info_form.cleaned_data
data.update({
'update_info_form_submitted': 'yes',
})
response = self.client.post(reverse("account_settings"), data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], user)
self.assertEqual(response.context['update_info_form_submitted'], True)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], False)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), False)
self.assertEqual(response.context['password_change_form'].is_bound, False)
    def test_UserjuliaPOSTPassChangeOK(self):
        """Changing julia's password works; only the new password logs her
        in afterwards."""
        username ="julia"
        user = self.login(username)
        # NOTE(review): password literals were redacted to <PASSWORD>
        # placeholders in this source; originals are not recoverable here.
        new_pass = '<PASSWORD>&#'
        data = {
            'password_change_form_submitted': 'yes',
            'old_password': username,
            'new_password1': <PASSWORD>,
            'new_password2': <PASSWORD>,
        }
        response = self.client.post(reverse("account_settings"), data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "petition/account_settings.html")
        self.assertTemplateUsed(response, "layouts/base.html")
        self.assertEqual(response.context['user'], user)
        # Only the password-change form was submitted.
        self.assertEqual(response.context['update_info_form_submitted'], False)
        self.assertEqual(response.context['delete_account_form_submitted'], False)
        self.assertEqual(response.context['password_change_form_submitted'], True)
        self.assertEqual(response.context['update_info_form'].is_valid(), True)
        self.assertEqual(response.context['update_info_form'].is_bound, True)
        self.assertEqual(response.context['delete_account_form'].is_valid(), False)
        self.assertEqual(response.context['delete_account_form'].is_bound, False)
        self.assertEqual(response.context['password_change_form'].is_valid(), True)
        self.assertEqual(response.context['password_change_form'].is_bound, True)
        self.logout()
        # The new password must now authenticate.
        self.login(username, password=<PASSWORD>)
        response2 = self.client.get(reverse("user_dashboard"))
        self.assertEqual(response2.status_code, 200)
        self.logout()
        # The old password (the username) must no longer work.
        self.login(username)
        response3 = self.client.get(reverse("user_dashboard"), follow=True)
        self.assertRedirects(response3, reverse("login")+"?next="+reverse("user_dashboard"))
def test_UserjuliaPOSTDeleteAccountOK(self):
# to avoid 404 error when index page redirects to deleted Organization profile page
with self.settings(INDEX_PAGE="ALL_PETITIONS"):
username = "julia"
self.login(username)
data = {
'validation': "DROP MY ACCOUNT",
'delete_account_form_submitted': "yes",
}
f = DeleteAccountForm(data)
self.assertEqual(f.is_valid(), True)
response = self.client.post(reverse("account_settings"), data, follow=True)
self.assertRedirects(response, reverse("all_petitions"))
self.assertTemplateUsed(response, "layouts/base.html")
self.logout()
try:
self.login(username)
response2 = self.client.get(reverse("user_dashboard"))
self.assertRedirects(response2, reverse("login")+"?next="+reverse("user_dashboard"))
self.assertEqual(0, 1) # Should never be reached
except:
pass # I expected that!
pu = PytitionUser.objects.filter(user__username=username).count()
self.assertEqual(pu, 0)
User = get_user_model()
u = User.objects.filter(username=username).count()
self.assertEqual(u, 0)
def test_UsermaxPOSTUserInfoOK(self):
username = "max"
user = self.login(username)
update_info_form = get_update_form(user.user)
update_info_form.is_valid()
data = update_info_form.cleaned_data
data.update({
'update_info_form_submitted': 'yes',
})
response = self.client.post(reverse("account_settings"), data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/account_settings.html")
self.assertTemplateUsed(response, "layouts/base.html")
self.assertEqual(response.context['user'], user)
self.assertEqual(response.context['update_info_form_submitted'], True)
self.assertEqual(response.context['delete_account_form_submitted'], False)
self.assertEqual(response.context['password_change_form_submitted'], False)
self.assertEqual(response.context['update_info_form'].is_valid(), True)
self.assertEqual(response.context['update_info_form'].is_bound, True)
self.assertEqual(response.context['delete_account_form'].is_valid(), False)
self.assertEqual(response.context['delete_account_form'].is_bound, False)
self.assertEqual(response.context['password_change_form'].is_valid(), False)
self.assertEqual(response.context['password_change_form'].is_bound, False)
    def test_UsermaxPOSTPassChangeOK(self):
        """Changing max's password works; only the new password logs him
        in afterwards."""
        username ="max"
        user = self.login(username)
        # NOTE(review): password literals were redacted to <PASSWORD>
        # placeholders in this source; originals are not recoverable here.
        new_pass = '<PASSWORD>&#'
        data = {
            'password_change_form_submitted': 'yes',
            'old_password': <PASSWORD>,
            'new_password1': <PASSWORD>,
            'new_password2': <PASSWORD>,
        }
        response = self.client.post(reverse("account_settings"), data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "petition/account_settings.html")
        self.assertTemplateUsed(response, "layouts/base.html")
        self.assertEqual(response.context['user'], user)
        # Only the password-change form was submitted.
        self.assertEqual(response.context['update_info_form_submitted'], False)
        self.assertEqual(response.context['delete_account_form_submitted'], False)
        self.assertEqual(response.context['password_change_form_submitted'], True)
        self.assertEqual(response.context['update_info_form'].is_valid(), True)
        self.assertEqual(response.context['update_info_form'].is_bound, True)
        self.assertEqual(response.context['delete_account_form'].is_valid(), False)
        self.assertEqual(response.context['delete_account_form'].is_bound, False)
        self.assertEqual(response.context['password_change_form'].is_valid(), True)
        self.assertEqual(response.context['password_change_form'].is_bound, True)
        self.logout()
        # The new password must now authenticate.
        self.login(username, password=<PASSWORD>)
        response2 = self.client.get(reverse("user_dashboard"))
        self.assertEqual(response2.status_code, 200)
        self.logout()
        # The old password (the username) must no longer work.
        self.login(username)
        response3 = self.client.get(reverse("user_dashboard"), follow=True)
        self.assertRedirects(response3, reverse("login")+"?next="+reverse("user_dashboard"))
def test_UsermaxPOSTDeleteAccountOK(self):
with self.settings(INDEX_PAGE="ALL_PETITIONS"):
username = "max"
self.login(username)
data = {
'validation': "DROP MY ACCOUNT",
'delete_account_form_submitted': "yes",
}
f = DeleteAccountForm(data)
self.assertEqual(f.is_valid(), True)
response = self.client.post(reverse("account_settings"), data, follow=True)
self.assertRedirects(response, reverse("all_petitions"))
self.assertTemplateUsed(response, "layouts/base.html")
self.logout()
try:
self.login(username)
response2 = self.client.get(reverse("user_dashboard"))
self.assertRedirects(response2, reverse("login")+"?next="+reverse("user_dashboard"))
self.assertEqual(0, 1) # Should never be reached
except:
pass # I expected that!
pu = PytitionUser.objects.filter(user__username=username).count()
self.assertEqual(pu, 0)
User = get_user_model()
u = User.objects.filter(username=username).count()
self.assertEqual(u, 0)
def test_UsermaxPOSTDeleteAccountValidNOK(self):
username = "max"
self.login(username)
data = {
'validation': "DO *NOT* DROP MY ACCOUNT",
'delete_account_form_submitted': "yes",
}
f = DeleteAccountForm(data)
self.assertEqual(f.is_valid(), False)
response = self.client.post(reverse("account_settings"), data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "layouts/base.html")
self.assertTemplateUsed(response, "petition/account_settings.html")
self.logout()
self.login(username)
response2 = self.client.get(reverse("user_dashboard"))
self.assertEqual(response2.status_code, 200)
pu = PytitionUser.objects.filter(user__username=username).count()
self.assertEqual(pu, 1)
User = get_user_model()
u = User.objects.filter(username=username).count()
self.assertEqual(u, 1)
def test_UserjuliaPOSTDeleteAccountValidNOK(self):
username = "julia"
self.login(username)
data = {
'validation': "DO *NOT* DROP MY ACCOUNT",
'delete_account_form_submitted': "yes",
}
f = DeleteAccountForm(data)
self.assertEqual(f.is_valid(), False)
response = self.client.post(reverse("account_settings"), data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "layouts/base.html")
self.assertTemplateUsed(response, "petition/account_settings.html")
self.logout()
self.login(username)
response2 = self.client.get(reverse("user_dashboard"))
self.assertEqual(response2.status_code, 200)
pu = PytitionUser.objects.filter(user__username=username).count()
self.assertEqual(pu, 1)
User = get_user_model()
u = User.objects.filter(username=username).count()
self.assertEqual(u, 1)
def test_UserjohnPOSTDeleteAccountValidNOK(self):
username = "john"
self.login(username)
data = {
'validation': "DO *NOT* DROP MY ACCOUNT",
'delete_account_form_submitted': "yes",
}
| |
elif data_type.endswith("FLOAT"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">6f", payload[16 : 16 + 6 * 4])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
elif data_type.endswith("ENUM"):
nstrings, = unpack(">h", payload[4:6])
strings = payload[6 : 6 + 16 * 26]
info += "nstrings=%r" % nstrings
elif data_type.endswith("CHAR"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("6b", payload[16 : 16 + 6 * 1])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("LONG"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("6i", payload[16 : 16 + 6 * 4])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("DOUBLE"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">6d", payload[16 : 16 + 6 * 8])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
else:
info += "?"
info = info.restrip(", ")
return "{" + info + "}"
elif data_type.startswith("CTRL_"):
status, severity = unpack(">HH", payload[0:4])
info = "status:%d,severity:%d, " % (status, severity)
if data_type.endswith("STRING"):
pass
elif data_type.endswith("SHORT"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8h", payload[16 : 16 + 8 * 2])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("FLOAT"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">8f", payload[16 : 16 + 8 * 4])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
elif data_type.endswith("ENUM"):
nstrings, = unpack(">h", payload[4:6])
strings = payload[6 : 6 + 16 * 26]
info += "nstrings=%r" % nstrings
elif data_type.endswith("CHAR"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8b", payload[16 : 16 + 8 * 1])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("LONG"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8i", payload[16 : 16 + 8 * 4])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("DOUBLE"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">8d", payload[16 : 16 + 8 * 8])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
else:
info += "?"
info = info.rstrip(", ")
return "{" + info + "}"
return ""
def convert(PV_name, value):
    """Coerce *value* to the data type of process variable *PV_name*.

    The PV's current value determines the target type: a scalar is cast
    with the current value's type, an array element-wise with the type of
    its first element (float when empty).  With no current value, *value*
    is returned unchanged.  Failed conversions fall back to the type's
    default value and are logged when DEBUG is set.
    """
    current_value = PV_value(PV_name)
    if current_value is None:
        # Nothing to convert to.
        new_value = value
    elif isarray(current_value):
        # Normalize a scalar into a one-element list, then convert each
        # element to the PV's element type.
        if not isarray(value):
            value = [value]
        dtype = type(current_value[0]) if len(current_value) > 0 else float
        try:
            new_value = [dtype(x) for x in value]
        except Exception as message:
            if DEBUG:
                debug(
                    "convert: %r from %r to %r failed: %r"
                    % (PV_name, value, dtype, message)
                )
            new_value = [dtype()] * len(value)
    else:
        dtype = type(current_value)
        try:
            new_value = dtype(value)
        except Exception as message:
            if DEBUG:
                debug(
                    "convert: %r from %r to %r failed: %r"
                    % (PV_name, value, dtype, message)
                )
            new_value = dtype()
    if DEBUG:
        debug("converted %r from %r to %r" % (PV_name, value, new_value))
    return new_value
def CA_type_old(value):
    """Channel Access data type for a Python variable as integer type code."""
    if isstring(value):
        return types["STRING"]
    if hasattr(value, "dtype"):  # numpy array
        from numpy import int8, int16, int32, float32, int64, float64
        # Checked in the same order as the original chain of ifs.
        for dtype, name in (
            (int16, "SHORT"),
            (float32, "FLOAT"),
            (int8, "CHAR"),
            (int32, "LONG"),
            (int64, "LONG"),
            (float64, "DOUBLE"),
            (bool, "LONG"),
        ):
            if value.dtype == dtype:
                return types[name]
        return types["STRING"]
    # For a plain list, the first element decides; empty defaults to DOUBLE.
    if isarray(value):
        if len(value) == 0:
            return types["DOUBLE"]
        value = value[0]
    if isint(value):
        return types["LONG"]
    if isfloat(value):
        return types["DOUBLE"]
    if isbool(value):
        return types["LONG"]
    return types["STRING"]
def CA_type(value):
    """Channel Access data type for a Python variable as integer type code.

    Arrays are classified by their first element (or by dtype for an empty
    numpy array); an empty plain sequence defaults to DOUBLE via the 0.0
    fallback.
    """
    CA_type = types["STRING"]
    import numpy
    if isarray(value):
        if len(value) > 0:
            value = value[0]
        elif hasattr(value, "dtype"):
            value = value.dtype.type()
        else:
            value = 0.0
    if isstring(value):
        CA_type = types["STRING"]
    elif type(value) == numpy.int16:
        CA_type = types["SHORT"]
    elif type(value) == numpy.float32:
        CA_type = types["FLOAT"]
    elif type(value) == numpy.int8:
        CA_type = types["CHAR"]
    elif type(value) == numpy.int32:
        CA_type = types["LONG"]
    elif type(value) == numpy.int64:
        CA_type = types["LONG"]
    elif type(value) == numpy.float64:
        CA_type = types["DOUBLE"]
    # FIX: numpy.bool was an alias of the builtin bool, deprecated in NumPy
    # 1.20 and removed in 1.24 (AttributeError on modern NumPy).  Accept both
    # the builtin bool (the old alias target) and numpy's bool scalar type.
    elif type(value) in (bool, numpy.bool_):
        CA_type = types["LONG"]
    elif isint(value):
        CA_type = types["LONG"]
    elif isfloat(value):
        CA_type = types["DOUBLE"]
    elif isbool(value):
        CA_type = types["LONG"]
    return CA_type
def CA_count(value):
    """Return the CA element count of *value*: the length for arrays,
    otherwise 1.  A string counts as a single element."""
    if isarray(value) and not isstring(value):
        return len(value)
    return 1
def CA_binary_data(value, data_type=None):
"""Binary data for network transmission
data_type: data type as integer or string (0 = "STRING", 1 = "SHORT", ...)
"""
payload = b""
if data_type is None:
data_type = CA_type(value)
data_type = type_name(data_type)
precision = 8 # Number of digits displayed in MEDM screen
from struct import pack
if data_type.startswith("STS_"):
status = 0 # 0 = normal
severity = 1 # 1 = success
payload += pack(">HH", status, severity)
# Add alignment padding to the header.
if data_type.endswith("CHAR"):
payload += b"\0"
elif data_type.endswith("DOUBLE"):
payload += b"\0" * 4
elif data_type.startswith("TIME_"):
# Add time header
from time import mktime, time
status = 0 # 0 = normal
severity = 1 # 1 = sucess
# The time stamp is represented as two uint32 values. The first is the
# number of seconds passed since 1 Jan 1990 00:00 GMT. The second is the
# number of nanoseconds within the second.
offset = mktime((1990, 1, 1, 0, 0, 0, 0, 0, 0)) - mktime(
(1970, 1, 1, 0, 0, 0, 0, 0, 0)
)
timestamp = time() - offset
seconds = int(timestamp)
nanoseconds = int((timestamp % 1) * 1e9)
payload += pack(">HHII", status, severity, seconds, nanoseconds)
# Add alignment padding to the header.
if data_type.endswith("SHORT"):
payload += b"\0" * 2
elif data_type.endswith("ENUM"):
payload += b"\0" * 2
elif data_type.endswith("CHAR"):
payload += b"\0" * 3
elif data_type.endswith("DOUBLE"):
payload += b"\0" * 4
elif data_type.startswith("GR_"):
status = 0 # 0 = normal
severity = 1 # 1 = success
payload += pack(">HH", status, severity)
if data_type.endswith("STRING"):
pass
elif data_type.endswith("SHORT"):
payload += b"\0" * (8 + 6 * 2) # unit,limits
elif data_type.endswith("FLOAT"):
payload += pack(">h", precision)
payload += b"\0" * (2 + 8 + 6 * 4) # pad,unit,limits
elif data_type.endswith("ENUM"):
payload += b"\0" * (2 + 16 * 26) # number of strings,strings
elif data_type.endswith("CHAR"):
payload += b"\0" * (8 + 6 * 1 + 1) # unit,limits,pad
elif data_type.endswith("LONG"):
payload += b"\0" * (8 + 6 * 4) # unit,limits
elif data_type.endswith("DOUBLE"):
payload += pack(">h", precision)
payload += b"\0" * (2 + 8 + 6 * 8) # pad,unit,limits
else:
if DEBUG:
debug("CA_binary_data: data type %r not supported\n" % data_type)
elif data_type.startswith("CTRL_"):
status = 0 # 0 = normal
severity = 1 # 1 = success
payload += pack(">HH", status, severity)
if data_type.endswith("STRING"):
pass
elif data_type.endswith("SHORT"):
payload += b"\0" * (8 + 8 * 2) # unit,limits
elif data_type.endswith("FLOAT"):
payload += pack(">h", precision)
payload += b"\0" * (2 + 8 + 8 * 4) # pad,unit,limits
elif data_type.endswith("ENUM"):
payload += b"\0" * (2 + 16 * 26) # number of strings,strings
elif data_type.endswith("CHAR"):
payload += b"\0" * (8 + 8 * 1 + 1) # unit,limits,pad
elif data_type.endswith("LONG"):
payload += b"\0" * (8 + 8 * 4) # unit,limits
elif data_type.endswith("DOUBLE"):
payload += pack(">h", precision)
payload += b"\0" * (2 + 8 + 8 * 8) # pad,unit,limits
else:
if DEBUG:
debug("CA_binary_data: data type %r not supported\n" % data_type)
from numpy import int8, int16, int32, float32, float64
if data_type.endswith("STRING"):
if isarray(value):
# Null-terminated strings.
payload += b"\0".join([str(v).encode("utf-8") for v in value])
else:
payload += str(value).encode("utf-8")
elif data_type.endswith("SHORT"):
if isarray(value):
for v in value:
payload += pack(">h", to(v, int16))
else:
payload += pack(">h", to(value, int16))
elif data_type.endswith("FLOAT"):
if isarray(value):
for v in value:
payload += pack(">f", to(v, float32))
else:
payload += pack(">f", to(value, float32))
elif data_type.endswith("ENUM"):
if isarray(value):
for v in value:
payload += pack(">h", to(v, int16))
else:
payload += pack(">h", to(value, int16))
elif data_type.endswith("CHAR"):
if isarray(value):
for | |
creation.
:param Mapping[str, str] tags: A map of tags to assign to ecs resources.
:param int task_count: The number of tasks to create based on the TaskDefinition. The default is 1.
"""
pulumi.set(__self__, "task_definition_arn", task_definition_arn)
if enable_ecs_managed_tags is not None:
pulumi.set(__self__, "enable_ecs_managed_tags", enable_ecs_managed_tags)
if enable_execute_command is not None:
pulumi.set(__self__, "enable_execute_command", enable_execute_command)
if group is not None:
pulumi.set(__self__, "group", group)
if launch_type is not None:
pulumi.set(__self__, "launch_type", launch_type)
if network_configuration is not None:
pulumi.set(__self__, "network_configuration", network_configuration)
if placement_constraints is not None:
pulumi.set(__self__, "placement_constraints", placement_constraints)
if platform_version is not None:
pulumi.set(__self__, "platform_version", platform_version)
if propagate_tags is not None:
pulumi.set(__self__, "propagate_tags", propagate_tags)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if task_count is not None:
pulumi.set(__self__, "task_count", task_count)
@property
@pulumi.getter(name="taskDefinitionArn")
def task_definition_arn(self) -> str:
"""
The ARN of the task definition to use if the event target is an Amazon ECS cluster.
"""
return pulumi.get(self, "task_definition_arn")
@property
@pulumi.getter(name="enableEcsManagedTags")
def enable_ecs_managed_tags(self) -> Optional[bool]:
"""
Specifies whether to enable Amazon ECS managed tags for the task.
"""
return pulumi.get(self, "enable_ecs_managed_tags")
@property
@pulumi.getter(name="enableExecuteCommand")
def enable_execute_command(self) -> Optional[bool]:
"""
Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task.
"""
return pulumi.get(self, "enable_execute_command")
@property
@pulumi.getter
def group(self) -> Optional[str]:
    """ECS task group for the task (at most 255 characters)."""
    task_group = pulumi.get(self, "group")
    return task_group
@property
@pulumi.getter(name="launchType")
def launch_type(self) -> Optional[str]:
    """Launch type the task runs on; must match a launch type compatibility of the target
    task definition. Valid values: empty string `""` (no launch type), `EC2`, or `FARGATE`.
    """
    lt = pulumi.get(self, "launch_type")
    return lt
@property
@pulumi.getter(name="networkConfiguration")
def network_configuration(self) -> Optional['outputs.EventTargetEcsTargetNetworkConfiguration']:
    """VPC subnets, security groups and public-IP setting for tasks using the awsvpc
    network mode. Required when launch_type is FARGATE (awsvpc is mandatory there).
    """
    net_cfg = pulumi.get(self, "network_configuration")
    return net_cfg
@property
@pulumi.getter(name="placementConstraints")
def placement_constraints(self) -> Optional[Sequence['outputs.EventTargetEcsTargetPlacementConstraint']]:
    """Placement constraint objects for the task — up to 10 per task, counting
    constraints in the task definition plus those given at runtime.
    """
    constraints = pulumi.get(self, "placement_constraints")
    return constraints
@property
@pulumi.getter(name="platformVersion")
def platform_version(self) -> Optional[str]:
    """Fargate platform version (numeric portion only, e.g. `1.1.0`).
    Used only when LaunchType is FARGATE.
    """
    version = pulumi.get(self, "platform_version")
    return version
@property
@pulumi.getter(name="propagateTags")
def propagate_tags(self) -> Optional[str]:
    """Whether task-definition tags propagate to the task. Unset means no
    propagation; propagation only happens at task creation.
    """
    mode = pulumi.get(self, "propagate_tags")
    return mode
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
    """Map of tags assigned to the ECS resources."""
    tag_map = pulumi.get(self, "tags")
    return tag_map
@property
@pulumi.getter(name="taskCount")
def task_count(self) -> Optional[int]:
    """Number of tasks to create from the TaskDefinition (defaults to 1)."""
    count = pulumi.get(self, "task_count")
    return count
@pulumi.output_type
class EventTargetEcsTargetNetworkConfiguration(dict):
    """Network configuration (awsvpc mode) for an ECS event target."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when raw dict access uses a camelCase wire key instead of the
        # snake_case property getter.
        suggest = {
            "assignPublicIp": "assign_public_ip",
            "securityGroups": "security_groups",
        }.get(key)

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in EventTargetEcsTargetNetworkConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        EventTargetEcsTargetNetworkConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        EventTargetEcsTargetNetworkConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 subnets: Sequence[str],
                 assign_public_ip: Optional[bool] = None,
                 security_groups: Optional[Sequence[str]] = None):
        """
        :param Sequence[str] subnets: The subnets associated with the task or service.
        :param bool assign_public_ip: Assign a public IP address to the ENI (Fargate launch type only). Valid values are `true` or `false`. Default `false`.
        :param Sequence[str] security_groups: The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used.
        """
        pulumi.set(__self__, "subnets", subnets)
        # Only record the optional fields that were actually supplied.
        for prop_name, prop_value in (("assign_public_ip", assign_public_ip),
                                      ("security_groups", security_groups)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def subnets(self) -> Sequence[str]:
        """Subnets associated with the task or service."""
        subnet_ids = pulumi.get(self, "subnets")
        return subnet_ids

    @property
    @pulumi.getter(name="assignPublicIp")
    def assign_public_ip(self) -> Optional[bool]:
        """Whether to assign a public IP to the ENI (Fargate only); defaults to `false`."""
        flag = pulumi.get(self, "assign_public_ip")
        return flag

    @property
    @pulumi.getter(name="securityGroups")
    def security_groups(self) -> Optional[Sequence[str]]:
        """Security groups for the task or service; the VPC default group is used when omitted."""
        groups = pulumi.get(self, "security_groups")
        return groups
@pulumi.output_type
class EventTargetEcsTargetPlacementConstraint(dict):
    """A single ECS task placement constraint for an event target."""

    def __init__(__self__, *,
                 type: str,
                 expression: Optional[str] = None):
        """
        :param str type: Type of constraint. The only valid values at this time are `memberOf` and `distinctInstance`.
        :param str expression: Cluster Query Language expression to apply to the constraint. Does not need to be specified for the `distinctInstance` type. For more information, see [Cluster Query Language in the Amazon EC2 Container Service Developer Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html).
        """
        values = {"type": type}
        if expression is not None:
            values["expression"] = expression
        for prop_name, prop_value in values.items():
            pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def type(self) -> str:
        """Constraint type; valid values are `memberOf` and `distinctInstance`."""
        constraint_type = pulumi.get(self, "type")
        return constraint_type

    @property
    @pulumi.getter
    def expression(self) -> Optional[str]:
        """Cluster Query Language expression for the constraint; not needed for
        `distinctInstance`.
        """
        expr = pulumi.get(self, "expression")
        return expr
@pulumi.output_type
class EventTargetHttpTarget(dict):
    """HTTP/API Gateway target parameters for an event target."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when raw dict access uses a camelCase wire key instead of the
        # snake_case property getter.
        suggest = {
            "headerParameters": "header_parameters",
            "pathParameterValues": "path_parameter_values",
            "queryStringParameters": "query_string_parameters",
        }.get(key)

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in EventTargetHttpTarget. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        EventTargetHttpTarget.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        EventTargetHttpTarget.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 header_parameters: Optional[Mapping[str, str]] = None,
                 path_parameter_values: Optional[Sequence[str]] = None,
                 query_string_parameters: Optional[Mapping[str, str]] = None):
        """
        :param Mapping[str, str] header_parameters: Enables you to specify HTTP headers to add to the request.
        :param Sequence[str] path_parameter_values: The list of values that correspond sequentially to any path variables in your endpoint ARN (for example `arn:aws:execute-api:us-east-1:123456:myapi/*/POST/pets/*`).
        :param Mapping[str, str] query_string_parameters: Represents keys/values of query string parameters that are appended to the invoked endpoint.
        """
        # Only record the fields that were actually supplied.
        for prop_name, prop_value in (
                ("header_parameters", header_parameters),
                ("path_parameter_values", path_parameter_values),
                ("query_string_parameters", query_string_parameters)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="headerParameters")
    def header_parameters(self) -> Optional[Mapping[str, str]]:
        """HTTP headers to add to the request."""
        headers = pulumi.get(self, "header_parameters")
        return headers

    @property
    @pulumi.getter(name="pathParameterValues")
    def path_parameter_values(self) -> Optional[Sequence[str]]:
        """Values bound, in order, to the path variables of the endpoint ARN
        (e.g. `arn:aws:execute-api:us-east-1:123456:myapi/*/POST/pets/*`).
        """
        path_values = pulumi.get(self, "path_parameter_values")
        return path_values

    @property
    @pulumi.getter(name="queryStringParameters")
    def query_string_parameters(self) -> Optional[Mapping[str, str]]:
        """Query-string key/value pairs appended to the invoked endpoint."""
        qs = pulumi.get(self, "query_string_parameters")
        return qs
@pulumi.output_type
class EventTargetInputTransformer(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "inputTemplate":
suggest = "input_template"
elif key == "inputPaths":
suggest = "input_paths"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EventTargetInputTransformer. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EventTargetInputTransformer.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EventTargetInputTransformer.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
input_template: str,
input_paths: Optional[Mapping[str, str]] = None):
"""
:param str input_template: Template to customize data sent to the target. Must be valid JSON. To send a string value, the string value must include double quotes. Values must be escaped for both JSON and the provider, e.g., `"\"Your string goes here.\\nA new line.\""`
:param Mapping[str, str] input_paths: | |
<gh_stars>1-10
"""Módulo 'normalizer' de georef-ar-api
Contiene funciones que manejan la lógica de procesamiento
de los recursos que expone la API.
"""
import logging
from flask import current_app
from service import data, params, formatter, address, location, utils
from service import names as N
from service.query_result import QueryResult
logger = logging.getLogger('georef')
def get_elasticsearch():
    """Return the Elasticsearch connection for the active Flask session,
    creating and caching it on the application object on first use.

    Returns:
        Elasticsearch: active Elasticsearch connection.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-access layer.
    """
    try:
        # EAFP: reuse the cached connection when it already exists.
        return current_app.elasticsearch
    except AttributeError:
        current_app.elasticsearch = data.elasticsearch_connection(
            hosts=current_app.config['ES_HOSTS'],
            sniff=current_app.config['ES_SNIFF'],
            sniffer_timeout=current_app.config['ES_SNIFFER_TIMEOUT']
        )
        return current_app.elasticsearch
def _process_entity_single(request, name, param_parser, key_translations):
    """Handle a GET request that queries data for one entity type.

    Returns an HTTP 400 response when parameter parsing fails.

    Args:
        request (flask.Request): Flask GET request.
        name (str): Entity name.
        param_parser (ParameterSet): Object used to parse the parameters.
        key_translations (dict): Key translations used to convert the user's
            parameter dict into a dict representing an Elasticsearch query.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-access layer.

    Returns:
        flask.Response: HTTP response.
    """
    try:
        qs_params = param_parser.parse_get_params(request.args)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_single(e.errors, e.fmt)

    # Build the Elasticsearch query from the parsed parameters.
    query = utils.translate_keys(qs_params, key_translations,
                                 ignore=[N.FLATTEN, N.FORMAT])

    # Build the formatting (presentation) rules from the parsed parameters.
    fmt = {
        key: qs_params[key]
        for key in [N.FLATTEN, N.FIELDS, N.FORMAT]
        if key in qs_params
    }

    # Shapefile output also needs the geometry field. Use .get() so a missing
    # FORMAT key cannot raise KeyError (fmt only contains keys present in
    # qs_params).
    if fmt.get(N.FORMAT) == 'shp':
        query['fields'] += (N.GEOM,)

    es = get_elasticsearch()
    search_class = data.entity_search_class(name)
    search = search_class(query)

    data.ElasticsearchSearch.run_searches(es, [search])

    query_result = QueryResult.from_entity_list(search.result.hits,
                                                search.result.total,
                                                search.result.offset)

    return formatter.create_ok_response(name, query_result, fmt)
def _process_entity_bulk(request, name, param_parser, key_translations):
    """Handle a POST request that queries data for a list of entities.

    Returns an HTTP 400 response when parameter parsing fails.

    Args:
        request (flask.Request): Flask POST request.
        name (str): Entity name.
        param_parser (ParameterSet): Object used to parse the parameters.
        key_translations (dict): Key translations used to convert the user's
            parameter dicts into a list of dicts representing Elasticsearch
            queries.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-access layer.

    Returns:
        flask.Response: HTTP response.
    """
    try:
        body_params = param_parser.parse_post_params(
            request.args, request.json, name)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_bulk(e.errors)

    queries = []
    formats = []
    for single_params in body_params:
        # Elasticsearch query built from one set of parsed parameters.
        queries.append(utils.translate_keys(single_params, key_translations,
                                            ignore=[N.FLATTEN, N.FORMAT]))
        # Formatting rules built from the same set of parsed parameters.
        formats.append({
            fmt_key: single_params[fmt_key]
            for fmt_key in (N.FLATTEN, N.FIELDS)
            if fmt_key in single_params
        })

    es = get_elasticsearch()
    search_class = data.entity_search_class(name)
    searches = [search_class(single_query) for single_query in queries]

    data.ElasticsearchSearch.run_searches(es, searches)

    query_results = []
    for search in searches:
        result = search.result
        query_results.append(QueryResult.from_entity_list(result.hits,
                                                          result.total,
                                                          result.offset))

    return formatter.create_ok_response_bulk(name, query_results, formats)
def _process_entity(request, name, param_parser, key_translations):
    """Handle a GET or POST request that queries entity data.

    Returns an HTTP 400 response when parameter parsing fails and an
    HTTP 500 response when an internal error occurs.

    Args:
        request (flask.Request): Flask GET or POST request.
        name (str): Entity name.
        param_parser (ParameterSet): Object used to parse the parameters.
        key_translations (dict): Key translations used to convert the user's
            parameter dicts into a list of dicts representing Elasticsearch
            queries.

    Returns:
        flask.Response: HTTP response.
    """
    # Dispatch on the HTTP verb: GET handles a single query, POST a batch.
    if request.method == 'GET':
        handler = _process_entity_single
    else:
        handler = _process_entity_bulk

    try:
        return handler(request, name, param_parser, key_translations)
    except data.DataConnectionException:
        logger.exception(
            'Excepción en manejo de consulta para recurso: {}'.format(name))
        return formatter.create_internal_error_response()
def process_state(request):
    """Handle a GET or POST request that queries province (state) data.

    Returns an HTTP 400 response when parameter parsing fails.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response.
    """
    key_translations = {
        N.ID: 'ids',
        N.NAME: 'name',
        N.INTERSECTION: 'geo_shape_ids',
        N.EXACT: 'exact',
        N.ORDER: 'order',
        N.FIELDS: 'fields',
        N.OFFSET: 'offset',
        N.MAX: 'size'
    }
    return _process_entity(request, N.STATES, params.PARAMS_STATES,
                           key_translations)
def process_department(request):
    """Handle a GET or POST request that queries department data.

    Returns an HTTP 400 response when parameter parsing fails.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response.
    """
    key_translations = {
        N.ID: 'ids',
        N.NAME: 'name',
        N.INTERSECTION: 'geo_shape_ids',
        N.STATE: 'state',
        N.EXACT: 'exact',
        N.ORDER: 'order',
        N.FIELDS: 'fields',
        N.OFFSET: 'offset',
        N.MAX: 'size'
    }
    return _process_entity(request, N.DEPARTMENTS, params.PARAMS_DEPARTMENTS,
                           key_translations)
def process_municipality(request):
    """Handle a GET or POST request that queries municipality data.

    Returns an HTTP 400 response when parameter parsing fails.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response.
    """
    key_translations = {
        N.ID: 'ids',
        N.NAME: 'name',
        N.INTERSECTION: 'geo_shape_ids',
        N.STATE: 'state',
        N.EXACT: 'exact',
        N.ORDER: 'order',
        N.FIELDS: 'fields',
        N.OFFSET: 'offset',
        N.MAX: 'size'
    }
    return _process_entity(request, N.MUNICIPALITIES,
                           params.PARAMS_MUNICIPALITIES, key_translations)
def process_locality(request):
    """Handle a GET or POST request that queries locality data.

    Returns an HTTP 400 response when parameter parsing fails.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response.
    """
    key_translations = {
        N.ID: 'ids',
        N.NAME: 'name',
        N.STATE: 'state',
        N.DEPT: 'department',
        N.MUN: 'municipality',
        N.EXACT: 'exact',
        N.ORDER: 'order',
        N.FIELDS: 'fields',
        N.OFFSET: 'offset',
        N.MAX: 'size'
    }
    return _process_entity(request, N.LOCALITIES, params.PARAMS_LOCALITIES,
                           key_translations)
def _build_street_query_format(parsed_params):
    """Build two dicts from the parsed query parameters: the first
    represents the Elasticsearch query to run, the second the formatting
    (presentation) rules to apply to the data it returns.

    Args:
        parsed_params (dict): Parameters of a query against the streets index.

    Returns:
        tuple: query dict and format dict.
    """
    translations = {
        N.ID: 'ids',
        N.NAME: 'name',
        N.INTERSECTION: 'geo_shape_ids',
        N.STATE: 'state',
        N.DEPT: 'department',
        N.EXACT: 'exact',
        N.FIELDS: 'fields',
        N.CATEGORY: 'category',
        N.OFFSET: 'offset',
        N.ORDER: 'order',
        N.MAX: 'size'
    }

    # Elasticsearch query built from the parsed parameters.
    query = utils.translate_keys(parsed_params, translations,
                                 ignore=[N.FLATTEN, N.FORMAT])

    # Formatting rules: keep only the presentation keys actually supplied.
    fmt = {}
    for fmt_key in (N.FLATTEN, N.FIELDS, N.FORMAT):
        if fmt_key in parsed_params:
            fmt[fmt_key] = parsed_params[fmt_key]

    return query, fmt
def _process_street_single(request):
    """Handle a GET request that queries street data.

    Returns an HTTP 400 response when parameter parsing fails.

    Args:
        request (flask.Request): Flask GET request.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-access layer.

    Returns:
        flask.Response: HTTP response.
    """
    try:
        qs_params = params.PARAMS_STREETS.parse_get_params(request.args)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_single(e.errors, e.fmt)

    query, fmt = _build_street_query_format(qs_params)

    # Shapefile output also needs the geometry field. Use .get() so a missing
    # FORMAT key cannot raise KeyError (fmt only contains keys present in
    # qs_params).
    if fmt.get(N.FORMAT) == 'shp':
        query['fields'] += (N.GEOM,)

    es = get_elasticsearch()
    search = data.StreetsSearch(query)

    data.ElasticsearchSearch.run_searches(es, [search])

    query_result = QueryResult.from_entity_list(search.result.hits,
                                                search.result.total,
                                                search.result.offset)

    return formatter.create_ok_response(N.STREETS, query_result, fmt)
def _process_street_bulk(request):
    """Handle a POST request that queries street data in bulk.

    Returns an HTTP 400 response when parameter parsing fails.

    Args:
        request (flask.Request): Flask POST request.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-access layer.

    Returns:
        flask.Response: HTTP response.
    """
    try:
        body_params = params.PARAMS_STREETS.parse_post_params(
            request.args, request.json, N.STREETS)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_bulk(e.errors)

    queries = []
    formats = []
    for single_params in body_params:
        single_query, single_fmt = _build_street_query_format(single_params)
        queries.append(single_query)
        formats.append(single_fmt)

    es = get_elasticsearch()
    searches = [data.StreetsSearch(single_query) for single_query in queries]

    data.ElasticsearchSearch.run_searches(es, searches)

    query_results = []
    for search in searches:
        result = search.result
        query_results.append(QueryResult.from_entity_list(result.hits,
                                                          result.total,
                                                          result.offset))

    return formatter.create_ok_response_bulk(N.STREETS, query_results, formats)
def process_street(request):
    """Handle a GET or POST request that queries street data.

    Returns an HTTP 400 response when parameter parsing fails and an
    HTTP 500 response when an internal error occurs.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response.
    """
    # Dispatch on the HTTP verb: GET handles a single query, POST a batch.
    if request.method == 'GET':
        handler = _process_street_single
    else:
        handler = _process_street_bulk

    try:
        return handler(request)
    except data.DataConnectionException:
        logger.exception(
            'Excepción en manejo de consulta para recurso: calles')
        return formatter.create_internal_error_response()
def _build_address_query_format(parsed_params):
"""Construye dos diccionarios a partir de parámetros de consulta
recibidos, el primero representando la query a Elasticsearch a
realizar y el segundo representando las propiedades de formato
(presentación) que se le debe dar a los datos obtenidos de la misma.
| |
# coding=utf-8
##########################################################
# Authors: <NAME>, <NAME>, <NAME>
# Affiliation: University of Geneva
# Version: 1.4.5
# Date: 13.01.2022
# Downscaling of Swiss LCLU data
##########################################################
# import libraries
import numpy, math
import pandas as pd
import os
from osgeo import gdal # import GDAL
import shutil
##################################################################################################
# Step 1: create a land use grid at 100m resolution from Landuse100 statistics
# Step 2: remove from Landuse100 categories that correspond to linear features (river, road, train)
# Step 3: Rasterize the primary surfaces land cover vector base map at a 25m resolution (BaseMap25)
##################################################################################################
####################################################################################
# Step 4: Visit each BaseMap25 pixel
# Step 10: Loop from point 4 to 11 with next BaseMap25 pixel
# Input: BaseMap25 is the swisstopo layer for which we will visit each pixel
# Input: Expert table to get acceptable values and related weight
####################################################################################
# Copy the input rasters to /scratch storage on the cluster so they are
# accessible to all compute nodes (local reads are much faster than shared FS).
originalp = 'PRI09_25.tiff'
targetp = '/scratch/PRI09_25.tiff'
shutil.copyfile(originalp, targetp)
originala = 'AS09_72_25.tiff'
targeta = '/scratch/AS09_72_25.tiff'
shutil.copyfile(originala, targeta)
# Get the size (columns/rows) of the Base Map 25 raster.
# NOTE(review): gdal.Open presumably returns None on failure — the next
# attribute access would then raise; confirm error handling is acceptable here.
raster = gdal.Open(targetp) # open raster
cols = raster.RasterXSize # get columns
rows = raster.RasterYSize # get rows
band = raster.GetRasterBand(1) # get band
# Load the ENTIRE raster into memory at once (cols x rows array).
data = band.ReadAsArray(0, 0, cols, rows) # read raster at once
print('BaseMap25 - Image Size: Rows:'+str(rows)+' Columns:'+str(cols))
# Get the size (columns/rows) of the Landuse 100 raster (same full-load pattern).
#LUrast = gdal.Open('AS09_72s25.tiff')
LUrast = gdal.Open(targeta)
cols2 = LUrast.RasterXSize
rows2 = LUrast.RasterYSize
band2 = LUrast.GetRasterBand(1)
data2 = band2.ReadAsArray(0, 0, cols2, rows2)
print('Landuse100 - Image Size: Rows:'+str(rows2)+' Columns:'+str(cols2))
###### Baobab - chunking ######
# Split the raster into an nR x nC grid of chunks; each SLURM array task
# (index i in [0, nR*nC)) processes exactly one chunk.
nC = 30 #nr of columns of chunks
nR = 30 #nr of rows of chunks
# Chunk extents as floats; int() truncation below derives pixel boundaries.
# NOTE(review): float division + int() truncation may drop the trailing
# row/column of pixels when rows/cols are not exact multiples of nR/nC —
# confirm the chunk grid covers the full raster.
rowst = rows / nR
colst = cols / nC
# Map the 1-D SLURM array task id to a (row, column) position in the chunk grid.
i = int( os.environ['SLURM_ARRAY_TASK_ID'] )
r = i //nC
c = i % nC
print (i,r,c)
# Pixel bounds of this task's chunk: columns [col0, col1), rows [row0, row1).
col0 = int(c*colst)
col1 = int((c+1)*colst)
row0 = int(r*rowst)
row1 = int((r+1)*rowst)
print( f"Computing chunk {i} ({r}x{c})")
print( f"rows: {row0} - {row1}" )
print( f"cols: {col0} - {col1}" )
###### create output raster file ######
# One GeoTiff output per chunk, named after the chunk's grid position; the
# per-chunk files are presumably mosaicked after all array tasks finish.
ds_raster = f"output/output_{r}x{c}.tif"
#ds_raster = 'LU-CH.tif' # filename
driver_tiff = gdal.GetDriverByName('GTiff') # GeoTiff driver
ds = driver_tiff.Create(ds_raster, col1-col0, row1-row0, 1, gdal.GDT_Byte) # create the output file (1 band, 8-bit)
# NOTE(review): this copies the FULL raster's geotransform onto a chunk-sized
# file — the chunk's origin offset (col0, row0) does not appear to be applied,
# so the chunk may be georeferenced at the full raster's origin; confirm the
# downstream mosaicking step accounts for this.
ds.SetGeoTransform(raster.GetGeoTransform()) # copy the coordinate system
ds.SetProjection(raster.GetProjection()) # copy the projection
ds.FlushCache() # save file
ds = None # close file (GDAL flushes/releases on dereference)
##### open expert table #####
# The expert table maps BaseMap25 categories to acceptable Landuse100
# categories with weights; copy it to node-local /scratch before reading.
#loc = 'expert_table_72cat_v4.xls' # path to the expert table
originalx = 'expert_table_72cat_v4.xls'
targetx = '/scratch/expert_table_72cat_v4.xls'
shutil.copyfile(originalx, targetx)
sheet = pd.read_excel(io=targetx, sheet_name="Sheet1")
# Table dimensions, used as iteration bounds in the per-pixel loop below.
xls_cols = len(sheet.columns)
print("Excel cols ", xls_cols)
xls_rows = len(sheet.index)
print("Excel rows ", xls_rows)
#iterate by lines and columns
for y in range(row0, row1):
for x in range(col0, col1):
value = data[y, x] #get pixel value (BaseMap25)
if value > 0 and value < 255: #only do something if the pixel value is greater than 0 (0=country mask) and smaller than 255 (no data)
#print('BaseMap25 - Row:'+str(y), 'Column:'+str(x), 'Value:'+str(value)) #to locate current pixel and value
##############################################################################################################
#Step 5: According to expert system table, select those categories that could be elected for the current pixel
##############################################################################################################
BMvalue1 = [] # create an empty array to be filled by values 1 for BaspeMap25
BMvalue2 = [] # create an empty array to be filled by values 2 for BaspeMap25
BMvalue3 = [] # create an empty array to be filled by values 3 for BaspeMap25
for i in range(xls_cols): #iterate in columns to find the BaseMap25 value
if sheet.iat[1, i] == value: #once identified the corresponding value
j = 2 #start at the 3rd row to remove headers
while j < xls_rows: #read the identified column
if sheet.iat[j, i] == 1: #acceptable weight values for 1, possible choices
BMvalue1.append(str(int(sheet.iat[j, 1]))+';'+str(sheet.iat[j, 2])+';'+str(int(sheet.iat[j, i]))) #insert [CODE, Landuse100, weight]
if sheet.iat[j, i] == 2: #acceptable weight values for 2, unique choice
BMvalue2.append(str(int(sheet.iat[j, 1]))+';'+str(sheet.iat[j, 2])+';'+str(int(sheet.iat[j, i]))) #insert [CODE, Landuse100, weight]
if sheet.iat[j, i] == 3: #acceptable weight values for 3, best replacement choice in case of lack of decision
BMvalue3.append(str(int(sheet.iat[j, 1]))+';'+str(sheet.iat[j, 2])+';'+str(int(sheet.iat[j, i]))) #insert [CODE, Landuse100, weight]
j = j+1 #iterate until last row of the expert table
#print('Number of acceptable values 1 in the expert table:' + str(len(BMvalue1)))
#print('Number of acceptable values 2 in the expert table:' + str(len(BMvalue2)))
#print('Number of acceptable values 3 in the expert table:' + str(len(BMvalue3)))
############################################################################################
# Step 6: Select among the 36 nearest Landuse100 neigbours those with acceptable categories
# Input: Landuse100 is from geostat and for which we will look for the 36 nearest neighboors
############################################################################################
sizeWin = 20 #definition of the size of the window to identify nearest neighboors, should be 24 to match 600m
value2 = data2[y, x] # get pixel value (Landuse100)
#print('Landuse100 - Row:'+str(y)+' Column:'+str(x)+' Value:'+str(value2))
LUvalue = [] # create an empty array to be filled by values for Landuse100
#iterate in the neighbours window starting from the UL corner
#yRow = y - round(sizeWin/2) #UL coordinate for origin of window
yRow = round(y/4)*4 - 9
#for a in range(sizeWin): # row
for a in range(6):
#xCol = x - round(sizeWin/2) # UL coordinate for origin of window
xCol = round(x/4)*4 - 10
#for b in range(sizeWin): # column
for b in range(6):
#print("yRow, xCol :", yRow, xCol)
if (yRow >= 0 and xCol >= 0 and yRow < rows and xCol < cols):
if data2[yRow, xCol] < 255: # only pixel values inside Switzerland, nodata = 255
LUvalue.append(
str(yRow) + ';' + str(xCol) + ';' + str(data2[yRow, xCol])) # insert [Row;Column;Value]
xCol = xCol + 4 #move from 4 pixels to correspond to a 100 pixel
#print("search x", xCol)
yRow = yRow + 4 #move form 4 pixels to correspond to a 100 pixel
#print("search y", yRow)
#print('Number of acceptable values in Landuse100:' + str(len(LUvalue)))
if (len(LUvalue)) == 0: #if not acceptable values, empty array
print('Landuse100 array is empty')
uniqueValues = [0] #then the uniqueValues array is equal to 0 > pixelArrayValue will be empty
########################################################################
# Step 7: Calculate the inverse distance to each neighbour
# Step 8: Sum up the inverse distances for each category
# Step 9: Assign the category with higher score to the BaseMap25 pixel
# Input: LUvalue array; [optional] Alti 25 for Z values
########################################################################
newArray = []
pixelValueArray = []
pixelValue = 0
uniqueValues = []
###### Case 2 #####
#print('BMValue1 length:' + str(len(BMvalue1)))
#print('BMValue2 length:' + str(len(BMvalue2)))
#print('BMValue3 length:' + str(len(BMvalue3)))
if len(BMvalue2) > 0: # unique value case; BM25 value = 2 then assign the only value possible in LU100
pixelValue = BMvalue2[0].split(';')[0] # directly assign the value
#print('Assigned pixel value case 2: ' + str(pixelValue))
###### Case 3 #####
if len(BMvalue1) > 0 and len(BMvalue3) > 0 and len(BMvalue2)==0: #case with possible value (1) and (3); (3) = default choice
for d in range(len(LUvalue)):
newArray.append(LUvalue[d].split(';')[2]) # position 2 is the value
uniqueValues = numpy.unique(newArray) # get unique values from the array
for m in range(len(BMvalue1)): #iterate in all possible values for BM25 class = 1
for n in range(len(uniqueValues)): #iterate in all possible unique values of LU100
if uniqueValues[n] == BMvalue1[m].split(';')[0]: #compare values from BM25 and LU100
pixelValueArray.append(int(uniqueValues[n])) #insert in array only acceptable values
for m in range(len(BMvalue3)): #iterate in all possible values for BM25 class = 3
for n in range(len(uniqueValues)): #iterate in all possible unique values of LU100
if uniqueValues[n] == BMvalue3[m].split(';')[0]: #compare values from BM25 and LU100
pixelValueArray.append(int(uniqueValues[n])) #insert in array only acceptable values
if len(pixelValueArray) == 1: # if only 1 value is stored in the array
pixelValue = int(pixelValueArray[0]) # assign the new pixel value to be written in the new raster file
#print('Assigned pixel value DD: ' + str(pixelValue))
elif len(pixelValueArray) == 0: #in case the acceptable value array is empty, assign the default (3) value
pixelValue = BMvalue3[0].split(';')[0] # assign the default (3) value
#print('Assigned default pixel value case 3 ' + str(pixelValue))
else:
pxVal = [] # store class and sum of IDW
pxVal2 = [] # store only IDW values to identify the highest one
for l in range(
len(pixelValueArray)): # iterate in LUvalue array to get position and calculate distances
px = [] # array for measuring distance
idwClass = 0 # used for summing IDW
for i in range(len(LUvalue)):
if pixelValueArray[l] == int(LUvalue[i].split(';')[2]): # ensure that we iterate only with | |
<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from itertools import chain
from google.api_core.protobuf_helpers import get_messages
from google.ads.google_ads import util
# Fail fast at import time on unsupported interpreters: this generated
# client depends on Python 3.7+ behavior (e.g. module __getattr__, PEP 562).
if not sys.version_info >= (3, 7):
    raise ImportError('This module requires Python 3.7 or later.')
_lazy_name_to_package_map = dict(
ad_asset_pb2='google.ads.google_ads.v6.proto.common',
ad_type_infos_pb2='google.ads.google_ads.v6.proto.common',
asset_policy_pb2='google.ads.google_ads.v6.proto.common',
asset_types_pb2='google.ads.google_ads.v6.proto.common',
bidding_pb2='google.ads.google_ads.v6.proto.common',
click_location_pb2='google.ads.google_ads.v6.proto.common',
criteria_pb2='google.ads.google_ads.v6.proto.common',
criterion_category_availability_pb2='google.ads.google_ads.v6.proto.common',
custom_parameter_pb2='google.ads.google_ads.v6.proto.common',
dates_pb2='google.ads.google_ads.v6.proto.common',
explorer_auto_optimizer_setting_pb2='google.ads.google_ads.v6.proto.common',
extensions_pb2='google.ads.google_ads.v6.proto.common',
feed_common_pb2='google.ads.google_ads.v6.proto.common',
feed_item_set_filter_type_infos_pb2='google.ads.google_ads.v6.proto.common',
final_app_url_pb2='google.ads.google_ads.v6.proto.common',
frequency_cap_pb2='google.ads.google_ads.v6.proto.common',
keyword_plan_common_pb2='google.ads.google_ads.v6.proto.common',
matching_function_pb2='google.ads.google_ads.v6.proto.common',
metrics_pb2='google.ads.google_ads.v6.proto.common',
offline_user_data_pb2='google.ads.google_ads.v6.proto.common',
policy_pb2='google.ads.google_ads.v6.proto.common',
real_time_bidding_setting_pb2='google.ads.google_ads.v6.proto.common',
segments_pb2='google.ads.google_ads.v6.proto.common',
simulation_pb2='google.ads.google_ads.v6.proto.common',
tag_snippet_pb2='google.ads.google_ads.v6.proto.common',
targeting_setting_pb2='google.ads.google_ads.v6.proto.common',
text_label_pb2='google.ads.google_ads.v6.proto.common',
url_collection_pb2='google.ads.google_ads.v6.proto.common',
user_lists_pb2='google.ads.google_ads.v6.proto.common',
value_pb2='google.ads.google_ads.v6.proto.common',
access_reason_pb2='google.ads.google_ads.v6.proto.enums',
access_role_pb2='google.ads.google_ads.v6.proto.enums',
account_budget_proposal_status_pb2='google.ads.google_ads.v6.proto.enums',
account_budget_proposal_type_pb2='google.ads.google_ads.v6.proto.enums',
account_budget_status_pb2='google.ads.google_ads.v6.proto.enums',
account_link_status_pb2='google.ads.google_ads.v6.proto.enums',
ad_customizer_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
ad_destination_type_pb2='google.ads.google_ads.v6.proto.enums',
ad_group_ad_rotation_mode_pb2='google.ads.google_ads.v6.proto.enums',
ad_group_ad_status_pb2='google.ads.google_ads.v6.proto.enums',
ad_group_criterion_approval_status_pb2='google.ads.google_ads.v6.proto.enums',
ad_group_criterion_status_pb2='google.ads.google_ads.v6.proto.enums',
ad_group_status_pb2='google.ads.google_ads.v6.proto.enums',
ad_group_type_pb2='google.ads.google_ads.v6.proto.enums',
ad_network_type_pb2='google.ads.google_ads.v6.proto.enums',
ad_serving_optimization_status_pb2='google.ads.google_ads.v6.proto.enums',
ad_strength_pb2='google.ads.google_ads.v6.proto.enums',
ad_type_pb2='google.ads.google_ads.v6.proto.enums',
advertising_channel_sub_type_pb2='google.ads.google_ads.v6.proto.enums',
advertising_channel_type_pb2='google.ads.google_ads.v6.proto.enums',
affiliate_location_feed_relationship_type_pb2='google.ads.google_ads.v6.proto.enums',
affiliate_location_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
age_range_type_pb2='google.ads.google_ads.v6.proto.enums',
app_campaign_app_store_pb2='google.ads.google_ads.v6.proto.enums',
app_campaign_bidding_strategy_goal_type_pb2='google.ads.google_ads.v6.proto.enums',
app_payment_model_type_pb2='google.ads.google_ads.v6.proto.enums',
app_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
app_store_pb2='google.ads.google_ads.v6.proto.enums',
app_url_operating_system_type_pb2='google.ads.google_ads.v6.proto.enums',
asset_field_type_pb2='google.ads.google_ads.v6.proto.enums',
asset_link_status_pb2='google.ads.google_ads.v6.proto.enums',
asset_performance_label_pb2='google.ads.google_ads.v6.proto.enums',
asset_type_pb2='google.ads.google_ads.v6.proto.enums',
attribution_model_pb2='google.ads.google_ads.v6.proto.enums',
batch_job_status_pb2='google.ads.google_ads.v6.proto.enums',
bid_modifier_source_pb2='google.ads.google_ads.v6.proto.enums',
bidding_source_pb2='google.ads.google_ads.v6.proto.enums',
bidding_strategy_status_pb2='google.ads.google_ads.v6.proto.enums',
bidding_strategy_type_pb2='google.ads.google_ads.v6.proto.enums',
billing_setup_status_pb2='google.ads.google_ads.v6.proto.enums',
brand_safety_suitability_pb2='google.ads.google_ads.v6.proto.enums',
budget_campaign_association_status_pb2='google.ads.google_ads.v6.proto.enums',
budget_delivery_method_pb2='google.ads.google_ads.v6.proto.enums',
budget_period_pb2='google.ads.google_ads.v6.proto.enums',
budget_status_pb2='google.ads.google_ads.v6.proto.enums',
budget_type_pb2='google.ads.google_ads.v6.proto.enums',
call_conversion_reporting_state_pb2='google.ads.google_ads.v6.proto.enums',
call_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
callout_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
campaign_criterion_status_pb2='google.ads.google_ads.v6.proto.enums',
campaign_draft_status_pb2='google.ads.google_ads.v6.proto.enums',
campaign_experiment_status_pb2='google.ads.google_ads.v6.proto.enums',
campaign_experiment_traffic_split_type_pb2='google.ads.google_ads.v6.proto.enums',
campaign_experiment_type_pb2='google.ads.google_ads.v6.proto.enums',
campaign_serving_status_pb2='google.ads.google_ads.v6.proto.enums',
campaign_shared_set_status_pb2='google.ads.google_ads.v6.proto.enums',
campaign_status_pb2='google.ads.google_ads.v6.proto.enums',
change_client_type_pb2='google.ads.google_ads.v6.proto.enums',
change_event_resource_type_pb2='google.ads.google_ads.v6.proto.enums',
change_status_operation_pb2='google.ads.google_ads.v6.proto.enums',
change_status_resource_type_pb2='google.ads.google_ads.v6.proto.enums',
click_type_pb2='google.ads.google_ads.v6.proto.enums',
combined_audience_status_pb2='google.ads.google_ads.v6.proto.enums',
content_label_type_pb2='google.ads.google_ads.v6.proto.enums',
conversion_action_category_pb2='google.ads.google_ads.v6.proto.enums',
conversion_action_counting_type_pb2='google.ads.google_ads.v6.proto.enums',
conversion_action_status_pb2='google.ads.google_ads.v6.proto.enums',
conversion_action_type_pb2='google.ads.google_ads.v6.proto.enums',
conversion_adjustment_type_pb2='google.ads.google_ads.v6.proto.enums',
conversion_attribution_event_type_pb2='google.ads.google_ads.v6.proto.enums',
conversion_lag_bucket_pb2='google.ads.google_ads.v6.proto.enums',
conversion_or_adjustment_lag_bucket_pb2='google.ads.google_ads.v6.proto.enums',
criterion_category_channel_availability_mode_pb2='google.ads.google_ads.v6.proto.enums',
criterion_category_locale_availability_mode_pb2='google.ads.google_ads.v6.proto.enums',
criterion_system_serving_status_pb2='google.ads.google_ads.v6.proto.enums',
criterion_type_pb2='google.ads.google_ads.v6.proto.enums',
custom_audience_member_type_pb2='google.ads.google_ads.v6.proto.enums',
custom_audience_status_pb2='google.ads.google_ads.v6.proto.enums',
custom_audience_type_pb2='google.ads.google_ads.v6.proto.enums',
custom_interest_member_type_pb2='google.ads.google_ads.v6.proto.enums',
custom_interest_status_pb2='google.ads.google_ads.v6.proto.enums',
custom_interest_type_pb2='google.ads.google_ads.v6.proto.enums',
custom_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
customer_match_upload_key_type_pb2='google.ads.google_ads.v6.proto.enums',
customer_pay_per_conversion_eligibility_failure_reason_pb2='google.ads.google_ads.v6.proto.enums',
data_driven_model_status_pb2='google.ads.google_ads.v6.proto.enums',
day_of_week_pb2='google.ads.google_ads.v6.proto.enums',
device_pb2='google.ads.google_ads.v6.proto.enums',
display_ad_format_setting_pb2='google.ads.google_ads.v6.proto.enums',
display_upload_product_type_pb2='google.ads.google_ads.v6.proto.enums',
distance_bucket_pb2='google.ads.google_ads.v6.proto.enums',
dsa_page_feed_criterion_field_pb2='google.ads.google_ads.v6.proto.enums',
education_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
extension_setting_device_pb2='google.ads.google_ads.v6.proto.enums',
extension_type_pb2='google.ads.google_ads.v6.proto.enums',
external_conversion_source_pb2='google.ads.google_ads.v6.proto.enums',
feed_attribute_type_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_quality_approval_status_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_quality_disapproval_reason_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_set_status_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_set_string_filter_type_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_status_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_target_device_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_target_status_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_target_type_pb2='google.ads.google_ads.v6.proto.enums',
feed_item_validation_status_pb2='google.ads.google_ads.v6.proto.enums',
feed_link_status_pb2='google.ads.google_ads.v6.proto.enums',
feed_mapping_criterion_type_pb2='google.ads.google_ads.v6.proto.enums',
feed_mapping_status_pb2='google.ads.google_ads.v6.proto.enums',
feed_origin_pb2='google.ads.google_ads.v6.proto.enums',
feed_status_pb2='google.ads.google_ads.v6.proto.enums',
flight_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
frequency_cap_event_type_pb2='google.ads.google_ads.v6.proto.enums',
frequency_cap_level_pb2='google.ads.google_ads.v6.proto.enums',
frequency_cap_time_unit_pb2='google.ads.google_ads.v6.proto.enums',
gender_type_pb2='google.ads.google_ads.v6.proto.enums',
geo_target_constant_status_pb2='google.ads.google_ads.v6.proto.enums',
geo_targeting_restriction_pb2='google.ads.google_ads.v6.proto.enums',
geo_targeting_type_pb2='google.ads.google_ads.v6.proto.enums',
google_ads_field_category_pb2='google.ads.google_ads.v6.proto.enums',
google_ads_field_data_type_pb2='google.ads.google_ads.v6.proto.enums',
hotel_date_selection_type_pb2='google.ads.google_ads.v6.proto.enums',
hotel_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
hotel_price_bucket_pb2='google.ads.google_ads.v6.proto.enums',
hotel_rate_type_pb2='google.ads.google_ads.v6.proto.enums',
image_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
income_range_type_pb2='google.ads.google_ads.v6.proto.enums',
interaction_event_type_pb2='google.ads.google_ads.v6.proto.enums',
interaction_type_pb2='google.ads.google_ads.v6.proto.enums',
invoice_type_pb2='google.ads.google_ads.v6.proto.enums',
job_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
keyword_match_type_pb2='google.ads.google_ads.v6.proto.enums',
keyword_plan_competition_level_pb2='google.ads.google_ads.v6.proto.enums',
keyword_plan_forecast_interval_pb2='google.ads.google_ads.v6.proto.enums',
keyword_plan_network_pb2='google.ads.google_ads.v6.proto.enums',
label_status_pb2='google.ads.google_ads.v6.proto.enums',
lead_form_call_to_action_type_pb2='google.ads.google_ads.v6.proto.enums',
lead_form_desired_intent_pb2='google.ads.google_ads.v6.proto.enums',
lead_form_field_user_input_type_pb2='google.ads.google_ads.v6.proto.enums',
lead_form_post_submit_call_to_action_type_pb2='google.ads.google_ads.v6.proto.enums',
legacy_app_install_ad_app_store_pb2='google.ads.google_ads.v6.proto.enums',
linked_account_type_pb2='google.ads.google_ads.v6.proto.enums',
listing_group_type_pb2='google.ads.google_ads.v6.proto.enums',
local_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
location_extension_targeting_criterion_field_pb2='google.ads.google_ads.v6.proto.enums',
location_group_radius_units_pb2='google.ads.google_ads.v6.proto.enums',
location_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
location_source_type_pb2='google.ads.google_ads.v6.proto.enums',
manager_link_status_pb2='google.ads.google_ads.v6.proto.enums',
matching_function_context_type_pb2='google.ads.google_ads.v6.proto.enums',
matching_function_operator_pb2='google.ads.google_ads.v6.proto.enums',
media_type_pb2='google.ads.google_ads.v6.proto.enums',
merchant_center_link_status_pb2='google.ads.google_ads.v6.proto.enums',
message_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
mime_type_pb2='google.ads.google_ads.v6.proto.enums',
minute_of_hour_pb2='google.ads.google_ads.v6.proto.enums',
mobile_app_vendor_pb2='google.ads.google_ads.v6.proto.enums',
mobile_device_type_pb2='google.ads.google_ads.v6.proto.enums',
month_of_year_pb2='google.ads.google_ads.v6.proto.enums',
negative_geo_target_type_pb2='google.ads.google_ads.v6.proto.enums',
offline_user_data_job_failure_reason_pb2='google.ads.google_ads.v6.proto.enums',
offline_user_data_job_status_pb2='google.ads.google_ads.v6.proto.enums',
offline_user_data_job_type_pb2='google.ads.google_ads.v6.proto.enums',
operating_system_version_operator_type_pb2='google.ads.google_ads.v6.proto.enums',
optimization_goal_type_pb2='google.ads.google_ads.v6.proto.enums',
page_one_promoted_strategy_goal_pb2='google.ads.google_ads.v6.proto.enums',
parental_status_type_pb2='google.ads.google_ads.v6.proto.enums',
payment_mode_pb2='google.ads.google_ads.v6.proto.enums',
placeholder_type_pb2='google.ads.google_ads.v6.proto.enums',
placement_type_pb2='google.ads.google_ads.v6.proto.enums',
policy_approval_status_pb2='google.ads.google_ads.v6.proto.enums',
policy_review_status_pb2='google.ads.google_ads.v6.proto.enums',
policy_topic_entry_type_pb2='google.ads.google_ads.v6.proto.enums',
policy_topic_evidence_destination_mismatch_url_type_pb2='google.ads.google_ads.v6.proto.enums',
policy_topic_evidence_destination_not_working_device_pb2='google.ads.google_ads.v6.proto.enums',
policy_topic_evidence_destination_not_working_dns_error_type_pb2='google.ads.google_ads.v6.proto.enums',
positive_geo_target_type_pb2='google.ads.google_ads.v6.proto.enums',
preferred_content_type_pb2='google.ads.google_ads.v6.proto.enums',
price_extension_price_qualifier_pb2='google.ads.google_ads.v6.proto.enums',
price_extension_price_unit_pb2='google.ads.google_ads.v6.proto.enums',
price_extension_type_pb2='google.ads.google_ads.v6.proto.enums',
price_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
product_bidding_category_level_pb2='google.ads.google_ads.v6.proto.enums',
product_bidding_category_status_pb2='google.ads.google_ads.v6.proto.enums',
product_channel_exclusivity_pb2='google.ads.google_ads.v6.proto.enums',
product_channel_pb2='google.ads.google_ads.v6.proto.enums',
product_condition_pb2='google.ads.google_ads.v6.proto.enums',
product_custom_attribute_index_pb2='google.ads.google_ads.v6.proto.enums',
product_type_level_pb2='google.ads.google_ads.v6.proto.enums',
promotion_extension_discount_modifier_pb2='google.ads.google_ads.v6.proto.enums',
promotion_extension_occasion_pb2='google.ads.google_ads.v6.proto.enums',
promotion_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
proximity_radius_units_pb2='google.ads.google_ads.v6.proto.enums',
quality_score_bucket_pb2='google.ads.google_ads.v6.proto.enums',
reach_plan_ad_length_pb2='google.ads.google_ads.v6.proto.enums',
reach_plan_age_range_pb2='google.ads.google_ads.v6.proto.enums',
reach_plan_network_pb2='google.ads.google_ads.v6.proto.enums',
real_estate_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
recommendation_type_pb2='google.ads.google_ads.v6.proto.enums',
resource_change_operation_pb2='google.ads.google_ads.v6.proto.enums',
response_content_type_pb2='google.ads.google_ads.v6.proto.enums',
search_engine_results_page_type_pb2='google.ads.google_ads.v6.proto.enums',
search_term_match_type_pb2='google.ads.google_ads.v6.proto.enums',
search_term_targeting_status_pb2='google.ads.google_ads.v6.proto.enums',
served_asset_field_type_pb2='google.ads.google_ads.v6.proto.enums',
shared_set_status_pb2='google.ads.google_ads.v6.proto.enums',
shared_set_type_pb2='google.ads.google_ads.v6.proto.enums',
simulation_modification_method_pb2='google.ads.google_ads.v6.proto.enums',
simulation_type_pb2='google.ads.google_ads.v6.proto.enums',
sitelink_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
slot_pb2='google.ads.google_ads.v6.proto.enums',
spending_limit_type_pb2='google.ads.google_ads.v6.proto.enums',
structured_snippet_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
summary_row_setting_pb2='google.ads.google_ads.v6.proto.enums',
system_managed_entity_source_pb2='google.ads.google_ads.v6.proto.enums',
target_cpa_opt_in_recommendation_goal_pb2='google.ads.google_ads.v6.proto.enums',
target_impression_share_location_pb2='google.ads.google_ads.v6.proto.enums',
targeting_dimension_pb2='google.ads.google_ads.v6.proto.enums',
time_type_pb2='google.ads.google_ads.v6.proto.enums',
tracking_code_page_format_pb2='google.ads.google_ads.v6.proto.enums',
tracking_code_type_pb2='google.ads.google_ads.v6.proto.enums',
travel_placeholder_field_pb2='google.ads.google_ads.v6.proto.enums',
user_identifier_source_pb2='google.ads.google_ads.v6.proto.enums',
user_interest_taxonomy_type_pb2='google.ads.google_ads.v6.proto.enums',
user_list_access_status_pb2='google.ads.google_ads.v6.proto.enums',
user_list_closing_reason_pb2='google.ads.google_ads.v6.proto.enums',
user_list_combined_rule_operator_pb2='google.ads.google_ads.v6.proto.enums',
user_list_crm_data_source_type_pb2='google.ads.google_ads.v6.proto.enums',
user_list_date_rule_item_operator_pb2='google.ads.google_ads.v6.proto.enums',
user_list_logical_rule_operator_pb2='google.ads.google_ads.v6.proto.enums',
user_list_membership_status_pb2='google.ads.google_ads.v6.proto.enums',
user_list_number_rule_item_operator_pb2='google.ads.google_ads.v6.proto.enums',
user_list_prepopulation_status_pb2='google.ads.google_ads.v6.proto.enums',
user_list_rule_type_pb2='google.ads.google_ads.v6.proto.enums',
user_list_size_range_pb2='google.ads.google_ads.v6.proto.enums',
user_list_string_rule_item_operator_pb2='google.ads.google_ads.v6.proto.enums',
user_list_type_pb2='google.ads.google_ads.v6.proto.enums',
vanity_pharma_display_url_mode_pb2='google.ads.google_ads.v6.proto.enums',
vanity_pharma_text_pb2='google.ads.google_ads.v6.proto.enums',
webpage_condition_operand_pb2='google.ads.google_ads.v6.proto.enums',
webpage_condition_operator_pb2='google.ads.google_ads.v6.proto.enums',
access_invitation_error_pb2='google.ads.google_ads.v6.proto.errors',
account_budget_proposal_error_pb2='google.ads.google_ads.v6.proto.errors',
account_link_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_customizer_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_group_ad_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_group_bid_modifier_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_group_criterion_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_group_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_group_feed_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_parameter_error_pb2='google.ads.google_ads.v6.proto.errors',
ad_sharing_error_pb2='google.ads.google_ads.v6.proto.errors',
adx_error_pb2='google.ads.google_ads.v6.proto.errors',
asset_error_pb2='google.ads.google_ads.v6.proto.errors',
asset_link_error_pb2='google.ads.google_ads.v6.proto.errors',
authentication_error_pb2='google.ads.google_ads.v6.proto.errors',
authorization_error_pb2='google.ads.google_ads.v6.proto.errors',
batch_job_error_pb2='google.ads.google_ads.v6.proto.errors',
bidding_error_pb2='google.ads.google_ads.v6.proto.errors',
bidding_strategy_error_pb2='google.ads.google_ads.v6.proto.errors',
billing_setup_error_pb2='google.ads.google_ads.v6.proto.errors',
campaign_budget_error_pb2='google.ads.google_ads.v6.proto.errors',
campaign_criterion_error_pb2='google.ads.google_ads.v6.proto.errors',
campaign_draft_error_pb2='google.ads.google_ads.v6.proto.errors',
campaign_error_pb2='google.ads.google_ads.v6.proto.errors',
campaign_experiment_error_pb2='google.ads.google_ads.v6.proto.errors',
campaign_feed_error_pb2='google.ads.google_ads.v6.proto.errors',
campaign_shared_set_error_pb2='google.ads.google_ads.v6.proto.errors',
change_event_error_pb2='google.ads.google_ads.v6.proto.errors',
change_status_error_pb2='google.ads.google_ads.v6.proto.errors',
collection_size_error_pb2='google.ads.google_ads.v6.proto.errors',
context_error_pb2='google.ads.google_ads.v6.proto.errors',
conversion_action_error_pb2='google.ads.google_ads.v6.proto.errors',
conversion_adjustment_upload_error_pb2='google.ads.google_ads.v6.proto.errors',
conversion_upload_error_pb2='google.ads.google_ads.v6.proto.errors',
country_code_error_pb2='google.ads.google_ads.v6.proto.errors',
criterion_error_pb2='google.ads.google_ads.v6.proto.errors',
currency_code_error_pb2='google.ads.google_ads.v6.proto.errors',
custom_audience_error_pb2='google.ads.google_ads.v6.proto.errors',
custom_interest_error_pb2='google.ads.google_ads.v6.proto.errors',
customer_client_link_error_pb2='google.ads.google_ads.v6.proto.errors',
customer_error_pb2='google.ads.google_ads.v6.proto.errors',
customer_feed_error_pb2='google.ads.google_ads.v6.proto.errors',
customer_manager_link_error_pb2='google.ads.google_ads.v6.proto.errors',
customer_user_access_error_pb2='google.ads.google_ads.v6.proto.errors',
database_error_pb2='google.ads.google_ads.v6.proto.errors',
date_error_pb2='google.ads.google_ads.v6.proto.errors',
date_range_error_pb2='google.ads.google_ads.v6.proto.errors',
distinct_error_pb2='google.ads.google_ads.v6.proto.errors',
enum_error_pb2='google.ads.google_ads.v6.proto.errors',
errors_pb2='google.ads.google_ads.v6.proto.errors',
extension_feed_item_error_pb2='google.ads.google_ads.v6.proto.errors',
extension_setting_error_pb2='google.ads.google_ads.v6.proto.errors',
feed_attribute_reference_error_pb2='google.ads.google_ads.v6.proto.errors',
feed_error_pb2='google.ads.google_ads.v6.proto.errors',
feed_item_error_pb2='google.ads.google_ads.v6.proto.errors',
feed_item_set_error_pb2='google.ads.google_ads.v6.proto.errors',
feed_item_set_link_error_pb2='google.ads.google_ads.v6.proto.errors',
feed_item_target_error_pb2='google.ads.google_ads.v6.proto.errors',
feed_item_validation_error_pb2='google.ads.google_ads.v6.proto.errors',
feed_mapping_error_pb2='google.ads.google_ads.v6.proto.errors',
field_error_pb2='google.ads.google_ads.v6.proto.errors',
field_mask_error_pb2='google.ads.google_ads.v6.proto.errors',
function_error_pb2='google.ads.google_ads.v6.proto.errors',
function_parsing_error_pb2='google.ads.google_ads.v6.proto.errors',
geo_target_constant_suggestion_error_pb2='google.ads.google_ads.v6.proto.errors',
header_error_pb2='google.ads.google_ads.v6.proto.errors',
id_error_pb2='google.ads.google_ads.v6.proto.errors',
image_error_pb2='google.ads.google_ads.v6.proto.errors',
internal_error_pb2='google.ads.google_ads.v6.proto.errors',
invoice_error_pb2='google.ads.google_ads.v6.proto.errors',
keyword_plan_ad_group_error_pb2='google.ads.google_ads.v6.proto.errors',
keyword_plan_ad_group_keyword_error_pb2='google.ads.google_ads.v6.proto.errors',
keyword_plan_campaign_error_pb2='google.ads.google_ads.v6.proto.errors',
keyword_plan_campaign_keyword_error_pb2='google.ads.google_ads.v6.proto.errors',
keyword_plan_error_pb2='google.ads.google_ads.v6.proto.errors',
keyword_plan_idea_error_pb2='google.ads.google_ads.v6.proto.errors',
label_error_pb2='google.ads.google_ads.v6.proto.errors',
language_code_error_pb2='google.ads.google_ads.v6.proto.errors',
list_operation_error_pb2='google.ads.google_ads.v6.proto.errors',
manager_link_error_pb2='google.ads.google_ads.v6.proto.errors',
media_bundle_error_pb2='google.ads.google_ads.v6.proto.errors',
media_file_error_pb2='google.ads.google_ads.v6.proto.errors',
media_upload_error_pb2='google.ads.google_ads.v6.proto.errors',
multiplier_error_pb2='google.ads.google_ads.v6.proto.errors',
mutate_error_pb2='google.ads.google_ads.v6.proto.errors',
new_resource_creation_error_pb2='google.ads.google_ads.v6.proto.errors',
not_allowlisted_error_pb2='google.ads.google_ads.v6.proto.errors',
not_empty_error_pb2='google.ads.google_ads.v6.proto.errors',
null_error_pb2='google.ads.google_ads.v6.proto.errors',
offline_user_data_job_error_pb2='google.ads.google_ads.v6.proto.errors',
operation_access_denied_error_pb2='google.ads.google_ads.v6.proto.errors',
operator_error_pb2='google.ads.google_ads.v6.proto.errors',
partial_failure_error_pb2='google.ads.google_ads.v6.proto.errors',
payments_account_error_pb2='google.ads.google_ads.v6.proto.errors',
policy_finding_error_pb2='google.ads.google_ads.v6.proto.errors',
policy_validation_parameter_error_pb2='google.ads.google_ads.v6.proto.errors',
policy_violation_error_pb2='google.ads.google_ads.v6.proto.errors',
query_error_pb2='google.ads.google_ads.v6.proto.errors',
quota_error_pb2='google.ads.google_ads.v6.proto.errors',
range_error_pb2='google.ads.google_ads.v6.proto.errors',
reach_plan_error_pb2='google.ads.google_ads.v6.proto.errors',
recommendation_error_pb2='google.ads.google_ads.v6.proto.errors',
region_code_error_pb2='google.ads.google_ads.v6.proto.errors',
request_error_pb2='google.ads.google_ads.v6.proto.errors',
resource_access_denied_error_pb2='google.ads.google_ads.v6.proto.errors',
resource_count_limit_exceeded_error_pb2='google.ads.google_ads.v6.proto.errors',
setting_error_pb2='google.ads.google_ads.v6.proto.errors',
shared_criterion_error_pb2='google.ads.google_ads.v6.proto.errors',
shared_set_error_pb2='google.ads.google_ads.v6.proto.errors',
size_limit_error_pb2='google.ads.google_ads.v6.proto.errors',
string_format_error_pb2='google.ads.google_ads.v6.proto.errors',
string_length_error_pb2='google.ads.google_ads.v6.proto.errors',
third_party_app_analytics_link_error_pb2='google.ads.google_ads.v6.proto.errors',
time_zone_error_pb2='google.ads.google_ads.v6.proto.errors',
url_field_error_pb2='google.ads.google_ads.v6.proto.errors',
user_data_error_pb2='google.ads.google_ads.v6.proto.errors',
user_list_error_pb2='google.ads.google_ads.v6.proto.errors',
youtube_video_registration_error_pb2='google.ads.google_ads.v6.proto.errors',
account_budget_pb2='google.ads.google_ads.v6.proto.resources',
account_budget_proposal_pb2='google.ads.google_ads.v6.proto.resources',
account_link_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_ad_asset_view_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_ad_label_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_ad_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_audience_view_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_bid_modifier_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_criterion_label_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_criterion_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_criterion_simulation_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_extension_setting_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_feed_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_label_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_pb2='google.ads.google_ads.v6.proto.resources',
ad_group_simulation_pb2='google.ads.google_ads.v6.proto.resources',
ad_parameter_pb2='google.ads.google_ads.v6.proto.resources',
ad_pb2='google.ads.google_ads.v6.proto.resources',
ad_schedule_view_pb2='google.ads.google_ads.v6.proto.resources',
age_range_view_pb2='google.ads.google_ads.v6.proto.resources',
asset_pb2='google.ads.google_ads.v6.proto.resources',
batch_job_pb2='google.ads.google_ads.v6.proto.resources',
bidding_strategy_pb2='google.ads.google_ads.v6.proto.resources',
billing_setup_pb2='google.ads.google_ads.v6.proto.resources',
campaign_asset_pb2='google.ads.google_ads.v6.proto.resources',
campaign_audience_view_pb2='google.ads.google_ads.v6.proto.resources',
campaign_bid_modifier_pb2='google.ads.google_ads.v6.proto.resources',
campaign_budget_pb2='google.ads.google_ads.v6.proto.resources',
campaign_criterion_pb2='google.ads.google_ads.v6.proto.resources',
campaign_criterion_simulation_pb2='google.ads.google_ads.v6.proto.resources',
campaign_draft_pb2='google.ads.google_ads.v6.proto.resources',
campaign_experiment_pb2='google.ads.google_ads.v6.proto.resources',
campaign_extension_setting_pb2='google.ads.google_ads.v6.proto.resources',
campaign_feed_pb2='google.ads.google_ads.v6.proto.resources',
campaign_label_pb2='google.ads.google_ads.v6.proto.resources',
campaign_pb2='google.ads.google_ads.v6.proto.resources',
campaign_shared_set_pb2='google.ads.google_ads.v6.proto.resources',
carrier_constant_pb2='google.ads.google_ads.v6.proto.resources',
change_event_pb2='google.ads.google_ads.v6.proto.resources',
change_status_pb2='google.ads.google_ads.v6.proto.resources',
click_view_pb2='google.ads.google_ads.v6.proto.resources',
combined_audience_pb2='google.ads.google_ads.v6.proto.resources',
conversion_action_pb2='google.ads.google_ads.v6.proto.resources',
currency_constant_pb2='google.ads.google_ads.v6.proto.resources',
custom_audience_pb2='google.ads.google_ads.v6.proto.resources',
custom_interest_pb2='google.ads.google_ads.v6.proto.resources',
customer_client_link_pb2='google.ads.google_ads.v6.proto.resources',
customer_client_pb2='google.ads.google_ads.v6.proto.resources',
customer_extension_setting_pb2='google.ads.google_ads.v6.proto.resources',
customer_feed_pb2='google.ads.google_ads.v6.proto.resources',
customer_label_pb2='google.ads.google_ads.v6.proto.resources',
customer_manager_link_pb2='google.ads.google_ads.v6.proto.resources',
customer_negative_criterion_pb2='google.ads.google_ads.v6.proto.resources',
customer_pb2='google.ads.google_ads.v6.proto.resources',
customer_user_access_pb2='google.ads.google_ads.v6.proto.resources',
detail_placement_view_pb2='google.ads.google_ads.v6.proto.resources',
display_keyword_view_pb2='google.ads.google_ads.v6.proto.resources',
distance_view_pb2='google.ads.google_ads.v6.proto.resources',
domain_category_pb2='google.ads.google_ads.v6.proto.resources',
dynamic_search_ads_search_term_view_pb2='google.ads.google_ads.v6.proto.resources',
expanded_landing_page_view_pb2='google.ads.google_ads.v6.proto.resources',
extension_feed_item_pb2='google.ads.google_ads.v6.proto.resources',
feed_item_pb2='google.ads.google_ads.v6.proto.resources',
feed_item_set_link_pb2='google.ads.google_ads.v6.proto.resources',
feed_item_set_pb2='google.ads.google_ads.v6.proto.resources',
feed_item_target_pb2='google.ads.google_ads.v6.proto.resources',
feed_mapping_pb2='google.ads.google_ads.v6.proto.resources',
feed_pb2='google.ads.google_ads.v6.proto.resources',
feed_placeholder_view_pb2='google.ads.google_ads.v6.proto.resources',
gender_view_pb2='google.ads.google_ads.v6.proto.resources',
geo_target_constant_pb2='google.ads.google_ads.v6.proto.resources',
geographic_view_pb2='google.ads.google_ads.v6.proto.resources',
google_ads_field_pb2='google.ads.google_ads.v6.proto.resources',
group_placement_view_pb2='google.ads.google_ads.v6.proto.resources',
hotel_group_view_pb2='google.ads.google_ads.v6.proto.resources',
hotel_performance_view_pb2='google.ads.google_ads.v6.proto.resources',
income_range_view_pb2='google.ads.google_ads.v6.proto.resources',
invoice_pb2='google.ads.google_ads.v6.proto.resources',
keyword_plan_ad_group_keyword_pb2='google.ads.google_ads.v6.proto.resources',
keyword_plan_ad_group_pb2='google.ads.google_ads.v6.proto.resources',
keyword_plan_campaign_keyword_pb2='google.ads.google_ads.v6.proto.resources',
keyword_plan_campaign_pb2='google.ads.google_ads.v6.proto.resources',
keyword_plan_pb2='google.ads.google_ads.v6.proto.resources',
keyword_view_pb2='google.ads.google_ads.v6.proto.resources',
label_pb2='google.ads.google_ads.v6.proto.resources',
landing_page_view_pb2='google.ads.google_ads.v6.proto.resources',
language_constant_pb2='google.ads.google_ads.v6.proto.resources',
location_view_pb2='google.ads.google_ads.v6.proto.resources',
managed_placement_view_pb2='google.ads.google_ads.v6.proto.resources',
media_file_pb2='google.ads.google_ads.v6.proto.resources',
merchant_center_link_pb2='google.ads.google_ads.v6.proto.resources',
mobile_app_category_constant_pb2='google.ads.google_ads.v6.proto.resources',
mobile_device_constant_pb2='google.ads.google_ads.v6.proto.resources',
offline_user_data_job_pb2='google.ads.google_ads.v6.proto.resources',
operating_system_version_constant_pb2='google.ads.google_ads.v6.proto.resources',
paid_organic_search_term_view_pb2='google.ads.google_ads.v6.proto.resources',
parental_status_view_pb2='google.ads.google_ads.v6.proto.resources',
payments_account_pb2='google.ads.google_ads.v6.proto.resources',
product_bidding_category_constant_pb2='google.ads.google_ads.v6.proto.resources',
product_group_view_pb2='google.ads.google_ads.v6.proto.resources',
recommendation_pb2='google.ads.google_ads.v6.proto.resources',
remarketing_action_pb2='google.ads.google_ads.v6.proto.resources',
search_term_view_pb2='google.ads.google_ads.v6.proto.resources',
shared_criterion_pb2='google.ads.google_ads.v6.proto.resources',
shared_set_pb2='google.ads.google_ads.v6.proto.resources',
shopping_performance_view_pb2='google.ads.google_ads.v6.proto.resources',
third_party_app_analytics_link_pb2='google.ads.google_ads.v6.proto.resources',
topic_constant_pb2='google.ads.google_ads.v6.proto.resources',
topic_view_pb2='google.ads.google_ads.v6.proto.resources',
user_interest_pb2='google.ads.google_ads.v6.proto.resources',
user_list_pb2='google.ads.google_ads.v6.proto.resources',
user_location_view_pb2='google.ads.google_ads.v6.proto.resources',
video_pb2='google.ads.google_ads.v6.proto.resources',
account_budget_proposal_service_pb2='google.ads.google_ads.v6.proto.services',
account_budget_service_pb2='google.ads.google_ads.v6.proto.services',
account_link_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_ad_asset_view_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_ad_label_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_ad_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_audience_view_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_bid_modifier_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_criterion_label_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_criterion_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_criterion_simulation_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_extension_setting_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_feed_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_label_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_service_pb2='google.ads.google_ads.v6.proto.services',
ad_group_simulation_service_pb2='google.ads.google_ads.v6.proto.services',
ad_parameter_service_pb2='google.ads.google_ads.v6.proto.services',
ad_schedule_view_service_pb2='google.ads.google_ads.v6.proto.services',
ad_service_pb2='google.ads.google_ads.v6.proto.services',
age_range_view_service_pb2='google.ads.google_ads.v6.proto.services',
asset_service_pb2='google.ads.google_ads.v6.proto.services',
batch_job_service_pb2='google.ads.google_ads.v6.proto.services',
bidding_strategy_service_pb2='google.ads.google_ads.v6.proto.services',
billing_setup_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_asset_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_audience_view_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_bid_modifier_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_budget_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_criterion_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_criterion_simulation_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_draft_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_experiment_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_extension_setting_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_feed_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_label_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_service_pb2='google.ads.google_ads.v6.proto.services',
campaign_shared_set_service_pb2='google.ads.google_ads.v6.proto.services',
carrier_constant_service_pb2='google.ads.google_ads.v6.proto.services',
change_status_service_pb2='google.ads.google_ads.v6.proto.services',
click_view_service_pb2='google.ads.google_ads.v6.proto.services',
combined_audience_service_pb2='google.ads.google_ads.v6.proto.services',
conversion_action_service_pb2='google.ads.google_ads.v6.proto.services',
conversion_adjustment_upload_service_pb2='google.ads.google_ads.v6.proto.services',
conversion_upload_service_pb2='google.ads.google_ads.v6.proto.services',
currency_constant_service_pb2='google.ads.google_ads.v6.proto.services',
custom_audience_service_pb2='google.ads.google_ads.v6.proto.services',
custom_interest_service_pb2='google.ads.google_ads.v6.proto.services',
customer_client_link_service_pb2='google.ads.google_ads.v6.proto.services',
customer_client_service_pb2='google.ads.google_ads.v6.proto.services',
customer_extension_setting_service_pb2='google.ads.google_ads.v6.proto.services',
customer_feed_service_pb2='google.ads.google_ads.v6.proto.services',
customer_label_service_pb2='google.ads.google_ads.v6.proto.services',
customer_manager_link_service_pb2='google.ads.google_ads.v6.proto.services',
customer_negative_criterion_service_pb2='google.ads.google_ads.v6.proto.services',
customer_service_pb2='google.ads.google_ads.v6.proto.services',
customer_user_access_service_pb2='google.ads.google_ads.v6.proto.services',
detail_placement_view_service_pb2='google.ads.google_ads.v6.proto.services',
display_keyword_view_service_pb2='google.ads.google_ads.v6.proto.services',
distance_view_service_pb2='google.ads.google_ads.v6.proto.services',
domain_category_service_pb2='google.ads.google_ads.v6.proto.services',
dynamic_search_ads_search_term_view_service_pb2='google.ads.google_ads.v6.proto.services',
expanded_landing_page_view_service_pb2='google.ads.google_ads.v6.proto.services',
extension_feed_item_service_pb2='google.ads.google_ads.v6.proto.services',
feed_item_service_pb2='google.ads.google_ads.v6.proto.services',
feed_item_set_link_service_pb2='google.ads.google_ads.v6.proto.services',
feed_item_set_service_pb2='google.ads.google_ads.v6.proto.services',
feed_item_target_service_pb2='google.ads.google_ads.v6.proto.services',
feed_mapping_service_pb2='google.ads.google_ads.v6.proto.services',
feed_placeholder_view_service_pb2='google.ads.google_ads.v6.proto.services',
feed_service_pb2='google.ads.google_ads.v6.proto.services',
gender_view_service_pb2='google.ads.google_ads.v6.proto.services',
geo_target_constant_service_pb2='google.ads.google_ads.v6.proto.services',
geographic_view_service_pb2='google.ads.google_ads.v6.proto.services',
google_ads_field_service_pb2='google.ads.google_ads.v6.proto.services',
google_ads_service_pb2='google.ads.google_ads.v6.proto.services',
group_placement_view_service_pb2='google.ads.google_ads.v6.proto.services',
hotel_group_view_service_pb2='google.ads.google_ads.v6.proto.services',
hotel_performance_view_service_pb2='google.ads.google_ads.v6.proto.services',
income_range_view_service_pb2='google.ads.google_ads.v6.proto.services',
invoice_service_pb2='google.ads.google_ads.v6.proto.services',
keyword_plan_ad_group_keyword_service_pb2='google.ads.google_ads.v6.proto.services',
keyword_plan_ad_group_service_pb2='google.ads.google_ads.v6.proto.services',
keyword_plan_campaign_keyword_service_pb2='google.ads.google_ads.v6.proto.services',
keyword_plan_campaign_service_pb2='google.ads.google_ads.v6.proto.services',
keyword_plan_idea_service_pb2='google.ads.google_ads.v6.proto.services',
keyword_plan_service_pb2='google.ads.google_ads.v6.proto.services',
keyword_view_service_pb2='google.ads.google_ads.v6.proto.services',
label_service_pb2='google.ads.google_ads.v6.proto.services',
landing_page_view_service_pb2='google.ads.google_ads.v6.proto.services',
language_constant_service_pb2='google.ads.google_ads.v6.proto.services',
location_view_service_pb2='google.ads.google_ads.v6.proto.services',
managed_placement_view_service_pb2='google.ads.google_ads.v6.proto.services',
media_file_service_pb2='google.ads.google_ads.v6.proto.services',
merchant_center_link_service_pb2='google.ads.google_ads.v6.proto.services',
mobile_app_category_constant_service_pb2='google.ads.google_ads.v6.proto.services',
mobile_device_constant_service_pb2='google.ads.google_ads.v6.proto.services',
offline_user_data_job_service_pb2='google.ads.google_ads.v6.proto.services',
operating_system_version_constant_service_pb2='google.ads.google_ads.v6.proto.services',
paid_organic_search_term_view_service_pb2='google.ads.google_ads.v6.proto.services',
parental_status_view_service_pb2='google.ads.google_ads.v6.proto.services',
payments_account_service_pb2='google.ads.google_ads.v6.proto.services',
product_bidding_category_constant_service_pb2='google.ads.google_ads.v6.proto.services',
product_group_view_service_pb2='google.ads.google_ads.v6.proto.services',
reach_plan_service_pb2='google.ads.google_ads.v6.proto.services',
recommendation_service_pb2='google.ads.google_ads.v6.proto.services',
remarketing_action_service_pb2='google.ads.google_ads.v6.proto.services',
search_term_view_service_pb2='google.ads.google_ads.v6.proto.services',
shared_criterion_service_pb2='google.ads.google_ads.v6.proto.services',
shared_set_service_pb2='google.ads.google_ads.v6.proto.services',
shopping_performance_view_service_pb2='google.ads.google_ads.v6.proto.services',
third_party_app_analytics_link_service_pb2='google.ads.google_ads.v6.proto.services',
topic_constant_service_pb2='google.ads.google_ads.v6.proto.services',
topic_view_service_pb2='google.ads.google_ads.v6.proto.services',
user_data_service_pb2='google.ads.google_ads.v6.proto.services',
user_interest_service_pb2='google.ads.google_ads.v6.proto.services',
user_list_service_pb2='google.ads.google_ads.v6.proto.services',
user_location_view_service_pb2='google.ads.google_ads.v6.proto.services',
video_service_pb2='google.ads.google_ads.v6.proto.services',
operations_pb2='google.longrunning',
any_pb2='google.protobuf',
empty_pb2='google.protobuf',
field_mask_pb2='google.protobuf',
wrappers_pb2='google.protobuf',
status_pb2='google.rpc',
)
_lazy_class_to_package_map = dict(
AccessInvitationErrorEnum='google.ads.google_ads.v6.proto.errors.access_invitation_error_pb2',
AccessReasonEnum='google.ads.google_ads.v6.proto.enums.access_reason_pb2',
AccessRoleEnum='google.ads.google_ads.v6.proto.enums.access_role_pb2',
AccountBudget='google.ads.google_ads.v6.proto.resources.account_budget_pb2',
AccountBudgetProposal='google.ads.google_ads.v6.proto.resources.account_budget_proposal_pb2',
AccountBudgetProposalErrorEnum='google.ads.google_ads.v6.proto.errors.account_budget_proposal_error_pb2',
AccountBudgetProposalOperation='google.ads.google_ads.v6.proto.services.account_budget_proposal_service_pb2',
AccountBudgetProposalStatusEnum='google.ads.google_ads.v6.proto.enums.account_budget_proposal_status_pb2',
AccountBudgetProposalTypeEnum='google.ads.google_ads.v6.proto.enums.account_budget_proposal_type_pb2',
AccountBudgetStatusEnum='google.ads.google_ads.v6.proto.enums.account_budget_status_pb2',
AccountLink='google.ads.google_ads.v6.proto.resources.account_link_pb2',
AccountLinkErrorEnum='google.ads.google_ads.v6.proto.errors.account_link_error_pb2',
AccountLinkOperation='google.ads.google_ads.v6.proto.services.account_link_service_pb2',
AccountLinkStatusEnum='google.ads.google_ads.v6.proto.enums.account_link_status_pb2',
Ad='google.ads.google_ads.v6.proto.resources.ad_pb2',
AdAssetPolicySummary='google.ads.google_ads.v6.proto.common.asset_policy_pb2',
AdCustomizerErrorEnum='google.ads.google_ads.v6.proto.errors.ad_customizer_error_pb2',
AdCustomizerPlaceholderFieldEnum='google.ads.google_ads.v6.proto.enums.ad_customizer_placeholder_field_pb2',
AdDestinationTypeEnum='google.ads.google_ads.v6.proto.enums.ad_destination_type_pb2',
AdErrorEnum='google.ads.google_ads.v6.proto.errors.ad_error_pb2',
AdGroup='google.ads.google_ads.v6.proto.resources.ad_group_pb2',
AdGroupAd='google.ads.google_ads.v6.proto.resources.ad_group_ad_pb2',
AdGroupAdAssetPolicySummary='google.ads.google_ads.v6.proto.resources.ad_group_ad_asset_view_pb2',
AdGroupAdAssetView='google.ads.google_ads.v6.proto.resources.ad_group_ad_asset_view_pb2',
AdGroupAdErrorEnum='google.ads.google_ads.v6.proto.errors.ad_group_ad_error_pb2',
AdGroupAdLabel='google.ads.google_ads.v6.proto.resources.ad_group_ad_label_pb2',
AdGroupAdLabelOperation='google.ads.google_ads.v6.proto.services.ad_group_ad_label_service_pb2',
AdGroupAdOperation='google.ads.google_ads.v6.proto.services.ad_group_ad_service_pb2',
AdGroupAdPolicySummary='google.ads.google_ads.v6.proto.resources.ad_group_ad_pb2',
AdGroupAdRotationModeEnum='google.ads.google_ads.v6.proto.enums.ad_group_ad_rotation_mode_pb2',
AdGroupAdStatusEnum='google.ads.google_ads.v6.proto.enums.ad_group_ad_status_pb2',
AdGroupAudienceView='google.ads.google_ads.v6.proto.resources.ad_group_audience_view_pb2',
AdGroupBidModifier='google.ads.google_ads.v6.proto.resources.ad_group_bid_modifier_pb2',
AdGroupBidModifierErrorEnum='google.ads.google_ads.v6.proto.errors.ad_group_bid_modifier_error_pb2',
AdGroupBidModifierOperation='google.ads.google_ads.v6.proto.services.ad_group_bid_modifier_service_pb2',
AdGroupCriterion='google.ads.google_ads.v6.proto.resources.ad_group_criterion_pb2',
AdGroupCriterionApprovalStatusEnum='google.ads.google_ads.v6.proto.enums.ad_group_criterion_approval_status_pb2',
AdGroupCriterionErrorEnum='google.ads.google_ads.v6.proto.errors.ad_group_criterion_error_pb2',
AdGroupCriterionLabel='google.ads.google_ads.v6.proto.resources.ad_group_criterion_label_pb2',
AdGroupCriterionLabelOperation='google.ads.google_ads.v6.proto.services.ad_group_criterion_label_service_pb2',
AdGroupCriterionOperation='google.ads.google_ads.v6.proto.services.ad_group_criterion_service_pb2',
AdGroupCriterionSimulation='google.ads.google_ads.v6.proto.resources.ad_group_criterion_simulation_pb2',
AdGroupCriterionStatusEnum='google.ads.google_ads.v6.proto.enums.ad_group_criterion_status_pb2',
AdGroupErrorEnum='google.ads.google_ads.v6.proto.errors.ad_group_error_pb2',
AdGroupExtensionSetting='google.ads.google_ads.v6.proto.resources.ad_group_extension_setting_pb2',
AdGroupExtensionSettingOperation='google.ads.google_ads.v6.proto.services.ad_group_extension_setting_service_pb2',
AdGroupFeed='google.ads.google_ads.v6.proto.resources.ad_group_feed_pb2',
AdGroupFeedErrorEnum='google.ads.google_ads.v6.proto.errors.ad_group_feed_error_pb2',
AdGroupFeedOperation='google.ads.google_ads.v6.proto.services.ad_group_feed_service_pb2',
AdGroupLabel='google.ads.google_ads.v6.proto.resources.ad_group_label_pb2',
AdGroupLabelOperation='google.ads.google_ads.v6.proto.services.ad_group_label_service_pb2',
AdGroupOperation='google.ads.google_ads.v6.proto.services.ad_group_service_pb2',
AdGroupSimulation='google.ads.google_ads.v6.proto.resources.ad_group_simulation_pb2',
AdGroupStatusEnum='google.ads.google_ads.v6.proto.enums.ad_group_status_pb2',
AdGroupTypeEnum='google.ads.google_ads.v6.proto.enums.ad_group_type_pb2',
AdImageAsset='google.ads.google_ads.v6.proto.common.ad_asset_pb2',
AdMediaBundleAsset='google.ads.google_ads.v6.proto.common.ad_asset_pb2',
AdNetworkTypeEnum='google.ads.google_ads.v6.proto.enums.ad_network_type_pb2',
AdOperation='google.ads.google_ads.v6.proto.services.ad_service_pb2',
AdParameter='google.ads.google_ads.v6.proto.resources.ad_parameter_pb2',
AdParameterErrorEnum='google.ads.google_ads.v6.proto.errors.ad_parameter_error_pb2',
AdParameterOperation='google.ads.google_ads.v6.proto.services.ad_parameter_service_pb2',
AdScheduleInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
AdScheduleView='google.ads.google_ads.v6.proto.resources.ad_schedule_view_pb2',
AdServingOptimizationStatusEnum='google.ads.google_ads.v6.proto.enums.ad_serving_optimization_status_pb2',
AdSharingErrorEnum='google.ads.google_ads.v6.proto.errors.ad_sharing_error_pb2',
AdStrengthEnum='google.ads.google_ads.v6.proto.enums.ad_strength_pb2',
AdTextAsset='google.ads.google_ads.v6.proto.common.ad_asset_pb2',
AdTypeEnum='google.ads.google_ads.v6.proto.enums.ad_type_pb2',
AdVideoAsset='google.ads.google_ads.v6.proto.common.ad_asset_pb2',
AddBatchJobOperationsRequest='google.ads.google_ads.v6.proto.services.batch_job_service_pb2',
AddBatchJobOperationsResponse='google.ads.google_ads.v6.proto.services.batch_job_service_pb2',
AddOfflineUserDataJobOperationsRequest='google.ads.google_ads.v6.proto.services.offline_user_data_job_service_pb2',
AddOfflineUserDataJobOperationsResponse='google.ads.google_ads.v6.proto.services.offline_user_data_job_service_pb2',
AddressInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
AdvertisingChannelSubTypeEnum='google.ads.google_ads.v6.proto.enums.advertising_channel_sub_type_pb2',
AdvertisingChannelTypeEnum='google.ads.google_ads.v6.proto.enums.advertising_channel_type_pb2',
AdxErrorEnum='google.ads.google_ads.v6.proto.errors.adx_error_pb2',
AffiliateLocationFeedItem='google.ads.google_ads.v6.proto.common.extensions_pb2',
AffiliateLocationFeedRelationshipTypeEnum='google.ads.google_ads.v6.proto.enums.affiliate_location_feed_relationship_type_pb2',
AffiliateLocationPlaceholderFieldEnum='google.ads.google_ads.v6.proto.enums.affiliate_location_placeholder_field_pb2',
AgeRangeInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
AgeRangeTypeEnum='google.ads.google_ads.v6.proto.enums.age_range_type_pb2',
AgeRangeView='google.ads.google_ads.v6.proto.resources.age_range_view_pb2',
AppAdInfo='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
AppCampaignAppStoreEnum='google.ads.google_ads.v6.proto.enums.app_campaign_app_store_pb2',
AppCampaignBiddingStrategyGoalTypeEnum='google.ads.google_ads.v6.proto.enums.app_campaign_bidding_strategy_goal_type_pb2',
AppEngagementAdInfo='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
AppFeedItem='google.ads.google_ads.v6.proto.common.extensions_pb2',
AppPaymentModelInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
AppPaymentModelTypeEnum='google.ads.google_ads.v6.proto.enums.app_payment_model_type_pb2',
AppPlaceholderFieldEnum='google.ads.google_ads.v6.proto.enums.app_placeholder_field_pb2',
AppStoreEnum='google.ads.google_ads.v6.proto.enums.app_store_pb2',
AppUrlOperatingSystemTypeEnum='google.ads.google_ads.v6.proto.enums.app_url_operating_system_type_pb2',
ApplyRecommendationOperation='google.ads.google_ads.v6.proto.services.recommendation_service_pb2',
ApplyRecommendationRequest='google.ads.google_ads.v6.proto.services.recommendation_service_pb2',
ApplyRecommendationResponse='google.ads.google_ads.v6.proto.services.recommendation_service_pb2',
ApplyRecommendationResult='google.ads.google_ads.v6.proto.services.recommendation_service_pb2',
Asset='google.ads.google_ads.v6.proto.resources.asset_pb2',
AssetErrorEnum='google.ads.google_ads.v6.proto.errors.asset_error_pb2',
AssetFieldTypeEnum='google.ads.google_ads.v6.proto.enums.asset_field_type_pb2',
AssetLinkErrorEnum='google.ads.google_ads.v6.proto.errors.asset_link_error_pb2',
AssetLinkStatusEnum='google.ads.google_ads.v6.proto.enums.asset_link_status_pb2',
AssetOperation='google.ads.google_ads.v6.proto.services.asset_service_pb2',
AssetPerformanceLabelEnum='google.ads.google_ads.v6.proto.enums.asset_performance_label_pb2',
AssetPolicySummary='google.ads.google_ads.v6.proto.resources.asset_pb2',
AssetTypeEnum='google.ads.google_ads.v6.proto.enums.asset_type_pb2',
AttributeFieldMapping='google.ads.google_ads.v6.proto.resources.feed_mapping_pb2',
AttributionModelEnum='google.ads.google_ads.v6.proto.enums.attribution_model_pb2',
AuthenticationErrorEnum='google.ads.google_ads.v6.proto.errors.authentication_error_pb2',
AuthorizationErrorEnum='google.ads.google_ads.v6.proto.errors.authorization_error_pb2',
BasicUserListInfo='google.ads.google_ads.v6.proto.common.user_lists_pb2',
BatchJob='google.ads.google_ads.v6.proto.resources.batch_job_pb2',
BatchJobErrorEnum='google.ads.google_ads.v6.proto.errors.batch_job_error_pb2',
BatchJobOperation='google.ads.google_ads.v6.proto.services.batch_job_service_pb2',
BatchJobResult='google.ads.google_ads.v6.proto.services.batch_job_service_pb2',
BatchJobStatusEnum='google.ads.google_ads.v6.proto.enums.batch_job_status_pb2',
BidModifierSimulationPoint='google.ads.google_ads.v6.proto.common.simulation_pb2',
BidModifierSimulationPointList='google.ads.google_ads.v6.proto.common.simulation_pb2',
BidModifierSourceEnum='google.ads.google_ads.v6.proto.enums.bid_modifier_source_pb2',
BiddingErrorEnum='google.ads.google_ads.v6.proto.errors.bidding_error_pb2',
BiddingSourceEnum='google.ads.google_ads.v6.proto.enums.bidding_source_pb2',
BiddingStrategy='google.ads.google_ads.v6.proto.resources.bidding_strategy_pb2',
BiddingStrategyErrorEnum='google.ads.google_ads.v6.proto.errors.bidding_strategy_error_pb2',
BiddingStrategyOperation='google.ads.google_ads.v6.proto.services.bidding_strategy_service_pb2',
BiddingStrategyStatusEnum='google.ads.google_ads.v6.proto.enums.bidding_strategy_status_pb2',
BiddingStrategyTypeEnum='google.ads.google_ads.v6.proto.enums.bidding_strategy_type_pb2',
BillingSetup='google.ads.google_ads.v6.proto.resources.billing_setup_pb2',
BillingSetupErrorEnum='google.ads.google_ads.v6.proto.errors.billing_setup_error_pb2',
BillingSetupOperation='google.ads.google_ads.v6.proto.services.billing_setup_service_pb2',
BillingSetupStatusEnum='google.ads.google_ads.v6.proto.enums.billing_setup_status_pb2',
BookOnGoogleAsset='google.ads.google_ads.v6.proto.common.asset_types_pb2',
BrandSafetySuitabilityEnum='google.ads.google_ads.v6.proto.enums.brand_safety_suitability_pb2',
BudgetCampaignAssociationStatus='google.ads.google_ads.v6.proto.common.segments_pb2',
BudgetCampaignAssociationStatusEnum='google.ads.google_ads.v6.proto.enums.budget_campaign_association_status_pb2',
BudgetDeliveryMethodEnum='google.ads.google_ads.v6.proto.enums.budget_delivery_method_pb2',
BudgetPeriodEnum='google.ads.google_ads.v6.proto.enums.budget_period_pb2',
BudgetStatusEnum='google.ads.google_ads.v6.proto.enums.budget_status_pb2',
BudgetTypeEnum='google.ads.google_ads.v6.proto.enums.budget_type_pb2',
BusinessNameFilter='google.ads.google_ads.v6.proto.common.feed_item_set_filter_type_infos_pb2',
CallConversion='google.ads.google_ads.v6.proto.services.conversion_upload_service_pb2',
CallConversionReportingStateEnum='google.ads.google_ads.v6.proto.enums.call_conversion_reporting_state_pb2',
CallConversionResult='google.ads.google_ads.v6.proto.services.conversion_upload_service_pb2',
CallFeedItem='google.ads.google_ads.v6.proto.common.extensions_pb2',
CallOnlyAdInfo='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
CallPlaceholderFieldEnum='google.ads.google_ads.v6.proto.enums.call_placeholder_field_pb2',
CallReportingSetting='google.ads.google_ads.v6.proto.resources.customer_pb2',
CalloutFeedItem='google.ads.google_ads.v6.proto.common.extensions_pb2',
CalloutPlaceholderFieldEnum='google.ads.google_ads.v6.proto.enums.callout_placeholder_field_pb2',
Campaign='google.ads.google_ads.v6.proto.resources.campaign_pb2',
CampaignAsset='google.ads.google_ads.v6.proto.resources.campaign_asset_pb2',
CampaignAssetOperation='google.ads.google_ads.v6.proto.services.campaign_asset_service_pb2',
CampaignAudienceView='google.ads.google_ads.v6.proto.resources.campaign_audience_view_pb2',
CampaignBidModifier='google.ads.google_ads.v6.proto.resources.campaign_bid_modifier_pb2',
CampaignBidModifierOperation='google.ads.google_ads.v6.proto.services.campaign_bid_modifier_service_pb2',
CampaignBudget='google.ads.google_ads.v6.proto.resources.campaign_budget_pb2',
CampaignBudgetErrorEnum='google.ads.google_ads.v6.proto.errors.campaign_budget_error_pb2',
CampaignBudgetOperation='google.ads.google_ads.v6.proto.services.campaign_budget_service_pb2',
CampaignCriterion='google.ads.google_ads.v6.proto.resources.campaign_criterion_pb2',
CampaignCriterionErrorEnum='google.ads.google_ads.v6.proto.errors.campaign_criterion_error_pb2',
CampaignCriterionOperation='google.ads.google_ads.v6.proto.services.campaign_criterion_service_pb2',
CampaignCriterionSimulation='google.ads.google_ads.v6.proto.resources.campaign_criterion_simulation_pb2',
CampaignCriterionStatusEnum='google.ads.google_ads.v6.proto.enums.campaign_criterion_status_pb2',
CampaignDraft='google.ads.google_ads.v6.proto.resources.campaign_draft_pb2',
CampaignDraftErrorEnum='google.ads.google_ads.v6.proto.errors.campaign_draft_error_pb2',
CampaignDraftOperation='google.ads.google_ads.v6.proto.services.campaign_draft_service_pb2',
CampaignDraftStatusEnum='google.ads.google_ads.v6.proto.enums.campaign_draft_status_pb2',
CampaignDuration='google.ads.google_ads.v6.proto.services.reach_plan_service_pb2',
CampaignErrorEnum='google.ads.google_ads.v6.proto.errors.campaign_error_pb2',
CampaignExperiment='google.ads.google_ads.v6.proto.resources.campaign_experiment_pb2',
CampaignExperimentErrorEnum='google.ads.google_ads.v6.proto.errors.campaign_experiment_error_pb2',
CampaignExperimentOperation='google.ads.google_ads.v6.proto.services.campaign_experiment_service_pb2',
CampaignExperimentStatusEnum='google.ads.google_ads.v6.proto.enums.campaign_experiment_status_pb2',
CampaignExperimentTrafficSplitTypeEnum='google.ads.google_ads.v6.proto.enums.campaign_experiment_traffic_split_type_pb2',
CampaignExperimentTypeEnum='google.ads.google_ads.v6.proto.enums.campaign_experiment_type_pb2',
CampaignExtensionSetting='google.ads.google_ads.v6.proto.resources.campaign_extension_setting_pb2',
CampaignExtensionSettingOperation='google.ads.google_ads.v6.proto.services.campaign_extension_setting_service_pb2',
CampaignFeed='google.ads.google_ads.v6.proto.resources.campaign_feed_pb2',
CampaignFeedErrorEnum='google.ads.google_ads.v6.proto.errors.campaign_feed_error_pb2',
CampaignFeedOperation='google.ads.google_ads.v6.proto.services.campaign_feed_service_pb2',
CampaignLabel='google.ads.google_ads.v6.proto.resources.campaign_label_pb2',
CampaignLabelOperation='google.ads.google_ads.v6.proto.services.campaign_label_service_pb2',
CampaignOperation='google.ads.google_ads.v6.proto.services.campaign_service_pb2',
CampaignServingStatusEnum='google.ads.google_ads.v6.proto.enums.campaign_serving_status_pb2',
CampaignSharedSet='google.ads.google_ads.v6.proto.resources.campaign_shared_set_pb2',
CampaignSharedSetErrorEnum='google.ads.google_ads.v6.proto.errors.campaign_shared_set_error_pb2',
CampaignSharedSetOperation='google.ads.google_ads.v6.proto.services.campaign_shared_set_service_pb2',
CampaignSharedSetStatusEnum='google.ads.google_ads.v6.proto.enums.campaign_shared_set_status_pb2',
CampaignStatusEnum='google.ads.google_ads.v6.proto.enums.campaign_status_pb2',
CarrierConstant='google.ads.google_ads.v6.proto.resources.carrier_constant_pb2',
CarrierInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
ChangeClientTypeEnum='google.ads.google_ads.v6.proto.enums.change_client_type_pb2',
ChangeEvent='google.ads.google_ads.v6.proto.resources.change_event_pb2',
ChangeEventErrorEnum='google.ads.google_ads.v6.proto.errors.change_event_error_pb2',
ChangeEventResourceTypeEnum='google.ads.google_ads.v6.proto.enums.change_event_resource_type_pb2',
ChangeStatus='google.ads.google_ads.v6.proto.resources.change_status_pb2',
ChangeStatusErrorEnum='google.ads.google_ads.v6.proto.errors.change_status_error_pb2',
ChangeStatusOperationEnum='google.ads.google_ads.v6.proto.enums.change_status_operation_pb2',
ChangeStatusResourceTypeEnum='google.ads.google_ads.v6.proto.enums.change_status_resource_type_pb2',
ClickConversion='google.ads.google_ads.v6.proto.services.conversion_upload_service_pb2',
ClickConversionResult='google.ads.google_ads.v6.proto.services.conversion_upload_service_pb2',
ClickLocation='google.ads.google_ads.v6.proto.common.click_location_pb2',
ClickTypeEnum='google.ads.google_ads.v6.proto.enums.click_type_pb2',
ClickView='google.ads.google_ads.v6.proto.resources.click_view_pb2',
CollectionSizeErrorEnum='google.ads.google_ads.v6.proto.errors.collection_size_error_pb2',
CombinedAudience='google.ads.google_ads.v6.proto.resources.combined_audience_pb2',
CombinedAudienceInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
CombinedAudienceStatusEnum='google.ads.google_ads.v6.proto.enums.combined_audience_status_pb2',
CombinedRuleUserListInfo='google.ads.google_ads.v6.proto.common.user_lists_pb2',
Commission='google.ads.google_ads.v6.proto.common.bidding_pb2',
ContentLabelInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
ContentLabelTypeEnum='google.ads.google_ads.v6.proto.enums.content_label_type_pb2',
ContextErrorEnum='google.ads.google_ads.v6.proto.errors.context_error_pb2',
ConversionAction='google.ads.google_ads.v6.proto.resources.conversion_action_pb2',
ConversionActionCategoryEnum='google.ads.google_ads.v6.proto.enums.conversion_action_category_pb2',
ConversionActionCountingTypeEnum='google.ads.google_ads.v6.proto.enums.conversion_action_counting_type_pb2',
ConversionActionErrorEnum='google.ads.google_ads.v6.proto.errors.conversion_action_error_pb2',
ConversionActionOperation='google.ads.google_ads.v6.proto.services.conversion_action_service_pb2',
ConversionActionStatusEnum='google.ads.google_ads.v6.proto.enums.conversion_action_status_pb2',
ConversionActionTypeEnum='google.ads.google_ads.v6.proto.enums.conversion_action_type_pb2',
ConversionAdjustment='google.ads.google_ads.v6.proto.services.conversion_adjustment_upload_service_pb2',
ConversionAdjustmentResult='google.ads.google_ads.v6.proto.services.conversion_adjustment_upload_service_pb2',
ConversionAdjustmentTypeEnum='google.ads.google_ads.v6.proto.enums.conversion_adjustment_type_pb2',
ConversionAdjustmentUploadErrorEnum='google.ads.google_ads.v6.proto.errors.conversion_adjustment_upload_error_pb2',
ConversionAttributionEventTypeEnum='google.ads.google_ads.v6.proto.enums.conversion_attribution_event_type_pb2',
ConversionLagBucketEnum='google.ads.google_ads.v6.proto.enums.conversion_lag_bucket_pb2',
ConversionOrAdjustmentLagBucketEnum='google.ads.google_ads.v6.proto.enums.conversion_or_adjustment_lag_bucket_pb2',
ConversionTrackingSetting='google.ads.google_ads.v6.proto.resources.customer_pb2',
ConversionUploadErrorEnum='google.ads.google_ads.v6.proto.errors.conversion_upload_error_pb2',
CountryCodeErrorEnum='google.ads.google_ads.v6.proto.errors.country_code_error_pb2',
CpcBidSimulationPoint='google.ads.google_ads.v6.proto.common.simulation_pb2',
CpcBidSimulationPointList='google.ads.google_ads.v6.proto.common.simulation_pb2',
CpvBidSimulationPoint='google.ads.google_ads.v6.proto.common.simulation_pb2',
CpvBidSimulationPointList='google.ads.google_ads.v6.proto.common.simulation_pb2',
CreateAccountLinkRequest='google.ads.google_ads.v6.proto.services.account_link_service_pb2',
CreateAccountLinkResponse='google.ads.google_ads.v6.proto.services.account_link_service_pb2',
CreateCampaignExperimentMetadata='google.ads.google_ads.v6.proto.services.campaign_experiment_service_pb2',
CreateCampaignExperimentRequest='google.ads.google_ads.v6.proto.services.campaign_experiment_service_pb2',
CreateCustomerClientRequest='google.ads.google_ads.v6.proto.services.customer_service_pb2',
CreateCustomerClientResponse='google.ads.google_ads.v6.proto.services.customer_service_pb2',
CreateOfflineUserDataJobRequest='google.ads.google_ads.v6.proto.services.offline_user_data_job_service_pb2',
CreateOfflineUserDataJobResponse='google.ads.google_ads.v6.proto.services.offline_user_data_job_service_pb2',
CriterionCategoryAvailability='google.ads.google_ads.v6.proto.common.criterion_category_availability_pb2',
CriterionCategoryChannelAvailability='google.ads.google_ads.v6.proto.common.criterion_category_availability_pb2',
CriterionCategoryChannelAvailabilityModeEnum='google.ads.google_ads.v6.proto.enums.criterion_category_channel_availability_mode_pb2',
CriterionCategoryLocaleAvailability='google.ads.google_ads.v6.proto.common.criterion_category_availability_pb2',
CriterionCategoryLocaleAvailabilityModeEnum='google.ads.google_ads.v6.proto.enums.criterion_category_locale_availability_mode_pb2',
CriterionErrorEnum='google.ads.google_ads.v6.proto.errors.criterion_error_pb2',
CriterionSystemServingStatusEnum='google.ads.google_ads.v6.proto.enums.criterion_system_serving_status_pb2',
CriterionTypeEnum='google.ads.google_ads.v6.proto.enums.criterion_type_pb2',
CrmBasedUserListInfo='google.ads.google_ads.v6.proto.common.user_lists_pb2',
CurrencyCodeErrorEnum='google.ads.google_ads.v6.proto.errors.currency_code_error_pb2',
CurrencyConstant='google.ads.google_ads.v6.proto.resources.currency_constant_pb2',
CustomAffinityInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
CustomAudience='google.ads.google_ads.v6.proto.resources.custom_audience_pb2',
CustomAudienceErrorEnum='google.ads.google_ads.v6.proto.errors.custom_audience_error_pb2',
CustomAudienceInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
CustomAudienceMember='google.ads.google_ads.v6.proto.resources.custom_audience_pb2',
CustomAudienceMemberTypeEnum='google.ads.google_ads.v6.proto.enums.custom_audience_member_type_pb2',
CustomAudienceOperation='google.ads.google_ads.v6.proto.services.custom_audience_service_pb2',
CustomAudienceStatusEnum='google.ads.google_ads.v6.proto.enums.custom_audience_status_pb2',
CustomAudienceTypeEnum='google.ads.google_ads.v6.proto.enums.custom_audience_type_pb2',
CustomIntentInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
CustomInterest='google.ads.google_ads.v6.proto.resources.custom_interest_pb2',
CustomInterestErrorEnum='google.ads.google_ads.v6.proto.errors.custom_interest_error_pb2',
CustomInterestMember='google.ads.google_ads.v6.proto.resources.custom_interest_pb2',
CustomInterestMemberTypeEnum='google.ads.google_ads.v6.proto.enums.custom_interest_member_type_pb2',
CustomInterestOperation='google.ads.google_ads.v6.proto.services.custom_interest_service_pb2',
CustomInterestStatusEnum='google.ads.google_ads.v6.proto.enums.custom_interest_status_pb2',
CustomInterestTypeEnum='google.ads.google_ads.v6.proto.enums.custom_interest_type_pb2',
CustomParameter='google.ads.google_ads.v6.proto.common.custom_parameter_pb2',
CustomPlaceholderFieldEnum='google.ads.google_ads.v6.proto.enums.custom_placeholder_field_pb2',
Customer='google.ads.google_ads.v6.proto.resources.customer_pb2',
CustomerClient='google.ads.google_ads.v6.proto.resources.customer_client_pb2',
CustomerClientLink='google.ads.google_ads.v6.proto.resources.customer_client_link_pb2',
CustomerClientLinkErrorEnum='google.ads.google_ads.v6.proto.errors.customer_client_link_error_pb2',
CustomerClientLinkOperation='google.ads.google_ads.v6.proto.services.customer_client_link_service_pb2',
CustomerErrorEnum='google.ads.google_ads.v6.proto.errors.customer_error_pb2',
CustomerExtensionSetting='google.ads.google_ads.v6.proto.resources.customer_extension_setting_pb2',
CustomerExtensionSettingOperation='google.ads.google_ads.v6.proto.services.customer_extension_setting_service_pb2',
CustomerFeed='google.ads.google_ads.v6.proto.resources.customer_feed_pb2',
CustomerFeedErrorEnum='google.ads.google_ads.v6.proto.errors.customer_feed_error_pb2',
CustomerFeedOperation='google.ads.google_ads.v6.proto.services.customer_feed_service_pb2',
CustomerLabel='google.ads.google_ads.v6.proto.resources.customer_label_pb2',
CustomerLabelOperation='google.ads.google_ads.v6.proto.services.customer_label_service_pb2',
CustomerManagerLink='google.ads.google_ads.v6.proto.resources.customer_manager_link_pb2',
CustomerManagerLinkErrorEnum='google.ads.google_ads.v6.proto.errors.customer_manager_link_error_pb2',
CustomerManagerLinkOperation='google.ads.google_ads.v6.proto.services.customer_manager_link_service_pb2',
CustomerMatchUploadKeyTypeEnum='google.ads.google_ads.v6.proto.enums.customer_match_upload_key_type_pb2',
CustomerMatchUserListMetadata='google.ads.google_ads.v6.proto.common.offline_user_data_pb2',
CustomerNegativeCriterion='google.ads.google_ads.v6.proto.resources.customer_negative_criterion_pb2',
CustomerNegativeCriterionOperation='google.ads.google_ads.v6.proto.services.customer_negative_criterion_service_pb2',
CustomerOperation='google.ads.google_ads.v6.proto.services.customer_service_pb2',
CustomerPayPerConversionEligibilityFailureReasonEnum='google.ads.google_ads.v6.proto.enums.customer_pay_per_conversion_eligibility_failure_reason_pb2',
CustomerUserAccess='google.ads.google_ads.v6.proto.resources.customer_user_access_pb2',
CustomerUserAccessErrorEnum='google.ads.google_ads.v6.proto.errors.customer_user_access_error_pb2',
CustomerUserAccessOperation='google.ads.google_ads.v6.proto.services.customer_user_access_service_pb2',
DataDrivenModelStatusEnum='google.ads.google_ads.v6.proto.enums.data_driven_model_status_pb2',
DataPartnerLinkIdentifier='google.ads.google_ads.v6.proto.resources.account_link_pb2',
DatabaseErrorEnum='google.ads.google_ads.v6.proto.errors.database_error_pb2',
DateErrorEnum='google.ads.google_ads.v6.proto.errors.date_error_pb2',
DateRange='google.ads.google_ads.v6.proto.common.dates_pb2',
DateRangeErrorEnum='google.ads.google_ads.v6.proto.errors.date_range_error_pb2',
DateSpecificRuleUserListInfo='google.ads.google_ads.v6.proto.common.user_lists_pb2',
DayOfWeekEnum='google.ads.google_ads.v6.proto.enums.day_of_week_pb2',
DetailPlacementView='google.ads.google_ads.v6.proto.resources.detail_placement_view_pb2',
DeviceEnum='google.ads.google_ads.v6.proto.enums.device_pb2',
DeviceInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
DismissRecommendationRequest='google.ads.google_ads.v6.proto.services.recommendation_service_pb2',
DismissRecommendationResponse='google.ads.google_ads.v6.proto.services.recommendation_service_pb2',
DisplayAdFormatSettingEnum='google.ads.google_ads.v6.proto.enums.display_ad_format_setting_pb2',
DisplayCallToAction='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
DisplayKeywordView='google.ads.google_ads.v6.proto.resources.display_keyword_view_pb2',
DisplayUploadAdInfo='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
DisplayUploadProductTypeEnum='google.ads.google_ads.v6.proto.enums.display_upload_product_type_pb2',
DistanceBucketEnum='google.ads.google_ads.v6.proto.enums.distance_bucket_pb2',
DistanceView='google.ads.google_ads.v6.proto.resources.distance_view_pb2',
DistinctErrorEnum='google.ads.google_ads.v6.proto.errors.distinct_error_pb2',
DomainCategory='google.ads.google_ads.v6.proto.resources.domain_category_pb2',
DsaPageFeedCriterionFieldEnum='google.ads.google_ads.v6.proto.enums.dsa_page_feed_criterion_field_pb2',
DynamicAffiliateLocationSetFilter='google.ads.google_ads.v6.proto.common.feed_item_set_filter_type_infos_pb2',
DynamicLocationSetFilter='google.ads.google_ads.v6.proto.common.feed_item_set_filter_type_infos_pb2',
DynamicSearchAdsSearchTermView='google.ads.google_ads.v6.proto.resources.dynamic_search_ads_search_term_view_pb2',
EducationPlaceholderFieldEnum='google.ads.google_ads.v6.proto.enums.education_placeholder_field_pb2',
EndCampaignExperimentRequest='google.ads.google_ads.v6.proto.services.campaign_experiment_service_pb2',
EnhancedCpc='google.ads.google_ads.v6.proto.common.bidding_pb2',
EnumErrorEnum='google.ads.google_ads.v6.proto.errors.enum_error_pb2',
ErrorCode='google.ads.google_ads.v6.proto.errors.errors_pb2',
ErrorDetails='google.ads.google_ads.v6.proto.errors.errors_pb2',
ErrorLocation='google.ads.google_ads.v6.proto.errors.errors_pb2',
ExpandedDynamicSearchAdInfo='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
ExpandedLandingPageView='google.ads.google_ads.v6.proto.resources.expanded_landing_page_view_pb2',
ExpandedTextAdInfo='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
ExplorerAutoOptimizerSetting='google.ads.google_ads.v6.proto.common.explorer_auto_optimizer_setting_pb2',
ExpressionRuleUserListInfo='google.ads.google_ads.v6.proto.common.user_lists_pb2',
ExtensionFeedItem='google.ads.google_ads.v6.proto.resources.extension_feed_item_pb2',
ExtensionFeedItemErrorEnum='google.ads.google_ads.v6.proto.errors.extension_feed_item_error_pb2',
ExtensionFeedItemOperation='google.ads.google_ads.v6.proto.services.extension_feed_item_service_pb2',
ExtensionSettingDeviceEnum='google.ads.google_ads.v6.proto.enums.extension_setting_device_pb2',
ExtensionSettingErrorEnum='google.ads.google_ads.v6.proto.errors.extension_setting_error_pb2',
ExtensionTypeEnum='google.ads.google_ads.v6.proto.enums.extension_type_pb2',
ExternalAttributionData='google.ads.google_ads.v6.proto.services.conversion_upload_service_pb2',
ExternalConversionSourceEnum='google.ads.google_ads.v6.proto.enums.external_conversion_source_pb2',
Feed='google.ads.google_ads.v6.proto.resources.feed_pb2',
FeedAttribute='google.ads.google_ads.v6.proto.resources.feed_pb2',
FeedAttributeOperation='google.ads.google_ads.v6.proto.resources.feed_pb2',
FeedAttributeReferenceErrorEnum='google.ads.google_ads.v6.proto.errors.feed_attribute_reference_error_pb2',
FeedAttributeTypeEnum='google.ads.google_ads.v6.proto.enums.feed_attribute_type_pb2',
FeedErrorEnum='google.ads.google_ads.v6.proto.errors.feed_error_pb2',
FeedItem='google.ads.google_ads.v6.proto.resources.feed_item_pb2',
FeedItemAttributeValue='google.ads.google_ads.v6.proto.resources.feed_item_pb2',
FeedItemErrorEnum='google.ads.google_ads.v6.proto.errors.feed_item_error_pb2',
FeedItemOperation='google.ads.google_ads.v6.proto.services.feed_item_service_pb2',
FeedItemPlaceholderPolicyInfo='google.ads.google_ads.v6.proto.resources.feed_item_pb2',
FeedItemQualityApprovalStatusEnum='google.ads.google_ads.v6.proto.enums.feed_item_quality_approval_status_pb2',
FeedItemQualityDisapprovalReasonEnum='google.ads.google_ads.v6.proto.enums.feed_item_quality_disapproval_reason_pb2',
FeedItemSet='google.ads.google_ads.v6.proto.resources.feed_item_set_pb2',
FeedItemSetErrorEnum='google.ads.google_ads.v6.proto.errors.feed_item_set_error_pb2',
FeedItemSetLink='google.ads.google_ads.v6.proto.resources.feed_item_set_link_pb2',
FeedItemSetLinkErrorEnum='google.ads.google_ads.v6.proto.errors.feed_item_set_link_error_pb2',
FeedItemSetLinkOperation='google.ads.google_ads.v6.proto.services.feed_item_set_link_service_pb2',
FeedItemSetOperation='google.ads.google_ads.v6.proto.services.feed_item_set_service_pb2',
FeedItemSetStatusEnum='google.ads.google_ads.v6.proto.enums.feed_item_set_status_pb2',
FeedItemSetStringFilterTypeEnum='google.ads.google_ads.v6.proto.enums.feed_item_set_string_filter_type_pb2',
FeedItemStatusEnum='google.ads.google_ads.v6.proto.enums.feed_item_status_pb2',
FeedItemTarget='google.ads.google_ads.v6.proto.resources.feed_item_target_pb2',
FeedItemTargetDeviceEnum='google.ads.google_ads.v6.proto.enums.feed_item_target_device_pb2',
FeedItemTargetErrorEnum='google.ads.google_ads.v6.proto.errors.feed_item_target_error_pb2',
FeedItemTargetOperation='google.ads.google_ads.v6.proto.services.feed_item_target_service_pb2',
FeedItemTargetStatusEnum='google.ads.google_ads.v6.proto.enums.feed_item_target_status_pb2',
FeedItemTargetTypeEnum='google.ads.google_ads.v6.proto.enums.feed_item_target_type_pb2',
FeedItemValidationError='google.ads.google_ads.v6.proto.resources.feed_item_pb2',
FeedItemValidationErrorEnum='google.ads.google_ads.v6.proto.errors.feed_item_validation_error_pb2',
FeedItemValidationStatusEnum='google.ads.google_ads.v6.proto.enums.feed_item_validation_status_pb2',
FeedLinkStatusEnum='google.ads.google_ads.v6.proto.enums.feed_link_status_pb2',
FeedMapping='google.ads.google_ads.v6.proto.resources.feed_mapping_pb2',
FeedMappingCriterionTypeEnum='google.ads.google_ads.v6.proto.enums.feed_mapping_criterion_type_pb2',
FeedMappingErrorEnum='google.ads.google_ads.v6.proto.errors.feed_mapping_error_pb2',
FeedMappingOperation='google.ads.google_ads.v6.proto.services.feed_mapping_service_pb2',
FeedMappingStatusEnum='google.ads.google_ads.v6.proto.enums.feed_mapping_status_pb2',
FeedOperation='google.ads.google_ads.v6.proto.services.feed_service_pb2',
FeedOriginEnum='google.ads.google_ads.v6.proto.enums.feed_origin_pb2',
FeedPlaceholderView='google.ads.google_ads.v6.proto.resources.feed_placeholder_view_pb2',
FeedStatusEnum='google.ads.google_ads.v6.proto.enums.feed_status_pb2',
FieldErrorEnum='google.ads.google_ads.v6.proto.errors.field_error_pb2',
FieldMaskErrorEnum='google.ads.google_ads.v6.proto.errors.field_mask_error_pb2',
FinalAppUrl='google.ads.google_ads.v6.proto.common.final_app_url_pb2',
FlightPlaceholderFieldEnum='google.ads.google_ads.v6.proto.enums.flight_placeholder_field_pb2',
Forecast='google.ads.google_ads.v6.proto.services.reach_plan_service_pb2',
ForecastMetrics='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
FrequencyCap='google.ads.google_ads.v6.proto.services.reach_plan_service_pb2',
FrequencyCapEntry='google.ads.google_ads.v6.proto.common.frequency_cap_pb2',
FrequencyCapEventTypeEnum='google.ads.google_ads.v6.proto.enums.frequency_cap_event_type_pb2',
FrequencyCapKey='google.ads.google_ads.v6.proto.common.frequency_cap_pb2',
FrequencyCapLevelEnum='google.ads.google_ads.v6.proto.enums.frequency_cap_level_pb2',
FrequencyCapTimeUnitEnum='google.ads.google_ads.v6.proto.enums.frequency_cap_time_unit_pb2',
FunctionErrorEnum='google.ads.google_ads.v6.proto.errors.function_error_pb2',
FunctionParsingErrorEnum='google.ads.google_ads.v6.proto.errors.function_parsing_error_pb2',
GclidDateTimePair='google.ads.google_ads.v6.proto.services.conversion_adjustment_upload_service_pb2',
GenderInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
GenderTypeEnum='google.ads.google_ads.v6.proto.enums.gender_type_pb2',
GenderView='google.ads.google_ads.v6.proto.resources.gender_view_pb2',
GenerateForecastCurveRequest='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GenerateForecastCurveResponse='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GenerateForecastMetricsRequest='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GenerateForecastMetricsResponse='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GenerateForecastTimeSeriesRequest='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GenerateForecastTimeSeriesResponse='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GenerateHistoricalMetricsRequest='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GenerateHistoricalMetricsResponse='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GenerateKeywordIdeaResponse='google.ads.google_ads.v6.proto.services.keyword_plan_idea_service_pb2',
GenerateKeywordIdeaResult='google.ads.google_ads.v6.proto.services.keyword_plan_idea_service_pb2',
GenerateKeywordIdeasRequest='google.ads.google_ads.v6.proto.services.keyword_plan_idea_service_pb2',
GenerateProductMixIdeasRequest='google.ads.google_ads.v6.proto.services.reach_plan_service_pb2',
GenerateProductMixIdeasResponse='google.ads.google_ads.v6.proto.services.reach_plan_service_pb2',
GenerateReachForecastRequest='google.ads.google_ads.v6.proto.services.reach_plan_service_pb2',
GenerateReachForecastResponse='google.ads.google_ads.v6.proto.services.reach_plan_service_pb2',
GeoPointInfo='google.ads.google_ads.v6.proto.common.criteria_pb2',
GeoTargetConstant='google.ads.google_ads.v6.proto.resources.geo_target_constant_pb2',
GeoTargetConstantStatusEnum='google.ads.google_ads.v6.proto.enums.geo_target_constant_status_pb2',
GeoTargetConstantSuggestion='google.ads.google_ads.v6.proto.services.geo_target_constant_service_pb2',
GeoTargetConstantSuggestionErrorEnum='google.ads.google_ads.v6.proto.errors.geo_target_constant_suggestion_error_pb2',
GeoTargetingRestrictionEnum='google.ads.google_ads.v6.proto.enums.geo_targeting_restriction_pb2',
GeoTargetingTypeEnum='google.ads.google_ads.v6.proto.enums.geo_targeting_type_pb2',
GeographicView='google.ads.google_ads.v6.proto.resources.geographic_view_pb2',
GetAccountBudgetProposalRequest='google.ads.google_ads.v6.proto.services.account_budget_proposal_service_pb2',
GetAccountBudgetRequest='google.ads.google_ads.v6.proto.services.account_budget_service_pb2',
GetAccountLinkRequest='google.ads.google_ads.v6.proto.services.account_link_service_pb2',
GetAdGroupAdAssetViewRequest='google.ads.google_ads.v6.proto.services.ad_group_ad_asset_view_service_pb2',
GetAdGroupAdLabelRequest='google.ads.google_ads.v6.proto.services.ad_group_ad_label_service_pb2',
GetAdGroupAdRequest='google.ads.google_ads.v6.proto.services.ad_group_ad_service_pb2',
GetAdGroupAudienceViewRequest='google.ads.google_ads.v6.proto.services.ad_group_audience_view_service_pb2',
GetAdGroupBidModifierRequest='google.ads.google_ads.v6.proto.services.ad_group_bid_modifier_service_pb2',
GetAdGroupCriterionLabelRequest='google.ads.google_ads.v6.proto.services.ad_group_criterion_label_service_pb2',
GetAdGroupCriterionRequest='google.ads.google_ads.v6.proto.services.ad_group_criterion_service_pb2',
GetAdGroupCriterionSimulationRequest='google.ads.google_ads.v6.proto.services.ad_group_criterion_simulation_service_pb2',
GetAdGroupExtensionSettingRequest='google.ads.google_ads.v6.proto.services.ad_group_extension_setting_service_pb2',
GetAdGroupFeedRequest='google.ads.google_ads.v6.proto.services.ad_group_feed_service_pb2',
GetAdGroupLabelRequest='google.ads.google_ads.v6.proto.services.ad_group_label_service_pb2',
GetAdGroupRequest='google.ads.google_ads.v6.proto.services.ad_group_service_pb2',
GetAdGroupSimulationRequest='google.ads.google_ads.v6.proto.services.ad_group_simulation_service_pb2',
GetAdParameterRequest='google.ads.google_ads.v6.proto.services.ad_parameter_service_pb2',
GetAdRequest='google.ads.google_ads.v6.proto.services.ad_service_pb2',
GetAdScheduleViewRequest='google.ads.google_ads.v6.proto.services.ad_schedule_view_service_pb2',
GetAgeRangeViewRequest='google.ads.google_ads.v6.proto.services.age_range_view_service_pb2',
GetAssetRequest='google.ads.google_ads.v6.proto.services.asset_service_pb2',
GetBatchJobRequest='google.ads.google_ads.v6.proto.services.batch_job_service_pb2',
GetBiddingStrategyRequest='google.ads.google_ads.v6.proto.services.bidding_strategy_service_pb2',
GetBillingSetupRequest='google.ads.google_ads.v6.proto.services.billing_setup_service_pb2',
GetCampaignAssetRequest='google.ads.google_ads.v6.proto.services.campaign_asset_service_pb2',
GetCampaignAudienceViewRequest='google.ads.google_ads.v6.proto.services.campaign_audience_view_service_pb2',
GetCampaignBidModifierRequest='google.ads.google_ads.v6.proto.services.campaign_bid_modifier_service_pb2',
GetCampaignBudgetRequest='google.ads.google_ads.v6.proto.services.campaign_budget_service_pb2',
GetCampaignCriterionRequest='google.ads.google_ads.v6.proto.services.campaign_criterion_service_pb2',
GetCampaignCriterionSimulationRequest='google.ads.google_ads.v6.proto.services.campaign_criterion_simulation_service_pb2',
GetCampaignDraftRequest='google.ads.google_ads.v6.proto.services.campaign_draft_service_pb2',
GetCampaignExperimentRequest='google.ads.google_ads.v6.proto.services.campaign_experiment_service_pb2',
GetCampaignExtensionSettingRequest='google.ads.google_ads.v6.proto.services.campaign_extension_setting_service_pb2',
GetCampaignFeedRequest='google.ads.google_ads.v6.proto.services.campaign_feed_service_pb2',
GetCampaignLabelRequest='google.ads.google_ads.v6.proto.services.campaign_label_service_pb2',
GetCampaignRequest='google.ads.google_ads.v6.proto.services.campaign_service_pb2',
GetCampaignSharedSetRequest='google.ads.google_ads.v6.proto.services.campaign_shared_set_service_pb2',
GetCarrierConstantRequest='google.ads.google_ads.v6.proto.services.carrier_constant_service_pb2',
GetChangeStatusRequest='google.ads.google_ads.v6.proto.services.change_status_service_pb2',
GetClickViewRequest='google.ads.google_ads.v6.proto.services.click_view_service_pb2',
GetCombinedAudienceRequest='google.ads.google_ads.v6.proto.services.combined_audience_service_pb2',
GetConversionActionRequest='google.ads.google_ads.v6.proto.services.conversion_action_service_pb2',
GetCurrencyConstantRequest='google.ads.google_ads.v6.proto.services.currency_constant_service_pb2',
GetCustomAudienceRequest='google.ads.google_ads.v6.proto.services.custom_audience_service_pb2',
GetCustomInterestRequest='google.ads.google_ads.v6.proto.services.custom_interest_service_pb2',
GetCustomerClientLinkRequest='google.ads.google_ads.v6.proto.services.customer_client_link_service_pb2',
GetCustomerClientRequest='google.ads.google_ads.v6.proto.services.customer_client_service_pb2',
GetCustomerExtensionSettingRequest='google.ads.google_ads.v6.proto.services.customer_extension_setting_service_pb2',
GetCustomerFeedRequest='google.ads.google_ads.v6.proto.services.customer_feed_service_pb2',
GetCustomerLabelRequest='google.ads.google_ads.v6.proto.services.customer_label_service_pb2',
GetCustomerManagerLinkRequest='google.ads.google_ads.v6.proto.services.customer_manager_link_service_pb2',
GetCustomerNegativeCriterionRequest='google.ads.google_ads.v6.proto.services.customer_negative_criterion_service_pb2',
GetCustomerRequest='google.ads.google_ads.v6.proto.services.customer_service_pb2',
GetCustomerUserAccessRequest='google.ads.google_ads.v6.proto.services.customer_user_access_service_pb2',
GetDetailPlacementViewRequest='google.ads.google_ads.v6.proto.services.detail_placement_view_service_pb2',
GetDisplayKeywordViewRequest='google.ads.google_ads.v6.proto.services.display_keyword_view_service_pb2',
GetDistanceViewRequest='google.ads.google_ads.v6.proto.services.distance_view_service_pb2',
GetDomainCategoryRequest='google.ads.google_ads.v6.proto.services.domain_category_service_pb2',
GetDynamicSearchAdsSearchTermViewRequest='google.ads.google_ads.v6.proto.services.dynamic_search_ads_search_term_view_service_pb2',
GetExpandedLandingPageViewRequest='google.ads.google_ads.v6.proto.services.expanded_landing_page_view_service_pb2',
GetExtensionFeedItemRequest='google.ads.google_ads.v6.proto.services.extension_feed_item_service_pb2',
GetFeedItemRequest='google.ads.google_ads.v6.proto.services.feed_item_service_pb2',
GetFeedItemSetLinkRequest='google.ads.google_ads.v6.proto.services.feed_item_set_link_service_pb2',
GetFeedItemSetRequest='google.ads.google_ads.v6.proto.services.feed_item_set_service_pb2',
GetFeedItemTargetRequest='google.ads.google_ads.v6.proto.services.feed_item_target_service_pb2',
GetFeedMappingRequest='google.ads.google_ads.v6.proto.services.feed_mapping_service_pb2',
GetFeedPlaceholderViewRequest='google.ads.google_ads.v6.proto.services.feed_placeholder_view_service_pb2',
GetFeedRequest='google.ads.google_ads.v6.proto.services.feed_service_pb2',
GetGenderViewRequest='google.ads.google_ads.v6.proto.services.gender_view_service_pb2',
GetGeoTargetConstantRequest='google.ads.google_ads.v6.proto.services.geo_target_constant_service_pb2',
GetGeographicViewRequest='google.ads.google_ads.v6.proto.services.geographic_view_service_pb2',
GetGoogleAdsFieldRequest='google.ads.google_ads.v6.proto.services.google_ads_field_service_pb2',
GetGroupPlacementViewRequest='google.ads.google_ads.v6.proto.services.group_placement_view_service_pb2',
GetHotelGroupViewRequest='google.ads.google_ads.v6.proto.services.hotel_group_view_service_pb2',
GetHotelPerformanceViewRequest='google.ads.google_ads.v6.proto.services.hotel_performance_view_service_pb2',
GetIncomeRangeViewRequest='google.ads.google_ads.v6.proto.services.income_range_view_service_pb2',
GetKeywordPlanAdGroupKeywordRequest='google.ads.google_ads.v6.proto.services.keyword_plan_ad_group_keyword_service_pb2',
GetKeywordPlanAdGroupRequest='google.ads.google_ads.v6.proto.services.keyword_plan_ad_group_service_pb2',
GetKeywordPlanCampaignKeywordRequest='google.ads.google_ads.v6.proto.services.keyword_plan_campaign_keyword_service_pb2',
GetKeywordPlanCampaignRequest='google.ads.google_ads.v6.proto.services.keyword_plan_campaign_service_pb2',
GetKeywordPlanRequest='google.ads.google_ads.v6.proto.services.keyword_plan_service_pb2',
GetKeywordViewRequest='google.ads.google_ads.v6.proto.services.keyword_view_service_pb2',
GetLabelRequest='google.ads.google_ads.v6.proto.services.label_service_pb2',
GetLandingPageViewRequest='google.ads.google_ads.v6.proto.services.landing_page_view_service_pb2',
GetLanguageConstantRequest='google.ads.google_ads.v6.proto.services.language_constant_service_pb2',
GetLocationViewRequest='google.ads.google_ads.v6.proto.services.location_view_service_pb2',
GetManagedPlacementViewRequest='google.ads.google_ads.v6.proto.services.managed_placement_view_service_pb2',
GetMediaFileRequest='google.ads.google_ads.v6.proto.services.media_file_service_pb2',
GetMerchantCenterLinkRequest='google.ads.google_ads.v6.proto.services.merchant_center_link_service_pb2',
GetMobileAppCategoryConstantRequest='google.ads.google_ads.v6.proto.services.mobile_app_category_constant_service_pb2',
GetMobileDeviceConstantRequest='google.ads.google_ads.v6.proto.services.mobile_device_constant_service_pb2',
GetOfflineUserDataJobRequest='google.ads.google_ads.v6.proto.services.offline_user_data_job_service_pb2',
GetOperatingSystemVersionConstantRequest='google.ads.google_ads.v6.proto.services.operating_system_version_constant_service_pb2',
GetPaidOrganicSearchTermViewRequest='google.ads.google_ads.v6.proto.services.paid_organic_search_term_view_service_pb2',
GetParentalStatusViewRequest='google.ads.google_ads.v6.proto.services.parental_status_view_service_pb2',
GetProductBiddingCategoryConstantRequest='google.ads.google_ads.v6.proto.services.product_bidding_category_constant_service_pb2',
GetProductGroupViewRequest='google.ads.google_ads.v6.proto.services.product_group_view_service_pb2',
GetRecommendationRequest='google.ads.google_ads.v6.proto.services.recommendation_service_pb2',
GetRemarketingActionRequest='google.ads.google_ads.v6.proto.services.remarketing_action_service_pb2',
GetSearchTermViewRequest='google.ads.google_ads.v6.proto.services.search_term_view_service_pb2',
GetSharedCriterionRequest='google.ads.google_ads.v6.proto.services.shared_criterion_service_pb2',
GetSharedSetRequest='google.ads.google_ads.v6.proto.services.shared_set_service_pb2',
GetShoppingPerformanceViewRequest='google.ads.google_ads.v6.proto.services.shopping_performance_view_service_pb2',
GetThirdPartyAppAnalyticsLinkRequest='google.ads.google_ads.v6.proto.services.third_party_app_analytics_link_service_pb2',
GetTopicConstantRequest='google.ads.google_ads.v6.proto.services.topic_constant_service_pb2',
GetTopicViewRequest='google.ads.google_ads.v6.proto.services.topic_view_service_pb2',
GetUserInterestRequest='google.ads.google_ads.v6.proto.services.user_interest_service_pb2',
GetUserListRequest='google.ads.google_ads.v6.proto.services.user_list_service_pb2',
GetUserLocationViewRequest='google.ads.google_ads.v6.proto.services.user_location_view_service_pb2',
GetVideoRequest='google.ads.google_ads.v6.proto.services.video_service_pb2',
GmailAdInfo='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
GmailTeaser='google.ads.google_ads.v6.proto.common.ad_type_infos_pb2',
GoogleAdsError='google.ads.google_ads.v6.proto.errors.errors_pb2',
GoogleAdsFailure='google.ads.google_ads.v6.proto.errors.errors_pb2',
| |
from django.db import models
from django.contrib.auth.models import User
from datetime import date
class Profile(models.Model):
    """Per-user profile holding the uploaded source files.

    Attributes:
        user: one-to-one link to the Django auth user; deleting the user
            deletes the profile (CASCADE).
        user_data: uploaded operations file (optional).
        indicateurs_data: uploaded indicators file (optional).
    """

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    user_data = models.FileField(upload_to='user_data', blank=True, null=True)
    indicateurs_data = models.FileField(upload_to='indicateurs_data', blank=True, null=True)

    def __str__(self):
        # Display the profile under the owning user's name.
        return self.user.username
class Dossier(models.Model):
    """A funding-application file ("dossier") owned by a user.

    Holds identification, milestone dates, financing amounts (montant_*),
    payment tracking (DP_*), status fields and contact information.
    NOTE(review): the date_* / debut_* / fin_* columns are CharFields, not
    DateFields — presumably imported as raw text from spreadsheets; confirm
    before converting.
    """

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Identification
    numero_op = models.CharField(max_length=50, blank=True, null=True)
    porteur = models.CharField(max_length=100, blank=True, null=True)
    libélé = models.CharField(max_length=500, blank=True, null=True)
    descriptif = models.CharField(max_length=200000, blank=True, null=True)
    axe = models.CharField(max_length=15, blank=True, null=True)
    os = models.CharField(max_length=15, blank=True, null=True)
    DI = models.CharField(max_length=100, blank=True, null=True)
    OT = models.CharField(max_length=200, blank=True, null=True)
    AAP = models.CharField(max_length=100, null=True, blank=True)
    # Milestone dates (stored as free text)
    date_dépôt = models.CharField(max_length=15, blank=True, null=True)
    date_réception = models.CharField(max_length=15, blank=True, null=True)
    date_complétude = models.CharField(max_length=15, blank=True, null=True)
    date_CRUP = models.CharField(max_length=30, blank=True, null=True)
    date_notification = models.CharField(max_length=15, blank=True, null=True)
    date_signature = models.CharField(max_length=15, blank=True, null=True)
    debut_op = models.CharField(max_length=15, blank=True, null=True)
    fin_op = models.CharField(max_length=15, blank=True, null=True)
    début_éligibilité = models.CharField(max_length=15, blank=True, null=True)
    fin_éligibilité = models.CharField(max_length=15, blank=True, null=True)
    # Financing plan amounts
    montant_CT = models.FloatField(null=True, blank=True, default=0)
    montant_UE = models.FloatField(null=True, blank=True, default=0)
    montant_Etat = models.FloatField(null=True, blank=True, default=0)
    montant_CD = models.FloatField(null=True, blank=True, default=0)
    montant_Reg = models.FloatField(null=True, blank=True, default=0)
    montant_autre_public = models.FloatField(null=True, blank=True, default=0)
    montant_public = models.FloatField(null=True, blank=True, default=0)
    montant_privé = models.FloatField(null=True, blank=True, default=0)
    montant_auto = models.FloatField(null=True, blank=True, default=0)
    # Payment tracking
    DP_CT_depot = models.FloatField(null=True, blank=True, default=0)
    DP_certif = models.FloatField(null=True, blank=True, default=0)
    DP_payé = models.FloatField(null=True, blank=True, default=0)
    # Status and contacts
    statut_macro = models.CharField(max_length=50, default='', blank=True, null=True)
    statut_détaillé = models.CharField(max_length=50, default='', blank=True, null=True)
    type_bénéficiaire = models.CharField(max_length=50, default='', blank=True, null=True)
    représentant_légal = models.CharField(max_length=50, default='', blank=True, null=True)
    représentant_légal_tel = models.CharField(max_length=50, default='', blank=True, null=True)
    représentant_légal_mail = models.CharField(max_length=50, default='', blank=True, null=True)
    référent = models.CharField(max_length=50, default='', blank=True, null=True)
    référent_tel = models.CharField(max_length=50, default='', blank=True, null=True)
    référent_mail = models.CharField(max_length=50, default='', blank=True, null=True)
    instructeur = models.CharField(max_length=50, default='', blank=True, null=True)
    service = models.CharField(max_length=50, default='', blank=True, null=True)
    avis_SI = models.CharField(max_length=15, default='', blank=True, null=True)
    motivation_avis = models.CharField(max_length=400, default='', blank=True, null=True)
    note = models.CharField(max_length=500, default='Notes', blank=True, null=True)

    def __str__(self):
        # numero_op and porteur are nullable: concatenating None with '+'
        # raised TypeError for partially-filled rows. Fall back to ''.
        return '{} - {}'.format(self.numero_op or '', self.porteur or '')
class Indicateur(models.Model):
    """One performance indicator attached to an operation (by numero_op).

    Tracks the planned (prev), contractual (conv) and realised (real)
    values of the indicator.
    """

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    numero_op = models.CharField(max_length=200, default='', blank=True, null=True)
    indicateur = models.CharField(max_length=200, default='', blank=True, null=True)
    libellé = models.CharField(max_length=200, default='', blank=True, null=True)
    type_indicateur = models.CharField(max_length=15, default='', blank=True, null=True)
    unité = models.CharField(max_length=25, default='', blank=True, null=True)
    valeur_prev = models.FloatField(null=True, blank=True, default=0)
    valeur_conv = models.FloatField(null=True, blank=True, default=0)
    valeur_real = models.FloatField(null=True, blank=True, default=0)

    def __str__(self):
        # indicateur and numero_op are nullable: '+' on None raised
        # TypeError for partially-filled rows. Fall back to ''.
        return '{} - {}'.format(self.indicateur or '', self.numero_op or '')
class Enveloppe(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
Enveloppe_totale = models.FloatField(null = True, blank = True, default = 0)
Axe_1=models.FloatField(null = True, blank = True, default = 0)
OS_1_1=models.FloatField(null = True, blank = True, default = 0)
OS_1_2=models.FloatField(null = True, blank = True, default = 0)
OS_1_3=models.FloatField(null = True, blank = True, default = 0)
OS_1_4=models.FloatField(null = True, blank = True, default = 0)
OS_1_5=models.FloatField(null = True, blank = True, default = 0)
OS_1_6=models.FloatField(null = True, blank = True, default = 0)
OS_1_7=models.FloatField(null = True, blank = True, default = 0)
OS_1_8=models.FloatField(null = True, blank = True, default = 0)
OS_1_9=models.FloatField(null = True, blank = True, default = 0)
OS_1_10=models.FloatField(null = True, blank = True, default = 0)
Axe_2=models.FloatField(null = True, blank = True, default = 0)
OS_2_1=models.FloatField(null = True, blank = True, default = 0)
OS_2_2=models.FloatField(null = True, blank = True, default = 0)
OS_2_3=models.FloatField(null = True, blank = True, default = 0)
OS_2_4=models.FloatField(null = True, blank = True, default = 0)
OS_2_5=models.FloatField(null = True, blank = True, default = 0)
OS_2_6=models.FloatField(null = True, blank = True, default = 0)
OS_2_7=models.FloatField(null = True, blank = True, default = 0)
OS_2_8=models.FloatField(null = True, blank = True, default = 0)
OS_2_9=models.FloatField(null = True, blank = True, default = 0)
OS_2_10=models.FloatField(null = True, blank = True, default = 0)
Axe_3=models.FloatField(null = True, blank = True, default = 0)
OS_3_1=models.FloatField(null = True, blank = True, default = 0)
OS_3_2=models.FloatField(null = True, blank = True, default = 0)
OS_3_3=models.FloatField(null = True, blank = True, default = 0)
OS_3_4=models.FloatField(null = True, blank = True, default = 0)
OS_3_5=models.FloatField(null = True, blank = True, default = 0)
OS_3_6=models.FloatField(null = True, blank = True, default = 0)
OS_3_7=models.FloatField(null = True, blank = True, default = 0)
OS_3_8=models.FloatField(null = True, blank = True, default = 0)
OS_3_9=models.FloatField(null = True, blank = True, default = 0)
OS_3_10=models.FloatField(null = True, blank = True, default = 0)
Axe_4=models.FloatField(null = True, blank = True, default = 0)
OS_4_1=models.FloatField(null = True, blank = True, default = 0)
OS_4_2=models.FloatField(null = True, blank = True, default = 0)
OS_4_3=models.FloatField(null = True, blank = True, default = 0)
OS_4_4=models.FloatField(null = True, blank = True, default = 0)
OS_4_5=models.FloatField(null = True, blank = True, default = 0)
OS_4_6=models.FloatField(null = True, blank = True, default = 0)
OS_4_7=models.FloatField(null = True, blank = True, default = 0)
OS_4_8=models.FloatField(null = True, blank = True, default = 0)
OS_4_9=models.FloatField(null = True, blank = True, default = 0)
OS_4_10=models.FloatField(null = True, blank = True, default = 0)
Axe_5=models.FloatField(null = True, blank = True, default = 0)
OS_5_1=models.FloatField(null = True, blank = True, default = 0)
OS_5_2=models.FloatField(null = True, blank = True, default = 0)
OS_5_3=models.FloatField(null = True, blank = True, default = 0)
OS_5_4=models.FloatField(null = True, blank = True, default = 0)
OS_5_5=models.FloatField(null = True, blank = True, default = 0)
OS_5_6=models.FloatField(null = True, blank = True, default = 0)
OS_5_7=models.FloatField(null = True, blank = True, default = 0)
OS_5_8=models.FloatField(null = True, blank = True, default = 0)
OS_5_9=models.FloatField(null = True, blank = True, default = 0)
OS_5_10=models.FloatField(null = True, blank = True, default = 0)
Axe_6=models.FloatField(null = True, blank = True, default = 0)
OS_6_1=models.FloatField(null = True, blank = True, default = 0)
OS_6_2=models.FloatField(null = True, blank = True, default = 0)
OS_6_3=models.FloatField(null = True, blank = True, default = 0)
OS_6_4=models.FloatField(null = True, blank = True, default = 0)
OS_6_5=models.FloatField(null = True, blank = True, default = 0)
OS_6_6=models.FloatField(null = True, blank = True, default = 0)
OS_6_7=models.FloatField(null = True, blank = True, default = 0)
OS_6_8=models.FloatField(null = True, blank = True, default = 0)
OS_6_9=models.FloatField(null = True, blank = True, default = 0)
OS_6_10=models.FloatField(null = True, blank = True, default = 0)
Axe_7=models.FloatField(null = True, blank = True, default = 0)
OS_7_1=models.FloatField(null = True, blank = True, default = 0)
OS_7_2=models.FloatField(null = True, blank = True, default = 0)
OS_7_3=models.FloatField(null = True, blank = True, default = 0)
OS_7_4=models.FloatField(null = True, blank = True, default = 0)
OS_7_5=models.FloatField(null = True, blank = True, default = 0)
OS_7_6=models.FloatField(null = True, blank = True, default = 0)
OS_7_7=models.FloatField(null = True, blank = True, default = 0)
OS_7_8=models.FloatField(null = True, blank = True, default = 0)
OS_7_9=models.FloatField(null = True, blank = True, default = 0)
OS_7_10=models.FloatField(null = True, blank = True, default = 0)
Axe_8=models.FloatField(null = True, blank = True, default = 0)
OS_8_1=models.FloatField(null = True, blank = True, default = 0)
OS_8_2=models.FloatField(null = True, blank = True, default = 0)
OS_8_3=models.FloatField(null = True, blank = True, default = 0)
OS_8_4=models.FloatField(null = True, blank = True, default = 0)
OS_8_5=models.FloatField(null = True, blank = True, default = 0)
OS_8_6=models.FloatField(null = True, blank = True, default = 0)
OS_8_7=models.FloatField(null = True, blank = True, default = 0)
OS_8_8=models.FloatField(null = True, blank = True, default = 0)
OS_8_9=models.FloatField(null = True, blank = True, default = 0)
OS_8_10=models.FloatField(null = True, blank = True, default = 0)
Axe_9=models.FloatField(null = True, blank = True, default = 0)
OS_9_1=models.FloatField(null = True, blank = True, default = 0)
OS_9_2=models.FloatField(null = True, blank = True, default = 0)
OS_9_3=models.FloatField(null = True, blank = True, default = 0)
OS_9_4=models.FloatField(null = True, blank = True, default = 0)
OS_9_5=models.FloatField(null = True, blank = True, default = 0)
OS_9_6=models.FloatField(null = True, blank = True, default = 0)
OS_9_7=models.FloatField(null = True, blank = True, default = 0)
OS_9_8=models.FloatField(null = True, blank = True, default = 0)
OS_9_9=models.FloatField(null = True, blank = True, default = 0)
OS_9_10=models.FloatField(null = True, blank = True, default = 0)
Axe_10=models.FloatField(null = True, blank = True, default = 0)
OS_10_1=models.FloatField(null = True, blank = True, default = 0)
OS_10_2=models.FloatField(null = True, blank | |
"""<NAME>'s Really library
_______ _______ _______ _______ _______ _______
|\ /|\ /|\ /|\ /|\ /|\ /|
| +---+ | +---+ | +---+ | +---+ | +---+ | +---+ |
| | | | | | | | | | | | | | | | | | |
| |R | | |E | | |A | | |L | | |L | | |Y | |
| +---+ | +---+ | +---+ | +---+ | +---+ | +---+ |
|/_____\|/_____\|/_____\|/_____\|/_____\|/_____\|
This module is a core library that:
- provides foundational support
-
-
This module contains the following public classes:
- Really:
- NotReally: various utilities forgotten in the Python Standard Library
"""
__version__ = "0.1"
__all__ = ["Really", "NotReally"]
class Really:
    """Really Class:

    Wraps a number and exposes ``is_0`` … ``is_93`` predicate methods,
    each returning True when ``num`` equals that value.

    To be continued...
    """

    def __init__(self, num):
        """Initialize Really instance with the number to wrap."""
        self.num = num

    def __str__(self):
        """Return ``Really(num="<num>")``.

        BUG FIX: the original concatenated ``self.num`` directly onto a
        str, which raises TypeError for the int values this class is
        built to compare against; ``format`` accepts any type and
        produces identical output for str values.
        """
        return 'Really(num="{}")'.format(self.num)

    # The original spelled out 94 near-identical three-line methods
    # (is_0 .. is_93).  Generate them programmatically instead; each
    # generated function keeps the same name and docstring, so the
    # public interface is unchanged.
    def _make_is(value):
        def _is(self):
            return self.num == value
        _is.__name__ = "is_{}".format(value)
        _is.__qualname__ = "Really.is_{}".format(value)
        _is.__doc__ = "Verify whether num property is {}".format(value)
        return _is

    for _value in range(94):
        locals()["is_{}".format(_value)] = _make_is(_value)
    # Drop the generation helpers from the class namespace.
    del _value, _make_is
def is_94(self):
"""Verify whether num | |
one format. Multiwords like in “we have set that out below” are recognized (depending on your NLP processor)."
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
# def test_command_line_interface(self):
# """Test the CLI."""
# runner = CliRunner()
# result = runner.invoke(cli.main)
# assert result.exit_code == 0
# # assert 'nafigator.cli.main' in result.output
# help_result = runner.invoke(cli.main, ['--help'])
# assert help_result.exit_code == 0
# assert '--help Show this message and exit.' in help_result.output
class TestNafigator_docx(unittest.TestCase):
def test_1_docx_generate_naf(self):
""" """
tree = parse2naf.generate_naf(
input=join("tests", "tests", "example.docx"),
engine="stanza",
language="en",
naf_version="v3.1",
dtd_validation=False,
params={},
nlp=None,
)
assert tree.write(join("tests", "tests", "example.docx.naf.xml")) == None
def test_2_docx_header_filedesc(self):
""" """
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.header["fileDesc"]
expected = {
"filename": "tests\\tests\\example.docx",
"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
}
assert actual["filename"] == expected["filename"]
assert actual["filetype"] == expected["filetype"]
def test_3_docx_header_public(self):
""" """
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.header["public"]
expected = {
"{http://purl.org/dc/elements/1.1/}uri": "tests\\tests\\example.docx",
"{http://purl.org/dc/elements/1.1/}format": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
}
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
# def test_5_formats(self):
# assert actual == expected
def test_6_docx_entities(self):
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.entities
expected = [
{
"id": "e1",
"type": "PRODUCT",
"text": "Nafigator",
"span": [{"id": "t2"}],
},
{"id": "e2", "type": "PRODUCT", "text": "Spacy", "span": [{"id": "t13"}]},
{"id": "e3", "type": "CARDINAL", "text": "one", "span": [{"id": "t27"}]},
]
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
def test_7_docx_text(self):
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.text
expected = [
{
"text": "The",
"id": "w1",
"sent": "1",
"para": "1",
"page": "1",
"offset": "0",
"length": "3",
},
{
"text": "Nafigator",
"id": "w2",
"sent": "1",
"para": "1",
"page": "1",
"offset": "4",
"length": "9",
},
{
"text": "package",
"id": "w3",
"sent": "1",
"para": "1",
"page": "1",
"offset": "14",
"length": "7",
},
{
"text": "allows",
"id": "w4",
"sent": "1",
"para": "1",
"page": "1",
"offset": "22",
"length": "6",
},
{
"text": "you",
"id": "w5",
"sent": "1",
"para": "1",
"page": "1",
"offset": "29",
"length": "3",
},
{
"text": "to",
"id": "w6",
"sent": "1",
"para": "1",
"page": "1",
"offset": "33",
"length": "2",
},
{
"text": "store",
"id": "w7",
"sent": "1",
"para": "1",
"page": "1",
"offset": "36",
"length": "5",
},
{
"text": "NLP",
"id": "w8",
"sent": "1",
"para": "1",
"page": "1",
"offset": "42",
"length": "3",
},
{
"text": "output",
"id": "w9",
"sent": "1",
"para": "1",
"page": "1",
"offset": "46",
"length": "6",
},
{
"text": "from",
"id": "w10",
"sent": "1",
"para": "1",
"page": "1",
"offset": "53",
"length": "4",
},
{
"text": "custom",
"id": "w11",
"sent": "1",
"para": "1",
"page": "1",
"offset": "58",
"length": "6",
},
{
"text": "made",
"id": "w12",
"sent": "1",
"para": "1",
"page": "1",
"offset": "65",
"length": "4",
},
{
"text": "Spacy",
"id": "w13",
"sent": "1",
"para": "1",
"page": "1",
"offset": "70",
"length": "5",
},
{
"text": "and",
"id": "w14",
"sent": "1",
"para": "1",
"page": "1",
"offset": "76",
"length": "3",
},
{
"text": "stanza",
"id": "w15",
"sent": "1",
"para": "1",
"page": "1",
"offset": "80",
"length": "6",
},
{
"text": "pipelines",
"id": "w16",
"sent": "1",
"para": "1",
"page": "1",
"offset": "87",
"length": "9",
},
{
"text": "with",
"id": "w17",
"sent": "1",
"para": "1",
"page": "1",
"offset": "97",
"length": "4",
},
{
"text": "(",
"id": "w18",
"sent": "1",
"para": "1",
"page": "1",
"offset": "102",
"length": "1",
},
{
"text": "intermediate",
"id": "w19",
"sent": "1",
"para": "1",
"page": "1",
"offset": "103",
"length": "12",
},
{
"text": ")",
"id": "w20",
"sent": "1",
"para": "1",
"page": "1",
"offset": "115",
"length": "1",
},
{
"text": "results",
"id": "w21",
"sent": "1",
"para": "1",
"page": "1",
"offset": "117",
"length": "7",
},
{
"text": "and",
"id": "w22",
"sent": "1",
"para": "1",
"page": "1",
"offset": "125",
"length": "3",
},
{
"text": "all",
"id": "w23",
"sent": "1",
"para": "1",
"page": "1",
"offset": "129",
"length": "3",
},
{
"text": "processing",
"id": "w24",
"sent": "1",
"para": "1",
"page": "1",
"offset": "133",
"length": "10",
},
{
"text": "steps",
"id": "w25",
"sent": "1",
"para": "1",
"page": "1",
"offset": "144",
"length": "5",
},
{
"text": "in",
"id": "w26",
"sent": "1",
"para": "1",
"page": "1",
"offset": "150",
"length": "2",
},
{
"text": "one",
"id": "w27",
"sent": "1",
"para": "1",
"page": "1",
"offset": "153",
"length": "3",
},
{
"text": "format",
"id": "w28",
"sent": "1",
"para": "1",
"page": "1",
"offset": "157",
"length": "6",
},
{
"text": ".",
"id": "w29",
"sent": "1",
"para": "1",
"page": "1",
"offset": "163",
"length": "1",
},
{
"text": "Multiwords",
"id": "w30",
"sent": "2",
"para": "2",
"page": "1",
"offset": "166",
"length": "10",
},
{
"text": "like",
"id": "w31",
"sent": "2",
"para": "2",
"page": "1",
"offset": "177",
"length": "4",
},
{
"text": "in",
"id": "w32",
"sent": "2",
"para": "2",
"page": "1",
"offset": "182",
"length": "2",
},
{
"text": "“",
"id": "w33",
"sent": "2",
"para": "2",
"page": "1",
"offset": "185",
"length": "1",
},
{
"text": "we",
"id": "w34",
"sent": "2",
"para": "2",
"page": "1",
"offset": "186",
"length": "2",
},
{
"text": "have",
"id": "w35",
"sent": "2",
"para": "2",
"page": "1",
"offset": "189",
"length": "4",
},
{
"text": "set",
"id": "w36",
"sent": "2",
"para": "2",
"page": "1",
"offset": "194",
"length": "3",
},
{
"text": "that",
"id": "w37",
"sent": "2",
"para": "2",
"page": "1",
"offset": "198",
"length": "4",
},
{
"text": "out",
"id": "w38",
"sent": "2",
"para": "2",
"page": "1",
"offset": "203",
"length": "3",
},
{
"text": "below",
"id": "w39",
"sent": "2",
"para": "2",
"page": "1",
"offset": "207",
"length": "5",
},
{
"text": "”",
"id": "w40",
"sent": "2",
"para": "2",
"page": "1",
"offset": "212",
"length": "1",
},
{
"text": "are",
"id": "w41",
"sent": "2",
"para": "2",
"page": "1",
"offset": "214",
"length": "3",
},
{
"text": "recognized",
"id": "w42",
"sent": "2",
"para": "2",
"page": "1",
"offset": "218",
"length": "10",
},
{
"text": "(",
"id": "w43",
"sent": "2",
"para": "2",
"page": "1",
"offset": "229",
"length": "1",
},
{
"text": "depending",
"id": "w44",
"sent": "2",
"para": "2",
"page": "1",
"offset": "230",
"length": "9",
},
{
"text": "on",
"id": "w45",
"sent": "2",
"para": "2",
"page": "1",
"offset": "240",
"length": "2",
},
{
"text": "your",
"id": "w46",
"sent": "2",
"para": "2",
"page": "1",
"offset": "243",
"length": "4",
},
{
"text": "NLP",
"id": "w47",
"sent": "2",
"para": "2",
"page": "1",
"offset": "248",
"length": "3",
},
{
"text": "processor",
"id": "w48",
"sent": "2",
"para": "2",
"page": "1",
"offset": "252",
"length": "9",
},
{
"text": ")",
"id": "w49",
"sent": "2",
"para": "2",
"page": "1",
"offset": "261",
"length": "1",
},
{
"text": ".",
"id": "w50",
"sent": "2",
"para": "2",
"page": "1",
"offset": "262",
"length": "1",
},
]
diff = DeepDiff(actual, expected)
assert diff == dict(), diff
def test_8_docx_terms(self):
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.terms
expected = [
{
"id": "t1",
"type": "open",
"lemma": "the",
"pos": "DET",
"morphofeat": "Definite=Def|PronType=Art",
"span": [{"id": "w1"}],
},
{
"id": "t2",
"type": "open",
"lemma": "Nafigator",
"pos": "PROPN",
"morphofeat": "Number=Sing",
"span": [{"id": "w2"}],
},
{
"id": "t3",
"type": "open",
"lemma": "package",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w3"}],
},
{
"id": "t4",
"type": "open",
"lemma": "allow",
"pos": "VERB",
"morphofeat": "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin",
"span": [{"id": "w4"}],
},
{
"id": "t5",
"type": "open",
"lemma": "you",
"pos": "PRON",
"morphofeat": "Case=Acc|Person=2|PronType=Prs",
"span": [{"id": "w5"}],
},
{
"id": "t6",
"type": "open",
"lemma": "to",
"pos": "PART",
"span": [{"id": "w6"}],
},
{
"id": "t7",
"type": "open",
"lemma": "store",
"pos": "VERB",
"morphofeat": "VerbForm=Inf",
"span": [{"id": "w7"}],
},
{
"id": "t8",
"type": "open",
"lemma": "nlp",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w8"}],
},
{
"id": "t9",
"type": "open",
"lemma": "output",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w9"}],
},
{
"id": "t10",
"type": "open",
"lemma": "from",
"pos": "ADP",
"span": [{"id": "w10"}],
},
{
"id": "t11",
"type": "open",
"lemma": "custom",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w11"}],
},
{
"id": "t12",
"type": "open",
"lemma": "make",
"pos": "VERB",
"morphofeat": "Tense=Past|VerbForm=Part",
"span": [{"id": "w12"}],
},
{
"id": "t13",
"type": "open",
"lemma": "spacy",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w13"}],
},
{
"id": "t14",
"type": "open",
"lemma": "and",
"pos": "CCONJ",
"span": [{"id": "w14"}],
},
{
"id": "t15",
"type": "open",
| |
"""
"""
import gzip
import pickle
import astropy.units as u
import numpy as np
from template_builder.utilities import *
from template_builder.extend_templates import *
from ctapipe.coordinates import CameraFrame, NominalFrame, GroundFrame, \
TiltedGroundFrame
from astropy.coordinates import SkyCoord, AltAz
from astropy.time import Time
from ctapipe.io import EventSource
from ctapipe.reco import ImPACTReconstructor
from tqdm import tqdm
from ctapipe.image import tailcuts_clean, dilate
from ctapipe.calib import CameraCalibrator
from ctapipe.image.extractor import FullWaveformSum, FixedWindowSum
from ctapipe.calib.camera.gainselection import ThresholdGainSelector
class TemplateFitter:
    def __init__(self, eff_fl=1,
                 bounds=((-5, 1), (-1.5, 1.5)),
                 bins=(601, 301),
                 min_fit_pixels=3000,
                 xmax_bins=np.linspace(-150, 200, 15),
                 offset_bins=np.array([0.0])*u.deg,
                 verbose=False,
                 rotation_angle=0 * u.deg,
                 tailcuts=(7, 14), min_amp=30, local_distance_cut=2.*u.deg,
                 gain_threshold=30000):
        """Configure the template fitter.

        Args:
            eff_fl (float, optional): Scale factor applied to each telescope's
                equivalent focal length. Defaults to 1.
            bounds (tuple, optional): ((x_min, x_max), (y_min, y_max)) extent
                of the template, in degrees. Defaults to ((-5, 1), (-1.5, 1.5)).
            bins (tuple, optional): Number of template bins in x and y.
                Defaults to (601, 301).
            min_fit_pixels (int, optional): Minimum number of stored pixels a
                template needs before it is fitted. Defaults to 3000.
            xmax_bins (ndarray, optional): Bin centres of the Xmax-difference
                axis. Defaults to np.linspace(-150, 200, 15).
            offset_bins (Quantity, optional): Source offset bins (sorted on
                storage). Defaults to np.array([0.0])*u.deg.
            verbose (bool, optional): Print progress information.
                Defaults to False.
            rotation_angle (Quantity, optional): Additional rotation applied
                to each image. Defaults to 0*u.deg.
            tailcuts (tuple, optional): (picture, boundary) thresholds passed
                to tailcuts_clean in read_templates. Defaults to (7, 14).
            min_amp (int, optional): Image amplitude used in the preselection
                cut of read_templates. Defaults to 30.
            local_distance_cut (Quantity, optional): Centroid distance used in
                the preselection cut of read_templates. Defaults to 2.*u.deg.
            gain_threshold (int, optional): Threshold for gain channel
                selection. Defaults to 30000.
        """
        self.verbose = verbose
        self.xmax_bins = xmax_bins
        self.eff_fl = eff_fl
        self.bounds = bounds
        self.bins = bins
        self.min_fit_pixels = min_fit_pixels
        self.rotation_angle = rotation_angle
        self.offset_bins = np.sort(offset_bins)
        self.tailcuts = tailcuts
        self.min_amp = min_amp
        self.local_distance_cut = local_distance_cut
        self.templates = dict()  # Pixel amplitudes, keyed by template bin
        self.template_fit = dict()  # Fitted template images
        self.template_fit_kde = dict()  # KDE-fitted template images
        self.templates_xb = dict()  # Rotated X position
        self.templates_yb = dict()  # Rotated Y positions
        self.correction = dict()
        self.count = dict()  # Count of events in a given template
        self.count_total = 0  # Total number of events
        self.gain_threshold = gain_threshold
    def read_templates(self, filename, max_events=1000000):
        """
        This is a pretty standard ctapipe event loop that calibrates events, rotates
        them into a common frame and then stores the pixel values in a list

        :param filename: str
            Location of input
        :param max_events: int
            Maximum number of events to include in the loop
        :return: tuple
            Return 3 dicts of amplitude and rotated x,y positions of all pixels in all
            events, keyed by template bin
        """
        # Create dictionaries to contain our output
        if max_events > 0:
            print("Warning if limiting event numbers the zero fraction may no longer be correct")
        else:
            max_events = 1e10
        # Create a dummy time for our AltAz objects
        dummy_time = Time('2010-01-01T00:00:00', format='isot', scale='utc')
        if self.verbose:
            print("Reading", filename.strip())
        source = EventSource(filename, max_events=max_events, gain_selector_type='ThresholdGainSelector')
        source.gain_selector.threshold = self.gain_threshold # Set our threshold for gain selection
        # This value is currently set for HESS, need to make this more flexible in future
        calib = CameraCalibrator(subarray=source.subarray, image_extractor=FixedWindowSum(source.subarray,
                                                        window_width=16, window_shift=3, peak_index=3,
                                                        apply_integration_correction=False))
        self.count_total += source.simulation_config.num_showers
        grd_tel = None
        num = 0  # Event counter
        scaling_filled = False
        for event in source:
            calib(event)
            # Array pointing, clamped to the zenith.
            alt = event.pointing.array_altitude
            if alt > 90 * u.deg:
                alt = 90*u.deg
            point = SkyCoord(alt=alt, az=event.pointing.array_azimuth,
                             frame=AltAz(obstime=dummy_time))
            # The Xmax scaling table only needs to be created once per file.
            if not scaling_filled:
                xmax_scale = create_xmax_scaling(self.xmax_bins, self.offset_bins, point, filename)
                scaling_filled = True
            # Create coordinate objects for source position
            src = SkyCoord(alt=event.simulation.shower.alt.value * u.rad,
                           az=event.simulation.shower.az.value * u.rad,
                           frame=AltAz(obstime=dummy_time))
            alt_evt = event.simulation.shower.alt
            if alt_evt > 90 * u.deg:
                alt_evt = 90*u.deg
            #print("here1", point.separation(src), self.maximum_offset)
            #if point.separation(src) > self.maximum_offset:
            #    continue
            offset_bin = find_nearest_bin(self.offset_bins, point.separation(src)).value
            zen = 90 - event.simulation.shower.alt.to(u.deg).value
            # Store simulated Xmax
            # NOTE(review): zen and mc_xmax are recomputed below inside the
            # telescope loop; these two assignments appear redundant.
            mc_xmax = event.simulation.shower.x_max.value / np.cos(np.deg2rad(zen))
            # And transform into nominal system (where we store our templates)
            source_direction = src.transform_to(NominalFrame(origin=point))
            # Store simulated event energy
            energy = event.simulation.shower.energy
            # Store ground position of all telescopes
            # We only want to do this once, but has to be done in event loop
            if grd_tel is None:
                grd_tel = source.subarray.tel_coords
                # Convert to tilted system
                tilt_tel = grd_tel.transform_to(
                    TiltedGroundFrame(pointing_direction=point))
            # Calculate core position in tilted system
            grd_core_true = SkyCoord(x=np.asarray(event.simulation.shower.core_x) * u.m,
                                     y=np.asarray(event.simulation.shower.core_y) * u.m,
                                     z=np.asarray(0) * u.m, frame=GroundFrame())
            tilt_core_true = grd_core_true.transform_to(TiltedGroundFrame(
                pointing_direction=point))
            # Loop over triggered telescopes
            for tel_id, dl1 in event.dl1.tel.items():
                # Get pixel signal
                pmt_signal = dl1.image
                # Get pixel coordinates and convert to the nominal system
                geom = source.subarray.tel[tel_id].camera.geometry
                fl = source.subarray.tel[tel_id].optics.equivalent_focal_length * \
                    self.eff_fl
                camera_coord = SkyCoord(x=geom.pix_x, y=geom.pix_y,
                                        frame=CameraFrame(focal_length=fl,
                                                          telescope_pointing=point))
                nom_coord = camera_coord.transform_to(
                    NominalFrame(origin=point))
                x = nom_coord.fov_lon.to(u.deg)
                y = nom_coord.fov_lat.to(u.deg)
                # Calculate expected rotation angle of the image
                phi = np.arctan2((tilt_tel.y[tel_id - 1] - tilt_core_true.y),
                                 (tilt_tel.x[tel_id - 1] - tilt_core_true.x)) + \
                    90 * u.deg
                phi += self.rotation_angle
                # And the impact distance of the shower
                impact = np.sqrt(np.power(tilt_tel.x[tel_id - 1] - tilt_core_true.x, 2) +
                                 np.power(tilt_tel.y[tel_id - 1] - tilt_core_true.y, 2)). \
                    to(u.m).value
                # now rotate and translate our images such that they lie on top of one
                # another
                x, y = \
                    ImPACTReconstructor.rotate_translate(x, y,
                                                         source_direction.fov_lon,
                                                         source_direction.fov_lat,
                                                         phi)
                x *= -1
                # We only want to keep pixels that fall within the bounds of our
                # final template
                mask = np.logical_and(x > self.bounds[0][0] * u.deg,
                                      x < self.bounds[0][1] * u.deg)
                mask = np.logical_and(mask, y < self.bounds[1][1] * u.deg)
                mask = np.logical_and(mask, y > self.bounds[1][0] * u.deg)
                mask510 = tailcuts_clean(geom, pmt_signal,
                                         picture_thresh=self.tailcuts[0],
                                         boundary_thresh=self.tailcuts[1],
                                         min_number_picture_neighbors=1)
                # Image amplitude and centroid from the cleaned pixels.
                # NOTE(review): amp_sum can be 0 here (empty cleaning mask),
                # which would make the centroid division produce NaN/inf.
                amp_sum = np.sum(pmt_signal[mask510])
                x_cent = np.sum(pmt_signal[mask510] * x[mask510]) / amp_sum
                y_cent = np.sum(pmt_signal[mask510] * y[mask510]) / amp_sum
                mask = mask510
                for i in range(4):
                    mask = dilate(geom, mask)
                # Make our preselection cuts
                # NOTE(review): this skips an image only when the amplitude is
                # low AND the centroid is close to the camera centre; a
                # preselection would usually reject when amplitude is low OR
                # the centroid is far (distance > cut) — confirm intended logic.
                if amp_sum < self.min_amp and np.sqrt(x_cent**2 + y_cent**2) < self.local_distance_cut:
                    continue
                # Make sure everything is 32 bit
                x = x[mask].astype(np.float32)
                y = y[mask].astype(np.float32)
                image = pmt_signal[mask].astype(np.float32)
                zen = 90 - alt_evt.to(u.deg).value
                # Store simulated Xmax
                mc_xmax = event.simulation.shower.x_max.value / np.cos(np.deg2rad(zen))
                # Calc difference from expected Xmax (for gammas)
                exp_xmax =xmax_expectation(energy.value)
                x_diff = mc_xmax - exp_xmax
                x_diff_bin = find_nearest_bin(self.xmax_bins, x_diff)
                az = point.az.to(u.deg).value
                zen = 90. - point.alt.to(u.deg).value
                # Now fill up our output with the X, Y and amplitude of our pixels
                key = zen, az, energy.value, int(impact), x_diff_bin, offset_bin
                if (key) in self.templates.keys():
                    # Extend the list if an entry already exists
                    self.templates[key].extend(image)
                    self.templates_xb[key].extend(x.to(u.deg).value)
                    self.templates_yb[key].extend(y.to(u.deg).value)
                    self.count[key] = self.count[key] + (1 * xmax_scale[(x_diff_bin, offset_bin)])
                else:
                    self.templates[key] = image.tolist()
                    self.templates_xb[key] = x.value.tolist()
                    self.templates_yb[key] = y.value.tolist()
                    self.count[key] = 1 * xmax_scale[(x_diff_bin, offset_bin)]
            # Early exit once the requested number of events has been read.
            if num > max_events:
                return self.templates, self.templates_xb, self.templates_yb
            num += 1
        return self.templates, self.templates_xb, self.templates_yb
def fit_templates(self, amplitude, x_pos, y_pos,
make_variance_template, max_fitpoints):
"""
Perform MLP fit over a dictionary of pixel lists
:param amplitude: dict
Dictionary of pixel amplitudes for each template
:param x_pos: dict
Dictionary of x position for each template
:param y_pos: dict
Dictionary of y position for each template
:param make_variance_template: bool
Should we also make a template of variance
:param max_fitpoints: int
Maximum number of points to include in MLP fit
:return: dict
Dictionary of image templates
"""
if self.verbose:
print("Fitting Templates")
# Create output dictionary
templates_out = dict()
variance_templates_out = dict()
# Create grid over which to evaluate our fit
x = np.linspace(self.bounds[0][0], self.bounds[0][1], self.bins[0])
y = np.linspace(self.bounds[1][0], self.bounds[1][1], self.bins[1])
xx, yy = np.meshgrid(x, y)
grid = np.vstack((xx.ravel(), yy.ravel()))
first = True
# Loop over all templates
for key in tqdm(list(amplitude.keys())):
if self.verbose and first:
print("Energy", key[2], "TeV")
first = False
amp = np.array(amplitude[key])
if self.verbose:
print("Template key:", key)
# Skip if we do not have enough image pixels
if len(amp) < self.min_fit_pixels:
continue
y = y_pos[key]
x = x_pos[key]
# Stack up pixel positions
pixel_pos = np.vstack([x, y])
# Fit with MLP
template_output = self.perform_fit(amp, pixel_pos, max_fitpoints)
templates_out[key] = template_output.astype(np.float32)
#if make_variance_template:
# need better plan for var templates
return templates_out, variance_templates_out
def perform_fit(self, amp, pixel_pos, max_fitpoints=None,
nodes=(64, 64, 64, 64, 64, 64, 64, 64, 64)):
"""
Fit MLP model to individual template pixels
:param amp: ndarray
Pixel amplitudes
:param pixel_pos: ndarray
Pixel XY coordinate format (N, 2)
:param max_fitpoints: int
Maximum number of points to include in MLP fit
:param nodes: tuple
Node layout of MLP
:return: MLP
Fitted MLP model
"""
pixel_pos = pixel_pos.T
# If we put a limit on this then randomly choose points
if max_fitpoints is not None and amp.shape[0] > max_fitpoints:
indices = np.arange(amp.shape[0])
np.random.shuffle(indices)
amp = amp[indices[:max_fitpoints]]
| |
import warnings
from collections import defaultdict
from typing import Union
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from .. import ModelVisualization
from ..utils import get_num_of_steps_allowed, is_mixed_precision, listify
from ..utils.regularizers import LegacyRegularizer
from .callbacks import managed_callbacks
from .input_modifiers import Jitter, Rotate2D
from .regularizers import Norm, TotalVariation2D
class ActivationMaximization(ModelVisualization):
"""ActivationMaximization.
"""
def __call__(
self,
score,
seed_input=None,
input_range=(0, 255),
input_modifiers=[Jitter(jitter=4), Rotate2D(degree=1)],
regularizers=[TotalVariation2D(weight=1.0),
Norm(weight=0.3, p=1)],
steps=200,
optimizer=None, # When None, the default is tf.optimizers.RMSprop(1.0, 0.999)
gradient_modifier=None,
callbacks=None,
training=False,
unconnected_gradients=tf.UnconnectedGradients.NONE,
activation_modifiers=None,
_compatible_mode=False, # Hidden option.
) -> Union[np.ndarray, list]:
"""Generate the model inputs that maximize the output of the given `score` functions.
By default, this method is optimized to visualize `tf.keras.application.VGG16` model.
So if you want to visualize other models, you have to tune the parameters of this method.
Args:
score: A :obj:`tf_keras_vis.utils.scores.Score` instance, function or a list of them.
For example of the Score instance to specify visualizing target::
scores = CategoricalScore([1, 294, 413])
The code above means the same with the one below::
score = lambda outputs: (outputs[0][1], outputs[1][294], outputs[2][413])
When the model has multiple outputs, you MUST pass a list of
Score instances or functions. For example::
from tf_keras_vis.utils.scores import CategoricalScore, InactiveScore
score = [
CategoricalScore([1, 23]), # For 1st model output
InactiveScore(), # For 2nd model output
...
]
seed_input: A tf.Tensor, :obj:`numpy.ndarray` or a list of them to input in the model.
When `None`, the seed_input value will be automatically generated from a uniform
distribution. If you want to visualize multiple images (i.e., batch_size > 1),
you have to pass a seed_input object. For example::
seed_input = tf.random.uniform((samples, ..., channels), low, high)
Farthermore, if the model has multiple inputs and you want multiple images,
you have to do as follows::
seed_input = [
tf.random.uniform((samples, ..., channels), low, high), # 1st input
tf.random.uniform((samples, ..., channels), low, high), # 2nd input
...
]
Defaults to None.
input_range: A tuple of two int values or a list of them. The tuple indicates
`(min, max)` values that is range of the result of this method. If the model has
multiple inputs, you can use different input ranges for each model input by
passing list of tuples. For example::
input_range = [
(0, 255), # The 1st model input's range
(-1.0, 1.0), # The 2nd model input's range
...
]
When `None` or `(None, None)` tuple, the input tensor
(i.e., the result of this method) will be not applied any limitation.
Defaults to (0, 255).
input_modifiers: A :obj:`tf_keras_vis.activation_maximization.input_modifiers.
InputModifier` instance, a function, a list of them when the model has a single
input. For example::
input_modifiers = [Jitter(jitter=8), Rotate(degree=3), Scale(high=1.1)]
When the model has multiple inputs, you have to pass a dictionary
that contains the lists of input modifiers for each model inputs::
input_modifiers = {
'input_1': [Jitter(jitter=8), Rotate(degree=3), Scale(high=1.1)],
'input_2': [Jitter(jitter=8)],
...
}
Or you could also pass a list of lists of input modifiers for each model inputs as
follows::
input_modifiers = [
[Jitter(jitter=8), Rotate(degree=3), Scale(high=1.1)], # For 1st input
[Jitter(jitter=8)], # For 2nd input
...
]
Defaults to [Jitter(jitter=4), Rotate(degree=1)].
regularizers: A :obj:`tf_keras_vis.utils.regularizers.Regularizer` instance,
a list of regularizers or a list that has lists of regularizers for each input.
For example::
regularizers = [TotalVariation2D(weight=1.0), Norm(weight=0.3, p=1)]
> Please notice that `regularizes` does NOT accept function object like
`input_modifiers`.
When the model has multiple inputs, you have to pass a dictionary
that contains the lists of regularizers for each model inputs::
regularizers = {
'input_1': [TotalVariation2D(weight=1.0), Norm(weight=0.3, p=1)],
'input_2': [Norm(weight=1.0, p=2)],
...
}
Or you could also pass a list of lists of regularizers for each model inputs as
follows::
regularizers = [
[TotalVariation2D(weight=1.0), Norm(weight=0.3, p=1)], # For 1st input
[Norm(weight=1.0, p=2)], # For 2nt input
...
]
Defaults to [TotalVariation2D(weight=1.0), Norm(weight=0.3, p=1)].
steps: The number of gradient descent iterations. Defaults to 200.
optimizer: A `tf.optimizers.Optimizer` instance. When None, it will be automatically
created. Defaults to `tf.optimizers.RMSprop(learning_rate=1.0, rho=0.999)`.
gradient_modifier: A function to modify gradients.
Defaults to None.
callbacks: A :obj:`tf_keras_vis.activation_maximization.callbacks.Callback` instance
or a list of them. Defaults to None.
training: A bool that indicates whether the model's training-mode on or off.
Defaults to False.
unconnected_gradients: Specifies the gradient value returned when the given input
tensors are unconnected.
Defaults to tf.UnconnectedGradients.NONE.
activation_modifiers: A function or a dictionary of them (the key is input layer's
name). When the model has multiple inputs, you have to pass a dictionary::
activation_modifiers = {
'input_1': lambda x: ...,
'input_2': lambda x: ...,
...
}
This functions will be executed before returning the result. Defaults to None.
Returns:
An :obj:`numpy.ndarray` when the model has a single input.
When the model has multiple inputs, a list of :obj:`numpy.ndarray`.
Raises:
:obj:`ValueError`: When there is any invalid arguments.
"""
arguments = dict(
(k, v) for k, v in locals().items() if k != 'self' and not k.startswith('_'))
# Check model
mixed_precision_model = is_mixed_precision(self.model)
# optimizer
optimizer = self._get_optimizer(optimizer, mixed_precision_model)
# scores
scores = self._get_scores_for_multiple_outputs(score)
# Get initial seed-inputs
input_ranges = self._get_input_ranges(input_range)
seed_inputs = self._get_seed_inputs(seed_input, input_ranges)
# input_modifiers
input_modifiers = self._get_input_modifiers(input_modifiers)
# regularizers
regularizers = self._get_regularizers(regularizers)
# activation_modifiers
activation_modifiers = self._get_activation_modifiers(activation_modifiers)
with managed_callbacks(**arguments) as callbacks:
input_values = seed_inputs
input_variables = [tf.Variable(X) for X in input_values]
for step in range(get_num_of_steps_allowed(steps)):
# Modify input values
for i, name in enumerate(self.model.input_names):
for modifier in input_modifiers[name]:
input_values[i] = modifier(input_values[i])
# Copy input values to variables
if _compatible_mode:
input_variables = [
tf.Variable(tf.cast(X, tf.float16) if mixed_precision_model else X)
for X in input_values
]
else:
for V, X in zip(input_variables, input_values):
V.assign(X)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(input_variables)
if _compatible_mode:
input_values = input_variables
else:
input_values = [V.value() for V in input_variables]
# Calculate scores
outputs = self.model(input_values, training=training)
outputs = listify(outputs)
score_values = self._calculate_scores(outputs, scores)
# Calculate regularization
regularization_values, regularized_score_values = \
self._calculate_regularization(regularizers, input_values, score_values)
# Scale loss
if mixed_precision_model:
regularized_score_values = [
optimizer.get_scaled_loss(score_value)
for score_value in regularized_score_values
]
# Calculate gradients and Update variables
grads = tape.gradient(regularized_score_values,
input_variables,
unconnected_gradients=unconnected_gradients)
grads = listify(grads)
if mixed_precision_model:
grads = optimizer.get_unscaled_gradients(grads)
if gradient_modifier is not None:
grads = [gradient_modifier(g) for g in grads]
optimizer.apply_gradients(zip(grads, input_variables))
# Update input values
input_values = [V.value() for V in input_variables]
if _compatible_mode and mixed_precision_model:
input_values = [tf.cast(X, tf.float32) for X in input_values]
# Calculate clipped values
clipped_value = self._clip_and_modify(input_values, input_ranges,
activation_modifiers)
# Execute callbacks
for callback in callbacks:
callback(step,
clipped_value,
grads,
score_values,
outputs,
regularizations=regularization_values,
overall_score=regularized_score_values)
if len(self.model.inputs) == 1 and (seed_input is None
or not isinstance(seed_input, list)):
clipped_value = clipped_value[0]
return clipped_value
def _calculate_regularization(self, regularizers, seed_inputs, score_values):
if isinstance(regularizers, list):
regularization_values = [(regularizer.name, regularizer(seed_inputs))
for regularizer in regularizers]
else:
regularization_values = ([
(name, regularizer(seed_inputs[i]))
for name, regularizer in regularizers[input_layer_name].items()
] for i, input_layer_name in enumerate(self.model.input_names))
regularization_values = sum(regularization_values, [])
regularized_score_values = [-1.0 * score_value for score_value in score_values]
regularized_score_values += [value for _, value in regularization_values]
return regularization_values, regularized_score_values
def _get_optimizer(self, optimizer, mixed_precision_model):
if optimizer is None:
optimizer = tf.optimizers.RMSprop(learning_rate=1.0, rho=0.999)
if mixed_precision_model:
try:
# Wrap optimizer
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer)
except ValueError as e:
raise ValueError(
"The same `optimizer` instance should be NOT used twice or more."
" You can be able to avoid this error by creating new optimizer instance"
" each calling __call__().") from e
return optimizer
def _get_input_ranges(self, input_range):
input_ranges = listify(input_range,
return_empty_list_if_none=False,
convert_tuple_to_list=False)
if len(input_ranges) == 1 and len(self.model.inputs) > 1:
input_ranges = input_ranges * len(self.model.inputs)
input_ranges = [(None, None) if r is None else r for r in input_ranges]
for i, r in enumerate(input_ranges):
if len(r) != 2:
raise ValueError(
"The length of input range tuple must be 2 (Or it is just `None`, not tuple), "
f"but you passed {r} as `input_ranges[{i}]`.")
a, b = r
if None not in r and type(a) != type(b):
raise TypeError(
"The type of low and high values in the input range must be the same, "
f"but you passed {r} are {type(a)} and {type(b)} ")
return input_ranges
def _get_seed_inputs(self, seed_inputs, input_ranges):
# Prepare seed_inputs
seed_inputs = listify(seed_inputs)
if len(seed_inputs) == 0:
# Replace | |
array."""
with pytest.raises(ValueError):
second_derivative(deriv_1d_data.values[None, :].T, x=deriv_1d_data.x, axis=1)
def test_second_derivative_scalar_delta():
    """Test second_derivative with a scalar passed for a delta."""
    df_dx = second_derivative(np.arange(3), delta=1)
    # Linear data, so the second derivative is identically zero
    assert_array_almost_equal(df_dx, np.array([0., 0., 0.]), 6)
def test_laplacian(deriv_1d_data):
    """Test laplacian with simple 1D data."""
    laplac = laplacian(deriv_1d_data.values, coordinates=(deriv_1d_data.x,))
    # Worked by hand
    truth = np.ones_like(deriv_1d_data.values) * 0.2133333 * units('delta_degC/cm**2')
    assert_array_almost_equal(laplac, truth, 5)
def test_laplacian_2d(deriv_2d_data):
    """Test laplacian with full 2D arrays."""
    # Constant Laplacian 2*(a + b) everywhere (fixture presumably quadratic
    # in x and y -- see the deriv_2d_data fixture definition)
    laplac_true = 2 * (np.ones_like(deriv_2d_data.f) * (deriv_2d_data.a + deriv_2d_data.b))
    laplac = laplacian(deriv_2d_data.f, coordinates=(deriv_2d_data.y, deriv_2d_data.x))
    assert_array_almost_equal(laplac, laplac_true, 5)
def test_parse_angle_abbrieviated():
    """Test abbreviated directional text in degrees."""
    expected_angles_degrees = FULL_CIRCLE_DEGREES
    output_angles_degrees = parse_angle(DIR_STRS[:-1])
    assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_ext():
    """Test extended (unabbreviated) directional text in degrees."""
    test_dir_strs = ['NORTH', 'NORTHnorthEast', 'North_East', 'East__North_East',
                     'easT', 'east south east', 'south east', ' south southeast',
                     'SOUTH', 'SOUTH SOUTH WEST', 'southWEST', 'WEST south_WEST',
                     'WeSt', 'WestNorth West', 'North West', 'NORTH north_WeSt']
    # The 16 compass points, one every 22.5 degrees
    expected_angles_degrees = np.arange(0, 360, 22.5) * units.degree
    output_angles_degrees = parse_angle(test_dir_strs)
    assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_mix_multiple():
    """Test a list mixing abbreviated and extended directional text in one go."""
    test_dir_strs = ['NORTH', 'nne', 'ne', 'east north east',
                     'easT', 'east se', 'south east', ' south southeast',
                     'SOUTH', 'SOUTH SOUTH WEST', 'sw', 'WEST south_WEST',
                     'w', 'wnw', 'North West', 'nnw']
    expected_angles_degrees = FULL_CIRCLE_DEGREES
    output_angles_degrees = parse_angle(test_dir_strs)
    assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_none():
    """Test that passing None to parse_angle yields NaN."""
    test_dir_strs = None
    expected_angles_degrees = np.nan
    output_angles_degrees = parse_angle(test_dir_strs)
    assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_invalid_number():
    """Test that an invalid numeric input to parse_angle yields NaN."""
    test_dir_strs = 365.
    expected_angles_degrees = np.nan
    output_angles_degrees = parse_angle(test_dir_strs)
    assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_invalid_arr():
    """Test that invalid entries in a list parse to NaN."""
    test_dir_strs = ['nan', None, np.nan, 35, 35.5, 'north', 'andrewiscool']
    expected_angles_degrees = [np.nan, np.nan, np.nan, np.nan, np.nan, 0, np.nan]
    output_angles_degrees = parse_angle(test_dir_strs)
    assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_mix_multiple_arr():
    """Test a numpy array mixing abbreviated and extended directional text."""
    test_dir_strs = np.array(['NORTH', 'nne', 'ne', 'east north east',
                              'easT', 'east se', 'south east', ' south southeast',
                              'SOUTH', 'SOUTH SOUTH WEST', 'sw', 'WEST south_WEST',
                              'w', 'wnw', 'North West', 'nnw'])
    expected_angles_degrees = FULL_CIRCLE_DEGREES
    output_angles_degrees = parse_angle(test_dir_strs)
    assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angles_array():
    """Test array of angles to parse."""
    angles = np.array(['N', 'S', 'E', 'W'])
    expected_angles = np.array([0, 180, 90, 270]) * units.degree
    calculated_angles = parse_angle(angles)
    assert_array_almost_equal(calculated_angles, expected_angles)
def test_parse_angles_series():
    """Test pandas.Series of angles to parse."""
    angles = pd.Series(['N', 'S', 'E', 'W'])
    expected_angles = np.array([0, 180, 90, 270]) * units.degree
    calculated_angles = parse_angle(angles)
    assert_array_almost_equal(calculated_angles, expected_angles)
def test_parse_angles_single():
    """Test single input into `parse_angles`."""
    calculated_angle = parse_angle('SOUTH SOUTH EAST')
    expected_angle = 157.5 * units.degree
    assert_almost_equal(calculated_angle, expected_angle)
def test_gradient_2d(deriv_2d_data):
    """Test gradient with 2D arrays."""
    res = gradient(deriv_2d_data.f, coordinates=(deriv_2d_data.y, deriv_2d_data.x))
    # Hand-computed (d/dy, d/dx) of the fixture field
    truth = (np.array([[-0.25, -0.25, -0.25],
                       [1.75, 1.75, 1.75],
                       [4.75, 4.75, 4.75],
                       [5.75, 5.75, 5.75]]),
             np.array([[-3, -1, 4],
                       [-3, -1, 4],
                       [-3, -1, 4],
                       [-3, -1, 4]]))
    assert_array_almost_equal(res, truth, 5)
def test_gradient_4d(deriv_4d_data):
    """Test gradient with 4D arrays."""
    res = gradient(deriv_4d_data, deltas=(1, 1, 1, 1))
    # One constant-gradient component per axis
    truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (48., 16., 4., 1.))
    assert_array_almost_equal(res, truth, 8)
def test_gradient_restricted_axes(deriv_2d_data):
    """Test 2D gradient with 3D arrays and manual specification of axes."""
    res = gradient(deriv_2d_data.f[..., None], coordinates=(deriv_2d_data.y, deriv_2d_data.x),
                   axes=(0, 1))
    truth = (np.array([[[-0.25], [-0.25], [-0.25]],
                       [[1.75], [1.75], [1.75]],
                       [[4.75], [4.75], [4.75]],
                       [[5.75], [5.75], [5.75]]]),
             np.array([[[-3], [-1], [4]],
                       [[-3], [-1], [4]],
                       [[-3], [-1], [4]],
                       [[-3], [-1], [4]]]))
    assert_array_almost_equal(res, truth, 5)
def test_bounding_indices():
    """Test finding bounding indices."""
    data = np.array([[1, 2, 3, 1], [5, 6, 7, 8]])
    above, below, good = find_bounding_indices(data, [1.5, 7], axis=1, from_below=True)
    assert_array_equal(above[1], np.array([[1, 0], [0, 3]]))
    assert_array_equal(below[1], np.array([[0, -1], [-1, 2]]))
    assert_array_equal(good, np.array([[True, False], [False, True]]))
def test_bounding_indices_above():
    """Test finding bounding indices from above."""
    data = np.array([[1, 2, 3, 1], [5, 6, 7, 8]])
    above, below, good = find_bounding_indices(data, [1.5, 7], axis=1, from_below=False)
    assert_array_equal(above[1], np.array([[3, 0], [0, 3]]))
    assert_array_equal(below[1], np.array([[2, -1], [-1, 2]]))
    assert_array_equal(good, np.array([[True, False], [False, True]]))
def test_angle_to_direction():
    """Test single angle in degree."""
    expected_dirs = DIR_STRS[:-1]  # UND at -1
    output_dirs = [angle_to_direction(angle) for angle in FULL_CIRCLE_DEGREES]
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_edge():
    """Test single angle edge case (360 and no units) in degree."""
    expected_dirs = 'N'
    output_dirs = angle_to_direction(360)
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_list():
    """Test list of angles in degree."""
    expected_dirs = DIR_STRS[:-1]
    output_dirs = list(angle_to_direction(FULL_CIRCLE_DEGREES))
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_arr():
    """Test array of angles in degree."""
    expected_dirs = DIR_STRS[:-1]
    output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES)
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_full():
    """Test the `full` keyword argument, expecting unabbreviated output."""
    expected_dirs = [
        'North', 'North North East', 'North East', 'East North East',
        'East', 'East South East', 'South East', 'South South East',
        'South', 'South South West', 'South West', 'West South West',
        'West', 'West North West', 'North West', 'North North West'
    ]
    output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES, full=True)
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_invalid_scalar():
    """Test invalid angle."""
    expected_dirs = UND
    output_dirs = angle_to_direction(None)
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_invalid_arr():
    """Test array of invalid angles."""
    expected_dirs = ['NE', UND, UND, UND, 'N']
    output_dirs = angle_to_direction(['46', None, np.nan, None, '362.'])
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_level_4():
    """Test non-existent level of complexity."""
    with pytest.raises(ValueError) as exc:
        angle_to_direction(FULL_CIRCLE_DEGREES, level=4)
    assert 'cannot be less than 1 or greater than 3' in str(exc.value)
def test_angle_to_direction_level_3():
    """Test array of angles in degree at the finest resolution (level=3)."""
    expected_dirs = DIR_STRS[:-1]  # UND at -1
    output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES, level=3)
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_level_2():
    """Test array of angles in degree at reduced resolution (level=2)."""
    expected_dirs = [
        'N', 'N', 'NE', 'NE', 'E', 'E', 'SE', 'SE',
        'S', 'S', 'SW', 'SW', 'W', 'W', 'NW', 'NW'
    ]
    output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES, level=2)
    assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_level_1():
    """Test array of angles in degree at the coarsest resolution (level=1)."""
    expected_dirs = [
        'N', 'N', 'N', 'E', 'E', 'E', 'E', 'S', 'S', 'S', 'S',
        'W', 'W', 'W', 'W', 'N']
    output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES, level=1)
    assert_array_equal(output_dirs, expected_dirs)
def test_azimuth_range_to_lat_lon():
    """Test conversion of azimuth and range to lat/lon grid."""
    az = [332.2403, 334.6765, 337.2528, 339.73846, 342.26257]
    rng = [2125., 64625., 127125., 189625., 252125., 314625.]
    # Radar site location (center of the azimuth/range grid)
    clon = -89.98416666666667
    clat = 32.27972222222222
    output_lon, output_lat = azimuth_range_to_lat_lon(az, rng, clon, clat)
    true_lon = [[-89.9946968, -90.3061798, -90.6211612, -90.9397425, -91.2620282,
                 -91.5881257],
                [-89.9938369, -90.2799198, -90.5692874, -90.8620385, -91.1582743,
                 -91.4580996],
                [-89.9929086, -90.251559, -90.5132417, -90.7780507, -91.0460827,
                 -91.3174374],
                [-89.9919961, -90.2236737, -90.4581161, -90.6954113, -90.9356497,
                 -91.178925],
                [-89.9910545, -90.1948876, -90.4011921, -90.6100481, -90.8215385,
                 -91.0357492]]
    true_lat = [[32.2966329, 32.7936114, 33.2898102, 33.7852055, 34.2797726,
                 34.773486],
                [32.2969961, 32.804717, 33.3117799, 33.8181643, 34.3238488,
                 34.8288114],
                [32.2973461, 32.8154229, 33.3329617, 33.8499452, 34.3663556,
                 34.8821746],
                [32.29765, 32.8247204, 33.3513589, 33.8775516, 34.4032838,
                 34.9285404],
                [32.2979242, 32.8331062, 33.367954, 33.9024562, 34.4366016,
                 34.9703782]]
    assert_array_almost_equal(output_lon, true_lon, 6)
    assert_array_almost_equal(output_lat, true_lat, 6)
def test_azimuth_range_to_lat_lon_diff_ellps():
    """Test conversion of azimuth and range to lat/lon grid with a non-default ellipsoid."""
    az = [332.2403, 334.6765, 337.2528, 339.73846, 342.26257]
    rng = [2125., 64625., 127125., 189625., 252125., 314625.]
    clon = -89.98416666666667
    clat = 32.27972222222222
    # Same grid as above, but computed on the WGS84 ellipsoid
    output_lon, output_lat = azimuth_range_to_lat_lon(az, rng, clon, clat, Geod(ellps='WGS84'))
    true_lon = [[-89.9946749, -90.3055083, -90.6198256, -90.9377279, -91.2593193,
                 -91.5847066],
                [-89.9938168, -90.279303, -90.5680603, -90.860187, -91.1557841,
                 -91.4549558],
                [-89.9928904, -90.2510012, -90.5121319, -90.7763758, -91.0438294,
                 -91.3145919],
                [-89.9919799, -90.2231741, -90.4571217, -90.6939102, -90.9336298,
                 -91.1763737],
                [-89.9910402, -90.194448, -90.4003169, -90.6087268, -90.8197603,
                 -91.0335027]]
    true_lat = [[32.2966791, 32.794996, 33.2924932, 33.7891466, 34.2849315,
                 34.7798223],
                [32.2970433, 32.8061309, 33.3145188, 33.8221862, 34.3291116,
                 34.835273],
                [32.2973942, 32.816865, 33.3357544, 33.8540448, 34.3717184,
                 34.8887564],
                [32.297699, 32.826187, 33.3541984, 33.8817186, 34.4087331,
                 34.9352264],
                [32.2979739, 32.834595, 33.3708355, 33.906684, 34.4421288,
                 34.9771578]]
    assert_array_almost_equal(output_lon, true_lon, 6)
    assert_array_almost_equal(output_lat, true_lat, 6)
def test_3d_gradient_3d_data_no_axes(deriv_4d_data):
    """Test 3D gradient with 3D data and no axes parameter."""
    test = deriv_4d_data[0]
    res = gradient(test, deltas=(1, 1, 1))
    truth = tuple(factor * np.ones_like(test) for factor in (16., 4., 1.))
    assert_array_almost_equal(res, truth, 8)
def test_2d_gradient_3d_data_no_axes(deriv_4d_data):
    """Test for failure of 2D gradient with 3D data and no axes parameter."""
    test = deriv_4d_data[0]
    with pytest.raises(ValueError) as exc:
        gradient(test, deltas=(1, 1))
    assert 'must match the number of dimensions' in str(exc.value)
def test_3d_gradient_2d_data_no_axes(deriv_4d_data):
    """Test for failure of 3D gradient with 2D data and no axes parameter."""
    test = deriv_4d_data[0, 0]
    with pytest.raises(ValueError) as exc:
        gradient(test, deltas=(1, 1, 1))
    assert 'must match the number of dimensions' in str(exc.value)
def test_2d_gradient_4d_data_2_axes_3_deltas(deriv_4d_data):
    """Test 2D gradient of 4D data with 2 axes and 3 deltas."""
    # Extra trailing deltas are allowed; only those matching `axes` are used
    res = gradient(deriv_4d_data, deltas=(1, 1, 1), axes=(-2, -1))
    truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (4., 1.))
    assert_array_almost_equal(res, truth, 8)
def test_2d_gradient_4d_data_2_axes_2_deltas(deriv_4d_data):
    """Test 2D gradient of 4D data with 2 axes and 2 deltas."""
    res = gradient(deriv_4d_data, deltas=(1, 1), axes=(0, 1))
    truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (48., 16.))
    assert_array_almost_equal(res, truth, 8)
def test_2d_gradient_4d_data_2_axes_1_deltas(deriv_4d_data):
    """Test for failure of 2D gradient of 4D data with 2 axes and 1 deltas."""
    with pytest.raises(ValueError) as exc:
        gradient(deriv_4d_data, deltas=(1, ), axes=(1, 2))
    assert 'cannot be less than that of "axes"' in str(exc.value)
def test_first_derivative_xarray_lonlat(test_da_lonlat):
| |
<filename>kasaya/core/protocol/transport/py3bson.py
#!/usr/bin/env python3
#coding: utf-8
###############################################################################
#
# This module implements BSON parsing and serialization in accordance
# to version 1.0 of the specification, http://bsonspec.org/#/specification
#
# Usage:
#
# import bson
#
# b = bson.serialize_to_bytes({ "key": "value" }) # returns bytes
# d = bson.parse_bytes(b) # returns ordered dict
#
# bson.serialize_to_stream({ "key": "value" }, s) # writes to the stream
# d = bson.parse_stream(s) # returns ordered dict
#
# Notes:
#
# Certain BSON types have their natural counterparts in Python types,
# which are transparently converted back and forth at serialization
# and parsing. Specifically,
#
# int <-> BSON_Int32 (or BSON_Int64 depending on the value)
# str <-> BSON_String
# bytes <-> BSON_Binary_Generic
# float <-> BSON_Double
# bool <-> BSON_Boolean
# datetime <-> BSON_Datetime
# None <-> BSON_Null
# dict <-> BSON_Document
# list <-> BSON_Array
#
# Therefore you can serialize native Python types:
# b = bson.serialize_to_bytes({ "s": [ "foo", 123 ] })
# and parsing result is also in native types:
# assert bson.parse_bytes(b) == { "s": [ "foo", 123 ] }
#
# But some other BSON types have no such natural conversions,
# so you have to both create the objects manually:
# b = bson.serialize_to_bytes({ "r": bson.BSON_Regex(("^$", "i")) })
# and after parsing examine their "value" property:
# d = bson.parse_bytes(b); r = d["r"]
# assert isinstance(r, bson.BSON_Regex) and r.value == ("^$", "i")
#
# Additional notes:
#
# This module makes use of two utility modules typecheck.py and
# expected.py (both from Pythomnic3k framework) if they are present.
# You can download them from http://www.pythomnic3k.org/ or you
# can simply ignore the warnings if those modules are not found.
#
# Pythomnic3k project
# (c) 2005-2012, <NAME> <<EMAIL>>
# Distributed under BSD license
#
###############################################################################
# Public API of this module: the parse/serialize entry points, the BSON_*
# type hierarchy, the error hierarchy, and small helpers.
__all__ = [
# public methods
"parse_bytes", "parse_stream", "serialize_to_bytes", "serialize_to_stream",
# class hierarchy
"BSON_Value",
"BSON_Double",
"BSON_String",
"BSON_Document",
"BSON_Array",
"BSON_Binary",
"BSON_Binary_Generic",
"BSON_Binary_Function",
"BSON_Binary_UUID",
"BSON_Binary_MD5",
"BSON_Binary_UserDefined",
"BSON_ObjectId",
"BSON_Boolean",
"BSON_Datetime",
"BSON_Null",
"BSON_Regex",
"BSON_JavaScript",
"BSON_Symbol",
"BSON_JavaScriptWithScope",
"BSON_Int32",
"BSON_Timestamp",
"BSON_Int64",
# errors hierarchy
"BSON_Error",
"BSON_AbstractError",
"BSON_ParsingError",
"BSON_ConversionError",
# utilities
"cstrify",
]
###############################################################################
import io; from io import BytesIO
import struct; from struct import pack, unpack
import datetime; from datetime import datetime
import calendar; from calendar import timegm
import collections; from collections import OrderedDict as odict
import binascii; from binascii import b2a_hex
try:
import typecheck
except ImportError:
#print("warning: module typecheck.py cannot be imported, type checking is skipped")
typecheck = lambda x: x; optional = callable = with_attr = lambda *args: True
else:
from typecheck import typecheck, optional, callable, with_attr
###############################################################################
def cstrify(s):
    """Encode *s* as UTF-8 and append the C-style NUL terminator."""
    return s.encode("utf-8") + b"\x00"
###############################################################################
class BSON_Error(Exception):
    """Base class for all BSON-related errors."""

class BSON_AbstractError(BSON_Error):
    """Raised when an abstract BSON base class is instantiated directly."""

class BSON_ParsingError(BSON_Error):
    """Raised when a byte stream cannot be parsed as valid BSON."""

class BSON_ConversionError(BSON_Error):
    """Raised when a Python value cannot be converted to a BSON type."""
###############################################################################
class BSON_Value:

    """Abstract base class for all BSON types.

    Subclasses keep their payload in ``self._value`` (exposed via the
    read-only ``value`` property), register themselves by type code with
    ``_register_type`` and implement ``serialize``/``parse``.
    """

    def __init__(self, *args, **kwargs):
        raise BSON_AbstractError("cannot instantiate abstract base class")

    # assuming the real "value" of the object is in self._value
    value = property(lambda self: self._value)

    # subclasses override this for bs->py value conversion
    def _py_value(self):
        return self

    def __eq__(self, other):
        return self.__class__ is other.__class__ and \
               self._value == other._value

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return "{0:s}({1})".format(self.__class__.__name__, self._value)

    def __repr__(self):
        # bugfix: the former "{0:s}".format(self) raised TypeError on
        # Python 3.4+, because object.__format__ rejects a non-empty format
        # spec for non-str objects; "!s" routes through __str__ instead.
        return "<{0!s} at 0x{1:08x}>".format(self, id(self))

    # subclasses are registered with their codes and conversion rules
    _types = {}  # maps byte codes to BSON_Type's for parsing

    @classmethod
    def _register_type(cls, type):
        cls._types[type._code] = type

    # redirects parsing to an appropriate BSON_Type by code
    @classmethod
    def parse(cls, code, stream):
        type = cls._types.get(code)
        if not type:
            raise BSON_ParsingError("unknown type code 0x{0:02x} at offset {1:d}".\
                                    format(int(code[0]), stream.tell())) # tested
        return type.parse(stream)

    # utility methods for stream reading

    @staticmethod
    def _read(stream, n) -> bytes:
        # read exactly n bytes or fail
        r = stream.read(n)
        if len(r) != n:
            raise BSON_ParsingError("unexpected end of stream") # tested
        return r

    @staticmethod
    def _read_null(stream) -> bytes:
        # read up to (but not including) the NUL terminator
        r = b""
        while True:
            b = BSON_Value._read(stream, 1)
            if b == b"\x00":
                return r
            r += b
###############################################################################
class BSON_Double(BSON_Value):
    # BSON element type 0x01: 64-bit IEEE 754 floating point number
    _code = b"\x01"
    @typecheck
    def __init__(self, value: float):
        self._value = value
    def _py_value(self):
        # doubles convert transparently to/from Python float
        return self._value
    def serialize(self, stream):
        # little-endian 8-byte double, per the BSON spec
        stream.write(pack("<d", self._value))
    @classmethod
    def parse(cls, stream):
        return cls(unpack("<d", cls._read(stream, 8))[0])
BSON_Value._register_type(BSON_Double)
###############################################################################
class BSON_String(BSON_Value):
    """BSON UTF-8 string (code 0x02): length-prefixed, null-terminated."""
    _code = b"\x02"
    @typecheck
    def __init__(self, value: str):
        self._value = value
    def __str__(self):
        return "{0:s}('{1:s}')".format(self.__class__.__name__, self._value)
    def _py_value(self):
        return self._value
    def serialize(self, stream):
        """Write <int32 byte count incl. terminator><utf-8 bytes><0x00>."""
        encoded = self._value.encode("utf-8")
        stream.write(pack("<l", len(encoded) + 1))
        stream.write(encoded)
        stream.write(b"\x00")
    @classmethod
    def parse(cls, stream):
        """Read one length-prefixed, null-terminated UTF-8 string.

        :raises BSON_ParsingError: on a non-positive length or a missing
            trailing null byte.
        """
        (length,) = unpack("<l", cls._read(stream, 4))
        if length <= 0:
            raise BSON_ParsingError("incorrect string length at offset {0:d}".\
                                    format(stream.tell()))
        payload = cls._read(stream, length)
        if payload[-1] != 0:
            raise BSON_ParsingError("expected null terminated string "
                                    "at offset {0:d}".format(stream.tell()))
        return cls(payload[:-1].decode("utf-8"))
BSON_Value._register_type(BSON_String)
###############################################################################
class BSON_Document(BSON_Value):
    """BSON embedded document (code 0x03): an ordered str -> BSON_Value map."""
    _code = b"\x03"
    @typecheck
    def __init__(self, value: dict):
        self._value = value
    def _py_value(self):
        # recursively convert contained values; odict preserves key order
        return odict((k, v._py_value()) for k, v in self._value.items())
    def serialize(self, stream):
        """Write the document as <int32 total length><element list><0x00>."""
        # build the element list in memory first so the total length is known
        e_list = BytesIO()
        for k, v in self._value.items():
            e_list.write(v._code)
            e_list.write(cstrify(k))
            v.serialize(e_list)
        e_list_b = e_list.getvalue()
        # +5 = 4 bytes of the length prefix itself + 1 trailing null
        stream.write(pack("<l", len(e_list_b) + 5))
        stream.write(e_list_b); stream.write(b"\x00")
    @classmethod
    def parse(cls, stream):
        """Read one document.

        :raises BSON_ParsingError: on a bad length, on an element that reads
            past the declared end, or on a missing trailing null byte.
        """
        length = unpack("<l", cls._read(stream, 4))[0]
        if length <= 4:
            raise BSON_ParsingError("incorrect structure length at offset {0:d}".\
                                    format(stream.tell())) # tested
        # expected stream position of the terminating null byte
        epos = stream.tell() + length - 5
        doc = odict()
        while stream.tell() < epos:
            code_b = cls._read(stream, 1)
            key_b = cls._read_null(stream)
            doc[key_b.decode("utf-8")] = BSON_Value.parse(code_b, stream)
        if stream.tell() != epos:
            raise BSON_ParsingError("read beyond end of structure "
                                    "at offset {0:d}".format(stream.tell())) # tested
        if cls._read(stream, 1) != b"\x00":
            raise BSON_ParsingError("expected null terminated structure "
                                    "at offset {0:d}".format(stream.tell())) # tested
        return cls(doc)
BSON_Value._register_type(BSON_Document)
###############################################################################
class BSON_Array(BSON_Value):
    """BSON array (code 0x04): encoded as a document keyed "0", "1", ..."""
    _code = b"\x04"
    @typecheck
    def __init__(self, value: list):
        self._value = value
    def _py_value(self):
        return [item._py_value() for item in self._value]
    def serialize(self, stream):
        """Encode by delegating to BSON_Document with stringified indices."""
        as_doc = odict()
        for index, item in enumerate(self._value):
            as_doc[str(index)] = item
        BSON_Document(as_doc).serialize(stream)
    @classmethod
    def parse(cls, stream):
        """Decode the underlying document and restore positional order."""
        fields = BSON_Document.parse(stream).value
        return cls([fields[str(index)] for index in range(len(fields))])
BSON_Value._register_type(BSON_Array)
###############################################################################
class BSON_Binary(BSON_Value):
    """Abstract base for the BSON binary type (code 0x05).

    Concrete subclasses carry a one-byte ``_subcode`` and register
    themselves in ``_subtypes`` so ``parse`` can dispatch on the subtype
    byte read from the stream.  Instantiating this class directly raises
    BSON_AbstractError.
    """
    _code = b"\x05"
    _subtypes = {}  # maps subtype code byte -> concrete BSON_Binary subclass
    def __init__(self, *args, **kwargs):
        raise BSON_AbstractError("cannot instantiate abstract binary class")
    @classmethod
    def _register_subtype(cls, subtype, subcode = None):
        # the optional subcode override lets one class serve several wire codes
        cls._subtypes[subtype._subcode if subcode is None else subcode] = subtype
    def serialize(self, stream):
        """Write <int32 payload length><subtype byte><payload bytes>."""
        stream.write(pack("<l", len(self._value)))
        stream.write(self.__class__._subcode)
        stream.write(self._value)
    @classmethod
    def parse(cls, stream):
        """Read one binary value and wrap it in the registered subtype class.

        :raises BSON_ParsingError: on a negative length or an unknown
            subtype code.
        """
        length = unpack("<l", cls._read(stream, 4))[0]
        # zero-length binary payloads are valid BSON, so only reject
        # negative lengths (the original `<= 0` check rejected empty blobs)
        if length < 0:
            raise BSON_ParsingError("incorrect structure length at offset {0:d}".\
                                    format(stream.tell())) # tested
        code = cls._read(stream, 1)
        content_b = cls._read(stream, length)
        subtype = cls._subtypes.get(code)
        if subtype is None:
            # report a parsing error, consistent with the rest of the parser,
            # instead of leaking a bare KeyError for unregistered subtypes
            raise BSON_ParsingError("unknown binary subtype code 0x{0:02x} "
                                    "at offset {1:d}".format(code[0], stream.tell()))
        return subtype(content_b) # treated as opaque bytes
BSON_Value._register_type(BSON_Binary)
###############################################################################
class BSON_Binary_Generic(BSON_Binary):
    """Generic binary subtype (0x00): opaque bytes."""
    _subcode = b"\00"  # octal escape; same byte as b"\x00"
    @typecheck
    def __init__(self, value: bytes):
        self._value = value
    def _py_value(self):
        # generic binary converts to plain Python bytes
        return self._value
BSON_Binary._register_subtype(BSON_Binary_Generic)
BSON_Binary._register_subtype(BSON_Binary_Generic, b"\x02") # deprecated alternative
###############################################################################
class BSON_Binary_Function(BSON_Binary):
    """Function binary subtype (0x01).

    No ``_py_value`` override, so conversion keeps the wrapper object
    (base-class default).
    """
    _subcode = b"\01"  # octal escape; same byte as b"\x01"
    @typecheck
    def __init__(self, value: bytes):
        self._value = value
BSON_Binary._register_subtype(BSON_Binary_Function)
###############################################################################
class BSON_Binary_UUID(BSON_Binary):
    """UUID binary subtype (0x03); the payload is kept as opaque bytes."""
    _subcode = b"\03"  # octal escape; same byte as b"\x03"
    @typecheck
    def __init__(self, value: bytes):
        self._value = value
BSON_Binary._register_subtype(BSON_Binary_UUID)
###############################################################################
class BSON_Binary_MD5(BSON_Binary):
    """MD5 binary subtype (0x05); the payload is kept as opaque bytes."""
    _subcode = b"\05"  # octal escape; same byte as b"\x05"
    @typecheck
    def __init__(self, value: bytes):
        self._value = value
BSON_Binary._register_subtype(BSON_Binary_MD5)
###############################################################################
class BSON_Binary_UserDefined(BSON_Binary):
    """User-defined binary subtype (0x80); the payload is kept as opaque bytes."""
    _subcode = b"\x80"
    @typecheck
    def __init__(self, value: bytes):
        self._value = value
BSON_Binary._register_subtype(BSON_Binary_UserDefined)
###############################################################################
class BSON_ObjectId(BSON_Value):
    """BSON ObjectId (code 0x07): exactly 12 opaque bytes."""
    _code = b"\x07"
    @typecheck
    def __init__(self, value: lambda x: isinstance(x, bytes) and len(x) == 12):
        self._value = value
    def __str__(self):
        # render the id as hex for readability
        return "{0:s}(0x{1:s})".format(self.__class__.__name__,
                                       b2a_hex(self._value).decode("ascii"))
    def serialize(self, stream):
        # no length prefix: ObjectId is a fixed 12-byte field
        stream.write(self._value)
    @classmethod
    def parse(cls, stream):
        return cls(cls._read(stream, 12))
BSON_Value._register_type(BSON_ObjectId)
###############################################################################
class BSON_Boolean(BSON_Value):
    """BSON boolean (code 0x08): a single byte, 0x00 or 0x01."""
    _code = b"\x08"
    @typecheck
    def __init__(self, value: bool):
        self._value = value
    def _py_value(self):
        return self._value
    def serialize(self, stream):
        stream.write(b"\x01" if self._value else b"\x00")
    @classmethod
    def parse(cls, stream):
        """Read one byte; anything other than 0 or 1 is a parsing error."""
        byte = cls._read(stream, 1)[0]
        if byte in (0, 1):
            return cls(byte == 1)
        raise BSON_ParsingError("incorrect boolean value at offset {0:d}".\
                                format(stream.tell()))
BSON_Value._register_type(BSON_Boolean)
###############################################################################
class BSON_Datetime(BSON_Value):
    """BSON UTC datetime (code 0x09): milliseconds since the Unix epoch,
    stored on the wire as an int64."""
    _code = b"\x09"
    @typecheck
    def __init__(self, value: datetime):
        self._value = value
    def _py_value(self):
        return self._value
    def serialize(self, stream):
        # convert to whole milliseconds since epoch; sub-millisecond
        # precision is truncated by the integer division
        epoch_ms = int(timegm(self._value.utctimetuple()) * 1000 + self._value.microsecond // 1000)
        BSON_Int64(epoch_ms).serialize(stream)
    @classmethod
    def parse(cls, stream):
        epoch_ms = BSON_Int64.parse(stream).value
        # NOTE: yields a naive datetime expressed in UTC
        return cls(datetime.utcfromtimestamp(epoch_ms / 1000.0))
BSON_Value._register_type(BSON_Datetime)
###############################################################################
class BSON_Null(BSON_Value):
    """BSON null (code 0x0A): carries no payload bytes on the wire."""
    _code = b"\x0a"
    @typecheck
    def __init__(self, value: type(None)):
        self._value = value
    def __str__(self):
        return "{0:s}()".format(self.__class__.__name__)
    def _py_value(self):
        # always None (enforced by the typecheck on __init__)
        return self._value
    def serialize(self, stream):
        # null has an empty wire representation
        pass
    @classmethod
    def parse(cls, stream):
        return cls(None)
BSON_Value._register_type(BSON_Null)
###############################################################################
class BSON_Regex(BSON_Value):
    """BSON regular expression (code 0x0B): a (pattern, options) pair of
    strings, each written as a null-terminated cstring."""
    _code = b"\x0b"
    @typecheck
    def __init__(self, value: (str, str)):
        self._value = value
    def __str__(self):
        return "{0:s}('{1[0]:s}', '{1[1]:s}')".\
               format(self.__class__.__name__, self._value)
    def serialize(self, stream):
        # two consecutive cstrings: pattern, then options
        stream.write(cstrify(self._value[0]))
        stream.write(cstrify(self._value[1]))
    @classmethod
    def parse(cls, stream):
        pattern_b = cls._read_null(stream)
        options_b = cls._read_null(stream)
        return cls((pattern_b.decode("utf-8"), options_b.decode("utf-8")))
BSON_Value._register_type(BSON_Regex)
###############################################################################
class BSON_JavaScript(BSON_Value):
    """BSON JavaScript code (code 0x0D): a bare script string, no scope."""
    _code = b"\x0d"
    @typecheck
    def __init__(self, value: str):
        self._value = value
    def __str__(self):
        return "{0:s}('{1:s}')".format(self.__class__.__name__, self._value)
    def serialize(self, stream):
        # wire format is identical to a plain BSON string
        BSON_String(self._value).serialize(stream)
    @classmethod
    def parse(cls, stream):
        inner = BSON_String.parse(stream)
        return cls(inner.value)
BSON_Value._register_type(BSON_JavaScript)
###############################################################################
class BSON_Symbol(BSON_Value):
    """BSON symbol (code 0x0E): wire-compatible with a BSON string."""
    _code = b"\x0e"
    @typecheck
    def __init__(self, value: str):
        self._value = value
    def serialize(self, stream):
        # delegate to the string encoding
        BSON_String(self._value).serialize(stream)
    @classmethod
    def parse(cls, stream):
        return cls(BSON_String.parse(stream).value)
BSON_Value._register_type(BSON_Symbol)
###############################################################################
class BSON_JavaScriptWithScope(BSON_Value):
_code = b"\x0f"
@typecheck
def __init__(self, value: (str, BSON_Document)):
self._value = value
def __str__(self):
return "{0:s}('{1[0]:s}', {1[1]:s})".\
format(self.__class__.__name__, self._value)
def serialize(self, stream):
codews = BytesIO()
BSON_String(self._value[0]).serialize(codews)
self._value[1].serialize(codews)
codews_b = codews.getvalue()
stream.write(pack("<l", len(codews_b) + 4))
stream.write(codews_b)
@classmethod
def parse(cls, stream):
length = unpack("<l", cls._read(stream, 4))[0]
if length <= 13:
raise BSON_ParsingError("incorrect structure length at offset {0:d}".\
format(stream.tell())) # tested
epos = stream.tell() + length - 4
| |
from kiwoom import config
from kiwoom.config import valid_event
from functools import wraps
from textwrap import dedent
from types import LambdaType
from inspect import (
getattr_static,
ismethod,
isfunction,
isclass,
ismodule
)
class Connector:
"""
Decorator class for mapping empty events to user implementing slots.
This class helps mapping events to specific slots. 'Kiwoom' instance
contains mapping information which had been set from two methods.
1) Kiwoom.connect(event, signal, slot)
2) Kiwoom.set_connect_hook(event, param)
This decorator does not contain those mapping information but only
uses the one defined in instance. This is because decorator should
work on various contexts. The first parameter of wrapper function
in __call__ method, i.e. api, is 'self' argument for Kiwoom object.
This class has three main methods.
1) Connector.map(event)
- act as a decorator for pre-defined Kiwoom events
- below is the usage example
class Kiwoom(API):
...
@map
def on_event_connect(self, err_code):
pass
...
2) Connector.mute(bool) # static method
- warning message can be turned on/off.
3) Connector.connectable(fn) # static method
- Check if given fn is a bounded method to an instance.
- If fn is not a bounded method, it should be static or lambda.
- Bounded method is important to handle continuous information
"""
# Class variable
nargs = {
'on_event_connect': 1,
'on_receive_msg': 4,
'on_receive_tr_data': 5,
'on_receive_real_data': 3,
'on_receive_chejan_data': 3,
'on_receive_condition_ver': 2,
'on_receive_tr_condition': 5,
'on_receive_real_condition': 4
}
    def __init__(self):
        """Initialize the per-event mapping tables, all empty."""
        # If no hook is set, dic[event] returns signal/slot.
        # If hook is set, dic[event][key] returns signal/slot.
        self._hooks = dict()    # event -> hook (see set_connect_hook)
        self._signals = dict()  # event (-> key) -> signal method
        self._slots = dict()    # event (-> key) -> slot method
        self._indices = dict()  # internal bookkeeping; usage not visible here
def signal(self, event, key=None):
"""
Returns signal methods connected to the event.
If signal and slot are connected to a specific event by Kiwoom.connect() method,
then this method returns the connected signal method. If signal is not connected,
or wrong key is given, raise a KeyError.
'key' is needed when hook is set by Kiwoom.set_connect_hook(). 'key' is set to
be the name of signal method by default unless another string is set on purpose
when connecting.
When requesting data to server is needed, specifically if more data is available,
Kiwoom.signal() returns the exact signal method that can request more data.
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:param key: str, optional
If hook is set by Kiwoom.set_connect_hook() method and signal is connected
by Kiwoom.connect(), then key is needed. 'key' is set to be name of the
signal method by default unless another 'key' is given when connecting.
:return: method
Signal method connected to the given event. If wrong event, returns None.
"""
if not valid_event(event):
return None
if not self.connect_hook(event):
return self._signals[event]
return self._signals[event][key]
def slot(self, event, key=None):
"""
Returns slot methods connected to the event.
If signal and slot are connected to specific event by Kiwoom.connect() method,
then this method returns the connected slot method. If slot is not connected,
or wrong key is given, this raises a KeyError.
'key' is needed when hook is set by Kiwoom.set_connect_hook(). 'key' is set to
be the name of slot method by default unless another string is set on purpose
when connecting.
When an event is called, Kiwoom.slot() returns the exact slot method that can
handle data received from the event. This method is used in Connector decorator
that wraps events to execute connected slot with the event.
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:param key: str, optional
If hook is set by Kiwoom.set_connect_hook() method and slot is connected
by Kiwoom.connect(), then key is needed. 'key' is set to be name of the
slot method by default unless another 'key' is given when connecting.
:return: method or None
Slot method connected to the given event. If wrong event, returns None.
"""
if not valid_event(event):
return None
if not self.connect_hook(event):
return self._slots[event]
return self._slots[event][key]
    def connect(self, event, signal=None, slot=None, key=None):
        """
        Connects signals and slots to one of pre-defined events.

        Information saved in this method is used by decorator @Connector() which wraps
        the events and automatically calls the right slot connected to the events. In
        addition to the decorator, Kiwoom.signal(event, key) and Kiwoom.slot(event, key)
        returns the one connected to the event.

        1) If no hook is set on the event, then the connected signal/slot can be retrieved
           by Kiwoom.signal(event) and Kiwoom.slot(event). There is no need to use key.

        2) If hook is set by Kiwoom.set_connect_hook() on the event, in which case there
           needs multiple slots to connect on one event, then connection requires a key
           which is to be the name of signal/slot methods by default.

        The convention to utilizing this module recommends to define the name of related
        signal and slot to be the same. Then it becomes easier to manage and develop codes.
        Use 'key' arg only when there is a special need. The connected signal/slot can be
        retrieved by Kiwoom.signal(event, key='name') and Kiwoom.slot(event, key='name').
        Here 'name' can be a method name or special 'key' used in this method.

        This method checks whether or not given signal/slot can be called without any
        problem. If given method is not bounded to an instance, method should be static
        or lambda function. This is because normally 'self' argument is needed to call
        methods, therefore method must be bounded to an instance unless given method is
        a function.

        Please see tutorials example on the link below.
        https://github.com/breadum/kiwoom/blob/main/tutorials/4.%20TR%20Data.py

        :param event: str
            One of the pre-defined event names in string. See kiwoom.config.EVENTS.
        :param signal: method, optional
            A method that requests to the server
        :param slot: method, optional
            A method that reacts the server's response
        :param key: str, optional
            Key is needed only if hook is set by Kiwoom.set_connect_hook() method.
            Key is set to be name of the given signal and/or slot method by default.
            If key is given other than method name, the connected signal can be
            retrieved by Kiwoom.signal(event, key) and slot by Kiwoom.slot(event, key)
        """
        valid = False
        connectable = Connector.connectable
        if not valid_event(event):
            return
        # Directly connect slot to the event
        if not self.connect_hook(event):
            # Key can't be used here
            if key is not None:
                raise RuntimeError(
                    "Key can't be used. Remove key argument or Try to set_connect_hook() first."
                )
            # NOTE: a signal without a connectable slot falls through and
            # ends in the RuntimeError below (signal-only is not supported)
            elif connectable(signal):
                if connectable(slot):
                    valid = True
                    self._signals[event] = signal
                    self._slots[event] = slot
            elif connectable(slot):
                valid = True
                self._slots[event] = slot
        # Hook is set: store signal/slot per key in the event's dict
        else:
            if connectable(signal):
                if connectable(slot):
                    valid = True
                    # Key other than method's name
                    if key is not None:
                        self._signals[event][key] = signal
                        self._slots[event][key] = slot
                    # Default key is method's name
                    else:
                        self._signals[event][getattr(signal, '__name__')] = signal
                        self._slots[event][getattr(slot, '__name__')] = slot
            elif connectable(slot):
                valid = True
                if key is not None:
                    self._slots[event][key] = slot
                else:
                    self._slots[event][getattr(slot, '__name__')] = slot
        # Nothing is connected
        if not valid:
            raise RuntimeError(f"Unsupported combination of inputs. Please read below.\n\n{self.connect.__doc__}")
def connect_hook(self, event):
"""
Returns whether a hook is set for given event.
:param event: str
One of the pre-defined event names in string. See kiwoom.config.EVENTS.
:return: bool
"""
if event in self._hooks:
return True
return False
def set_connect_hook(self, event, param):
"""
Set parameter defined in event as a hook to find the right slot when event is called.
When an event needs multiple slots to connect, depending on specific tasks, set
a hook(key) to select which slot to map. The hook must be one of the parameters
in the definition of the event method. Parameters can be found by help built-in
function or Kiwoom.api_arg_spec(event). This raises a KeyError if given param is
not defined in event method.
If hook is set to the given parameter, argument passed into the parameter when
the event is called, is going to be a key to connect event, signal and slot.
Convention is that the name of signal and slot that deal with the related task
is recommended to be the same, so that 'key' is set to be | |
# Version 2018.06.05
import GenericHazards
import string, time, os, re, types, copy, LogStream, collections
import ModuleAccessor, SampleAnalysis, EditAreaUtils, VTECTable
import math
import Tkinter
import LocalizationSupport
from AbsTime import *
from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceData, ReferenceID
from com.raytheon.uf.common.dataplugin.gfe.grid import Grid2DBit as JavaGrid2DBit
AWIPS_ENVIRON = "AWIPS2"
import HLSTCV_Common
class TextProduct(HLSTCV_Common.TextProduct):
Definition = copy.deepcopy(GenericHazards.TextProduct.Definition)
Definition["displayName"] = "None"
Definition["outputFile"] = "{prddir}/TEXT/HLS.txt"
Definition["database"] = "Official" # Source database
Definition["debug"] = 1
Definition["mapNameForCombinations"] = "Zones_<site>"
Definition["defaultEditAreas"] = "Combinations_HLS_<site>"
Definition["showZoneCombiner"] = 1 # 1 to cause zone combiner to display
Definition["productName"] = "Local Statement"
Definition["fullStationID" ] = "<fullStationID>"
Definition["wmoID" ] = "<wmoID>"
Definition["wfoCityState" ] = "<wfoCityState>"
Definition["pil" ] = "<pil>"
Definition["textdbPil" ] = "<textdbPil>"
Definition["awipsWANPil" ] = "<awipsWANPil>"
Definition["site"] = "<site>"
Definition["wfoCity"] = "<wfoCity>"
Definition["areaName"] = "" #optional area name for product
Definition["areaDictionary"] = "AreaDictionary"
Definition["language"] = "english"
Definition["lineLength"] = 71 #Maximum line length
Definition["tabLength"] = 4
Definition["purgeTime"] = 8 # Default Expiration in hours if
Definition["includeZoneNames"] = 1 # Zone names will be included in the area header
Definition["includeIssueTime"] = 0 # Issue Time will be included in the area header
Definition["easPhrase"] = \
"URGENT - IMMEDIATE BROADCAST REQUESTED" # Optional EAS phrase to be include in product header
Definition["callToAction"] = 1
# Add options for debugging
Definition["debug"] = {
#TextProduct
"__init__": 0,
"_inlandAreas": 0,
"_coastalAreas": 0,
"_cwa": 0,
"_cwa_descriptor": 0,
"_localReferencePoints": 0,
"_localReferencePoints_defaults": 0,
"_referencePointLimit": 0,
"_productParts_HLS": 0,
"_analysisList_HLS": 0,
"_analysisList_HLS_WholeDomain": 0,
"_intersectAnalysisList_HLS": 0,
"generateForecast": 0,
"_initializeVariables": 0,
"_initializeHeadlines": 0,
"_initializeSamplingDict": 0,
"_noOpParts": 0,
"_areaList": 0,
"_summaryHeadlines": 0,
"_changesHazards": 0,
"_currentHazards": 0,
"_stormInformation": 0,
"_situationOverview": 0,
"_windSection": 0,
"_surgeSection": 0,
"_floodingRainSection": 0,
"_tornadoSection": 0,
"_coastalHazardsSection": 0,
"_preparednessSection": 0,
"_evacuationStatements": 0,
"_otherPreparednessActions": 0,
"_additionalSourcesInfo": 0,
"_nextUpdate": 0,
"_impactsKeyFunction": 0,
"_getPotentialImpactsStatements": 0,
"_impactCategoryToThreatLevel": 0,
"_determineHazardStates": 0,
"_sampleHLSData": 0,
"_sampleTCVAdvisory": 0,
"_sampleRankedDiscreteValue": 0,
"_sampleMostSignificantDiscreteValue": 0,
"_getDominantThreatLevel": 0,
"_getHighestThreat": 0,
"_getLowestThreat": 0,
"_setHazardImpactCategories": 0,
"_createWholeDomainEditArea": 0,
"_determineHazards": 0,
"_formatLocalTime": 0,
"_getTimeZoneList": 0,
"_grabHeadline": 0,
"_getStormInfo": 0,
"_grabStormInfo": 0,
"_decodeStormInfo": 0,
"_expandBearings": 0,
"_removeKM": 0,
"_cleanText": 0,
"_calcLocalReferences": 0,
"_calcReference": 0,
"_distanceFromLatLon": 0,
"_bearing": 0,
"_dirInEnglish": 0,
"_overview_list": 0,
"_displayGUI": 0,
"_frame": 0,
#HLSTCV_Common
"allowedHazards": 0,
"allowedHeadlines": 0,
"_initializeVariables": 0,
"moderated_dict": 0,
"_wmoHeader": 0,
"_productHeader": 0,
"_ugcHeader": 0,
"_processProductParts": 0,
"_createProductDictionary": 0,
"_initializeProductDictionary": 0,
"_formatProductDictionary": 0,
"_getStatValue": 0,
"_allAreas": 0,
"_groupSegments": 0,
"_getSegmentVTECRecordsTuples": 0,
"_computeIntersectAreas": 0,
"_initializeHazardsTable": 0,
"_getHazardsTable": 0,
"_ignoreActions": 0,
"_setVTECActiveTable": 0,
"_getVtecRecords": 0,
"_getAllowedHazardList": 0,
"_altFilterMethod": 0,
"_filterHazards": 0,
"_getAdditionalHazards": 0,
"_checkHazard": 0,
"_initializeTimeVariables": 0,
"_determineTimeRanges": 0,
"_createPeriodList": 0,
"_calculateStartTime": 0,
"_formatPeriod": 0,
"_getTimeDesc": 0,
"_getPartOfDay": 0,
"_initializeStormInformation": 0,
"_parseTCP": 0,
"_getStormTypeFromTCP": 0,
"_getStormNameFromTCP": 0,
"_getAdvisoryTypeFromTCP": 0,
"_getAdvisoryNumberStringFromTCP": 0,
"_getStormNumberStringFromTCP": 0,
"_getStormIDStringFromTCP": 0,
"_useTestTCP": 0,
"_testTCP": 0,
"_initializeAdvisories": 0,
"_synchronizeAdvisories": 0,
"_getLocalAdvisoryDirectoryPath": 0,
"_getStormAdvisoryNames": 0,
"_loadLastTwoAdvisories": 0,
"_loadAdvisory": 0,
"_getAdvisoryPath": 0,
"_getAdvisoryFilename": 0,
"_processVariableList": 0,
"_GUI_sizing_dict": 0,
"_GUI1_configDict": 0,
"_font_GUI_dict": 0,
#Overview_Dialog
"body": 0,
"_makeStep3": 0,
"_makeButtons": 0,
"okCB": 0,
#Common_Dialog
"getVarDict": 0,
"_makeRadioOrCheckList": 0,
"_makeEntry": 0,
"cancelCB": 0,
"_entryName": 0,
"_makeTuple": 0,
"_setVarDict": 0,
"status": 0,
"buttonbox": 0,
#LegacyFormatter
"execute": 0,
"_processProductParts": 0,
"processWmoHeader": 0,
"processProductHeader": 0,
"processSummaryHeadlines": 0,
"processHazards": 1,
"_addToGroupedHazards": 1,
"_sortHazardsType": 0,
"_consolidateGroupedHazards": 1,
"_createHazardTextParts": 0,
"_areaWords": 0,
"processStormInformation": 0,
"processSituationOverview": 0,
"processHazardsSection": 0,
"processSubParts": 0,
#TextProductCommon
"setUp": 0,
"hazardTimeZones": 0,
"getExpireTime": 0,
"getHeadlinesAndSections": 0,
"formatUGCs": 0,
"getFormattedTime": 0,
"formatUGC_names": 0,
"formatNameString": 0,
"getVal": 0,
"formatDatetime": 0,
"flush": 0,
"makeUGCString": 0,
"checkLastArrow": 0,
}
# Definition["debug"] = 1 # turn on ALL debug messages
Definition["debug"] = 0 # turn off ALL debug messages
    def __init__(self):
        # delegate all initialization to the shared HLS/TCV base product
        HLSTCV_Common.TextProduct.__init__(self)
#####################################################################
#####################################################################
### Organization of Formatter Code
###############################################################
### MUST OVERRIDE DEFINITIONS !!!
### _inlandAreas, _coastalAreas, _cwa, _cwa_descriptor,
### _localReferencePoints, _localReferencePoints_defaults
###############################################################
###############################################################
### Optional Overrides
### _referencePointLimit
###############################################################
###############################################################
### HLS Product and Segment Parts Definition
###############################################################
###############################################################
### Analysis Lists, SampleAnalysis Overrides and other
### analysis related methods
###############################################################
###############################################################
# CODE
###############################################################
### High level flow of formatter
### generateForecast, _initializeVariables,
### _loadLastTwoAdvisories, _determineTimeRanges,
### _initializeSamplingDict, _sampleTCVAdvisory,
### _sampleHLSData, _determineHazardStates,
### _setHazardImpactCategories, _createProductDictionary,
### _formatProductDictionary
###############################################################
###############################################################
### Product Parts Implementation
###############################################################
###############################################################
### Sampling and Statistics related methods
###############################################################
###############################################################
### Area, Zone and Segment related methods
###############################################################
###############################################################
### Hazards related methods
###############################################################
###############################################################
### Time related methods
###############################################################
###############################################################
### Storm Information and TCP related methods
###############################################################
###############################################################
### GUI related methods
###############################################################
###############################################################
### MUST OVERRIDE DEFINITIONS !!!
    def _inlandAreas(self):
        """Return the inland zone codes for this site (MUST OVERRIDE).

        Ships empty; each office lists its own zones (see commented example).
        """
        return [
            #"FLZ063", "FLZ066", "FLZ067", "FLZ068", "FLZ070",
            #"FLZ071", "FLZ072", "FLZ073", "FLZ074",
        ]
    def _coastalAreas(self):
        """Return the coastal zone codes for this site (MUST OVERRIDE).

        Ships empty; each office lists its own zones (see commented example).
        """
        return [
            #"FLZ069", "FLZ075", "FLZ168", "FLZ172", "FLZ173", "FLZ174",
        ]
    def _cwa(self):
        """Return the three-letter CWA/site identifier (MUST OVERRIDE)."""
        return "" #"MFL"
    def _cwa_descriptor(self):
        """Return the human-readable name of the CWA (MUST OVERRIDE)."""
        return "" #"South Florida"
    def _localReferencePoints(self):
        """Return (name, (lat, lon)) tuples for local reference points
        (MUST OVERRIDE); ships empty, see commented example."""
        # Give the name and lat/lon for each local reference point
        return [
            #("West Palm Beach, FL", (26.71, -80.06)),
            #("Fort Lauderdale, FL", (26.12, -80.15)),
            #("Miami, FL", (25.77, -80.20)),
            #("Miami Beach, FL", (25.81, -80.13)),
            #("Naples, FL", (26.14, -81.80)),
            #("Marco Island, FL", (25.94, -81.73)),
        ]
    def _localReferencePoints_defaults(self):
        """Return the reference-point names selected by default (MUST OVERRIDE)."""
        # Give a list of the local reference point names to be
        # turned on by default
        return [] #["Miami, FL", "Naples, FL"]
###############################################################
### Optional Overrides
    def _referencePointLimit(self):
        """Return (max count, spelled-out label) for selectable reference points."""
        # Give the number of reference points allowed to be chosen
        # Also give a label (e.g. "two") for the GUI
        return (2, "two")
###############################################################
### HLS Product and Segment Parts Definition
def _productParts_HLS(self, segment_vtecRecords_tuples):
partsList = [
'wmoHeader',
'ugcHeader',
'productHeader',
'areaList',
'summaryHeadlines',
'newInformationHeader',
'changesHazards',
'currentHazards',
'stormInformation',
'situationOverview',
'sigPotentialImpacts',
]
if self._ImpactsAnticipated:
includedImpacts = sorted(self._IncludedImpacts, key=self._impactsKeyFunction)
for ((_, sectionName), _) in includedImpacts:
self.debug_print("adding section = '%s'" % (sectionName), 1)
partsList.append(sectionName)
partsList.append('preparednessSection')
if self._ImpactsAnticipated:
partsList.append('evacuationStatements')
partsList.append('otherPreparednessActions')
partsList.append('additionalSourcesInfo')
partsList.append('nextUpdate')
partsList.append('endProduct')
self.debug_print("Product Parts partsList =\n\n%s\n" % (self._pp.pformat(partsList)), 1)
return {
'partsList': partsList
}
###############################################################
### Analysis Lists, SampleAnalysis Overrides and other
### analysis related methods
    def _analysisList_HLS(self):
        """SampleAnalysis spec for zone-level HLS sampling.

        Each tuple is (weather element, analysis method[, time args]).
        """
        # Sample over 120 hours beginning at current time
        analysisList = [
            # Wind Section
            ("WindThreat", self.rankedDiscreteValue),
            ("WindThreat", self.mostSignificantDiscreteValue),

            # Flooding Rain Section
            ("QPFtoFFGRatio", self.moderatedMax, [6]),
            ("FloodingRainThreat", self.rankedDiscreteValue),
            ("FloodingRainThreat", self.mostSignificantDiscreteValue),

            # Tornado Section
            ("TornadoThreat", self.rankedDiscreteValue),
            ("TornadoThreat", self.mostSignificantDiscreteValue),
        ]

        return analysisList
    def _analysisList_HLS_WholeDomain(self):
        """SampleAnalysis spec applied to the whole-domain edit area."""
        # Sample over 120 hours beginning at current time
        analysisList = [
            # Wind Section
            ("Wind", self.vectorModeratedMax, [6]),
        ]

        return analysisList
    def _intersectAnalysisList_HLS(self):
        """SampleAnalysis spec for the storm-surge grids.

        These elements are sampled over the intersection with a special
        edit area rather than the plain zones.
        """
        # The grids for the Surge Section will be intersected with a special edit area
        analysisList = [
            ("InundationMax", self.moderatedMax, [6]),
            ("StormSurgeThreat", self.rankedDiscreteValue),
            ("StormSurgeThreat", self.mostSignificantDiscreteValue),
        ]

        return analysisList
###############################################################
### High level flow of formatter
    def generateForecast(self, argDict):
        """Top-level entry point: build and return the HLS product text.

        Returns the formatted product string on success, or a plain error
        message string describing why the product could not be generated.
        """
        # Generate Text Phrases for a list of edit areas
        error = self._initializeVariables(argDict)
        if error is not None:
            return error

        if self._stormName is None or self._stormName == "":
            return "Could not determine the storm name"

        if self._ImpactsAnticipated:
            # an HLS requires a previously transmitted, matching TCV advisory
            self._loadLastTwoAdvisories()
            if (self._previousAdvisory is None or \
                not self._previousAdvisoryMatchesNumber):
                return "A TCV must be transmitted before an HLS can be run"

            if len(self._IncludedImpacts) == 0:
                return "At least one potential impact section needs to be included."

        # Determine time ranges
        self._determineTimeRanges(argDict)

        if self._ImpactsAnticipated:
            # Sample the data
            self._initializeSamplingDict()
            self._sampleTCVAdvisory(self._previousAdvisory)
            self._sampleHLSData(argDict)

            self._determineHazardStates()

            # derive an impact category for each of the four threat grids
            for threatName in ['WindThreat', 'StormSurgeThreat', 'FloodingRainThreat', 'TornadoThreat']:
                self._setHazardImpactCategories(threatName)

        # Create the product dictionary and format it to create the output
        productDict = self._createProductDictionary(self._productParts_HLS,
                                                    self._allAreas(),
                                                    areProductPartsSegmented=False)
        productOutput = self._formatProductDictionary(LegacyFormatter, productDict)

        return productOutput
def _initializeVariables(self, argDict):
error = HLSTCV_Common.TextProduct._initializeVariables(self, argDict)
if error is not None:
return error
self._getStormInfo(argDict)
self._initializeHeadlines()
#=======================================================================
# Now produce a UGC header using only the WFO selected zones
#=======================================================================
# Get the Combinations file for the HLS
accessor = ModuleAccessor.ModuleAccessor()
self.debug_print("self._defaultEditAreas = %s" % (self._pp.pformat(self._defaultEditAreas)), 1)
# combos is a list of tuples. Each tuple is a grouping of zones
# (a list of zones, combo name).
combos = accessor.variable(self._defaultEditAreas, "Combinations")
# If we could not find a Combinations file for the HLS
if combos is None:
LogStream.logVerbose("Combination file not found: " + self._pp.pformat(self._defaultEditAreas))
# Default to using the entire CWA
self._ugcs = sorted(self._allAreas())
# Otherwise, construct the final list of WFO selected zones
else:
self.debug_print("Segments from Zone Combiner = %s" % (self._pp.pformat(combos)), 1)
# Create a list containing all zones from all combination groups
selectedZones = reduce(lambda zones, combo: zones + combo[0],
combos,
[])
# Use the selected zones for the UGC header
self._ugcs = sorted(selectedZones)
self.debug_print("Final Zones for UGC header = %s" % (self._pp.pformat(self._ugcs)), | |
For example, the key ``PRODUCT`` is valid for all supported operating system types. The key ``MSRC_SEVERITY`` , however, is valid only for Windows operating systems, and the key ``SECTION`` is valid only for Ubuntu operating systems.
Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.
**Windows Operating Systems**
The supported keys for Windows operating systems are ``PRODUCT`` , ``CLASSIFICATION`` , and ``MSRC_SEVERITY`` . See the following lists for valid values for each of these keys.
*Supported key:* ``PRODUCT``
*Supported values:*
* ``Windows7``
* ``Windows8``
* ``Windows8.1``
* ``Windows8Embedded``
* ``Windows10``
* ``Windows10LTSB``
* ``WindowsServer2008``
* ``WindowsServer2008R2``
* ``WindowsServer2012``
* ``WindowsServer2012R2``
* ``WindowsServer2016``
* ``WindowsServer2019``
* ``*`` *Use a wildcard character (*) to target all supported operating system versions.*
*Supported key:* ``CLASSIFICATION``
*Supported values:*
* ``CriticalUpdates``
* ``DefinitionUpdates``
* ``Drivers``
* ``FeaturePacks``
* ``SecurityUpdates``
* ``ServicePacks``
* ``Tools``
* ``UpdateRollups``
* ``Updates``
* ``Upgrades``
*Supported key:* ``MSRC_SEVERITY``
*Supported values:*
* ``Critical``
* ``Important``
* ``Moderate``
* ``Low``
* ``Unspecified``
**Ubuntu Operating Systems**
The supported keys for Ubuntu operating systems are ``PRODUCT`` , ``PRIORITY`` , and ``SECTION`` . See the following lists for valid values for each of these keys.
*Supported key:* ``PRODUCT``
*Supported values:*
* ``Ubuntu14.04``
* ``Ubuntu16.04``
* ``*`` *Use a wildcard character (*) to target all supported operating system versions.*
*Supported key:* ``PRIORITY``
*Supported values:*
* ``Required``
* ``Important``
* ``Standard``
* ``Optional``
* ``Extra``
*Supported key:* ``SECTION``
Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.
**Amazon Linux Operating Systems**
The supported keys for Amazon Linux operating systems are ``PRODUCT`` , ``CLASSIFICATION`` , and ``SEVERITY`` . See the following lists for valid values for each of these keys.
*Supported key:* ``PRODUCT``
*Supported values:*
* ``AmazonLinux2012.03``
* ``AmazonLinux2012.09``
* ``AmazonLinux2013.03``
* ``AmazonLinux2013.09``
* ``AmazonLinux2014.03``
* ``AmazonLinux2014.09``
* ``AmazonLinux2015.03``
* ``AmazonLinux2015.09``
* ``AmazonLinux2016.03``
* ``AmazonLinux2016.09``
* ``AmazonLinux2017.03``
* ``AmazonLinux2017.09``
* ``*`` *Use a wildcard character (*) to target all supported operating system versions.*
*Supported key:* ``CLASSIFICATION``
*Supported values:*
* ``Security``
* ``Bugfix``
* ``Enhancement``
* ``Recommended``
* ``Newpackage``
*Supported key:* ``SEVERITY``
*Supported values:*
* ``Critical``
* ``Important``
* ``Medium``
* ``Low``
**Amazon Linux 2 Operating Systems**
The supported keys for Amazon Linux 2 operating systems are ``PRODUCT`` , ``CLASSIFICATION`` , and ``SEVERITY`` . See the following lists for valid values for each of these keys.
*Supported key:* ``PRODUCT``
*Supported values:*
* ``AmazonLinux2``
* ``AmazonLinux2.0``
* ``*`` *Use a wildcard character (*) to target all supported operating system versions.*
*Supported key:* ``CLASSIFICATION``
*Supported values:*
* ``Security``
* ``Bugfix``
* ``Enhancement``
* ``Recommended``
* ``Newpackage``
*Supported key:* ``SEVERITY``
*Supported values:*
* ``Critical``
* ``Important``
* ``Medium``
* ``Low``
**RedHat Enterprise Linux (RHEL) Operating Systems**
The supported keys for RedHat Enterprise Linux operating systems are ``PRODUCT`` , ``CLASSIFICATION`` , and ``SEVERITY`` . See the following lists for valid values for each of these keys.
*Supported key:* ``PRODUCT``
*Supported values:*
* ``RedhatEnterpriseLinux6.5``
* ``RedhatEnterpriseLinux6.6``
* ``RedhatEnterpriseLinux6.7``
* ``RedhatEnterpriseLinux6.8``
* ``RedhatEnterpriseLinux6.9``
* ``RedhatEnterpriseLinux7.0``
* ``RedhatEnterpriseLinux7.1``
* ``RedhatEnterpriseLinux7.2``
* ``RedhatEnterpriseLinux7.3``
* ``RedhatEnterpriseLinux7.4``
* ``RedhatEnterpriseLinux7.5``
* ``RedhatEnterpriseLinux7.6``
* ``*`` *Use a wildcard character (*) to target all supported operating system versions.*
*Supported key:* ``CLASSIFICATION``
*Supported values:*
* ``Security``
* ``Bugfix``
* ``Enhancement``
* ``Recommended``
* ``Newpackage``
*Supported key:* ``SEVERITY``
*Supported values:*
* ``Critical``
* ``Important``
* ``Medium``
* ``Low``
**SUSE Linux Enterprise Server (SLES) Operating Systems**
The supported keys for SLES operating systems are ``PRODUCT`` , ``CLASSIFICATION`` , and ``SEVERITY`` . See the following lists for valid values for each of these keys.
*Supported key:* ``PRODUCT``
*Supported values:*
* ``Suse12.0``
* ``Suse12.1``
* ``Suse12.2``
* ``Suse12.3``
* ``Suse12.4``
* ``Suse12.5``
* ``Suse12.6``
* ``Suse12.7``
* ``Suse12.8``
* ``Suse12.9``
* ``*`` *Use a wildcard character (*) to target all supported operating system versions.*
*Supported key:* ``CLASSIFICATION``
*Supported values:*
* ``Security``
* ``Recommended``
* ``Optional``
* ``Feature``
* ``Document``
* ``Yast``
*Supported key:* ``SEVERITY``
*Supported values:*
* ``Critical``
* ``Important``
* ``Moderate``
* ``Low``
**CentOS Operating Systems**
The supported keys for CentOS operating systems are ``PRODUCT`` , ``CLASSIFICATION`` , and ``SEVERITY`` . See the following lists for valid values for each of these keys.
*Supported key:* ``PRODUCT``
*Supported values:*
* ``CentOS6.5``
* ``CentOS6.6``
* ``CentOS6.7``
* ``CentOS6.8``
* ``CentOS6.9``
* ``CentOS7.0``
* ``CentOS7.1``
* ``CentOS7.2``
* ``CentOS7.3``
* ``CentOS7.4``
* ``CentOS7.5``
* ``CentOS7.6``
* ``*`` *Use a wildcard character (*) to target all supported operating system versions.*
*Supported key:* ``CLASSIFICATION``
*Supported values:*
* ``Security``
* ``Bugfix``
* ``Enhancement``
* ``Recommended``
* ``Newpackage``
*Supported key:* ``SEVERITY``
*Supported values:*
* ``Critical``
* ``Important``
* ``Medium``
* ``Low``
- **Key** *(string) --* **[REQUIRED]**
The key for the filter.
See PatchFilter for lists of valid keys for each operating system type.
- **Values** *(list) --* **[REQUIRED]**
The value for the filter key.
See PatchFilter for lists of valid values for each key based on operating system type.
- *(string) --*
- **ComplianceLevel** *(string) --*
A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: Unspecified, Critical, High, Medium, Low, and Informational.
- **ApproveAfterDays** *(integer) --* **[REQUIRED]**
The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of ``7`` means that patches are approved seven days after they are released.
- **EnableNonSecurity** *(boolean) --*
For instances identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is \'false\'. Applies to Linux instances only.
:type ApprovedPatches: list
:param ApprovedPatches:
A list of explicitly approved patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see `Package Name Formats for Approved and Rejected Patch Lists <https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
:type ApprovedPatchesComplianceLevel: string
:param ApprovedPatchesComplianceLevel:
Defines the compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. The default value is UNSPECIFIED.
:type ApprovedPatchesEnableNonSecurity: boolean
:param ApprovedPatchesEnableNonSecurity:
Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is \'false\'. Applies to Linux instances only.
:type RejectedPatches: list
:param RejectedPatches:
A list of explicitly rejected patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see `Package Name Formats for Approved and Rejected Patch Lists <https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
:type RejectedPatchesAction: string
:param RejectedPatchesAction:
The action for Patch Manager to take on patches included in the RejectedPatches list.
* **ALLOW_AS_DEPENDENCY** : A package in the Rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as *InstalledOther* . This is the default action if no option is specified.
* **BLOCK** : Packages in the RejectedPatches list, and packages that include them as dependencies, are not installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as *InstalledRejected* .
:type Description: string
:param Description:
A description of the patch baseline.
:type Sources: list
:param Sources:
Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.
- *(dict) --*
Information about the patches to use to update the instances, including target operating systems and source repository. Applies to Linux instances only.
- **Name** *(string) --* **[REQUIRED]**
The name specified to | |
<filename>pipeline/test_dataformat_csv.py
# Copyright 2021 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
import os
import tempfile
from streamsets.testframework.markers import sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
# CSV parser implementations exercised by every parametrized test below.
ALL_PARSERS = ['UNIVOCITY', 'LEGACY_PARSER']

# Simple CSV, three columns, two rows, header
SIMPLE_CSV = "A,B,C\n" \
             "1,2,3\n" \
             "10,20,30\n"

# Variant of simple where field separator is semicolon
FIELD_SEMICOLON_CSV = "A;B;C\n" \
                      "1;2;3\n" \
                      "10;20;30\n"

# CSV with 3 rows, two of 5 characters and middle one with 11 characters
# (used by max-record-length tests elsewhere in this module).
MAX_LINE_CSV = "A,B,C\n" \
               "1,2,3\n" \
               "100,200,300\n" \
               "1,2,3\n"
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
def test_header_with_header(sdc_builder, sdc_executor, csv_parser):
    """The header line is consumed and its values become the field names."""
    work_dir = _prepare_work_dir(sdc_executor, SIMPLE_CSV)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'WITH_HEADER'

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 2)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 2
    # Two data rows, keyed by the header names.
    for record, (a, b, c) in zip(records, [('1', '2', '3'), ('10', '20', '30')]):
        assert record.field['A'] == a
        assert record.field['B'] == b
        assert record.field['C'] == c
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
def test_header_ignore_without(sdc_builder, sdc_executor, csv_parser):
    """The header line is skipped and fields are keyed by column position."""
    work_dir = _prepare_work_dir(sdc_executor, SIMPLE_CSV)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'IGNORE_HEADER'

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 2)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 2
    # Header dropped; fields are addressed by stringified column index.
    for record, row in zip(records, [('1', '2', '3'), ('10', '20', '30')]):
        for index, value in enumerate(row):
            assert record.field[str(index)] == value
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
def test_header_no_header(sdc_builder, sdc_executor, csv_parser):
    """With NO_HEADER the first line is treated as a regular data row."""
    work_dir = _prepare_work_dir(sdc_executor, SIMPLE_CSV)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'NO_HEADER'

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 3
    # All three lines (including the would-be header) come through as data.
    expected_rows = [('A', 'B', 'C'), ('1', '2', '3'), ('10', '20', '30')]
    for record, row in zip(records, expected_rows):
        for index, value in enumerate(row):
            assert record.field[str(index)] == value
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
def test_field_separator(sdc_builder, sdc_executor, csv_parser):
    """Semicolon-separated input is split on ';' by both parser implementations."""
    work_dir = _prepare_work_dir(sdc_executor, FIELD_SEMICOLON_CSV)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'NO_HEADER'
    # Legacy parser configuration
    directory.delimiter_format_type = 'CUSTOM'
    directory.delimiter_character = ';'
    # Univocity parser configuration
    directory.field_separator = ";"

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 3
    expected_rows = [('A', 'B', 'C'), ('1', '2', '3'), ('10', '20', '30')]
    for record, row in zip(records, expected_rows):
        for index, value in enumerate(row):
            assert record.field[str(index)] == value
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
@pytest.mark.parametrize('separator', [';', ';;'])  # Univocity limits line separator to max 2 characters (speed optimization on their side)
def test_line_separator(sdc_builder, sdc_executor, csv_parser, separator):
    """Records are split on a custom (possibly multi-character) line separator."""
    data = separator.join(["A,B,C", "1,2,3", "10,20,30"]) + separator
    work_dir = _prepare_work_dir(sdc_executor, data)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'WITH_HEADER'
    # Legacy parser configuration
    directory.delimiter_format_type = 'MULTI_CHARACTER'
    directory.multi_character_line_delimiter = separator
    directory.multi_character_field_delimiter = ","
    # Univocity parser configuration
    directory.line_separator = separator

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 2)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 2
    for record, (a, b, c) in zip(records, [('1', '2', '3'), ('10', '20', '30')]):
        assert record.field['A'] == a
        assert record.field['B'] == b
        assert record.field['C'] == c
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
def test_comments(sdc_builder, sdc_executor, csv_parser):
    """Lines starting with the comment marker are dropped by both parsers."""
    data = (
        "#Comment\n"
        "A,B,C\n"
        "#Comment\n"
        "1,2,3\n"
        "#Comment\n"
        "10,20,30\n"
        "#Comment\n"
    )
    work_dir = _prepare_work_dir(sdc_executor, data)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'WITH_HEADER'
    # Legacy parser configuration
    directory.delimiter_format_type = 'CUSTOM'
    directory.enable_comments = True
    directory.delimiter_character = ','
    directory.comment_marker = '#'
    # Univocity parser configuration
    directory.allow_comments = True
    directory.comment_character = '#'

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 2)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 2
    for record, (a, b, c) in zip(records, [('1', '2', '3'), ('10', '20', '30')]):
        assert record.field['A'] == a
        assert record.field['B'] == b
        assert record.field['C'] == c
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
def test_empty_lines(sdc_builder, sdc_executor, csv_parser):
    """Empty lines are skipped by default; only real data rows are emitted."""
    data = "A,B,C\n\n1,2,3\n\n10,20,30\n\n"
    work_dir = _prepare_work_dir(sdc_executor, data)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'WITH_HEADER'

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 2)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 2
    for record, (a, b, c) in zip(records, [('1', '2', '3'), ('10', '20', '30')]):
        assert record.field['A'] == a
        assert record.field['B'] == b
        assert record.field['C'] == c
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
def test_do_not_skip_empty_lines(sdc_builder, sdc_executor, csv_parser):
    """Empty lines are surfaced as records when empty-line skipping is disabled.

    The UNIVOCITY parser represents an empty line as a single None field,
    the legacy parser as a single empty-string field.
    """
    data = f"A,B,C\n" \
           f"\n" \
           f"1,2,3\n" \
           f"\n" \
           f"10,20,30\n" \
           f"\n"
    work_dir = _prepare_work_dir(sdc_executor, data)

    # Create Pipeline: Directory origin reading delimited files -> wiretap.
    builder = sdc_builder.get_pipeline_builder()
    origin = builder.add_stage('Directory', type='origin')
    origin.file_name_pattern = '*.csv'
    origin.files_directory = work_dir
    origin.data_format = 'DELIMITED'
    origin.csv_parser = csv_parser
    origin.header_line = 'WITH_HEADER'
    # For legacy parser
    origin.delimiter_format_type = 'CUSTOM'
    origin.delimiter_character = ','
    origin.ignore_empty_lines = False
    # For univocity parser
    origin.skip_empty_lines = False

    wiretap = builder.add_wiretap()
    origin >> wiretap.destination
    pipeline = builder.build()
    sdc_executor.add_pipeline(pipeline)

    sdc_executor.start_pipeline(pipeline)
    # BUG FIX: wait for all 5 expected records (3 data rows + 2 empty-line
    # records) instead of 2.  Waiting for only 2 could stop the pipeline
    # before every record was read, making the len(records) == 5 assertion
    # below flaky.
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 5)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 5
    # Marker value an empty line maps to, per parser implementation.
    empty_marker = None if csv_parser == 'UNIVOCITY' else ''
    assert records[0].field['A'] == empty_marker
    assert records[1].field['A'] == "1"
    assert records[1].field['B'] == "2"
    assert records[1].field['C'] == "3"
    assert records[2].field['A'] == empty_marker
    assert records[3].field['A'] == "10"
    assert records[3].field['B'] == "20"
    assert records[3].field['C'] == "30"
    assert records[4].field['A'] == empty_marker
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
@pytest.mark.parametrize('quote', ['"', ';'])
def test_quote(sdc_builder, sdc_executor, csv_parser, quote):
    """Field separators inside quoted values do not split the field."""
    data = (
        f"A,B,C\n"
        f"{quote}1,Z{quote},2,3\n"
        f"100,{quote}200,Z{quote},300\n"
        f"1,2,{quote}3,Z{quote}\n"
    )
    work_dir = _prepare_work_dir(sdc_executor, data)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'WITH_HEADER'
    directory.delimiter_format_type = 'CUSTOM'
    directory.delimiter_character = ","
    directory.quote_character = quote

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 3
    expected_rows = [("1,Z", "2", "3"), ("100", "200,Z", "300"), ("1", "2", "3,Z")]
    for record, (a, b, c) in zip(records, expected_rows):
        assert record.field['A'] == a
        assert record.field['B'] == b
        assert record.field['C'] == c
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ['UNIVOCITY'])
@pytest.mark.parametrize('quote', ['a', '"', ';'])
@pytest.mark.parametrize('escape', ['x', '+', '!'])
def test_quote_escape(sdc_builder, sdc_executor, csv_parser, quote, escape):
    """An escaped quote character inside a quoted value is kept literally."""
    data = (
        f"A,B,C\n"
        f"{quote}1,{escape}{quote}Z{quote},2,3\n"
        f"100,{quote}200,{escape}{quote}Z{quote},300\n"
        f"1,2,{quote}3,{escape}{quote}Z{quote}\n"
    )
    work_dir = _prepare_work_dir(sdc_executor, data)

    # Build pipeline: Directory origin reading delimited files -> wiretap.
    pipeline_builder = sdc_builder.get_pipeline_builder()

    directory = pipeline_builder.add_stage('Directory', type='origin')
    directory.file_name_pattern = '*.csv'
    directory.files_directory = work_dir
    directory.data_format = 'DELIMITED'
    directory.csv_parser = csv_parser
    directory.header_line = 'WITH_HEADER'
    directory.delimiter_format_type = 'CUSTOM'
    directory.delimiter_character = ","
    directory.quote_character = quote
    directory.escape_character = escape

    wiretap = pipeline_builder.add_wiretap()
    directory >> wiretap.destination

    pipeline = pipeline_builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline)
    sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)
    sdc_executor.stop_pipeline(pipeline)

    records = wiretap.output_records
    assert len(records) == 3
    expected_rows = [
        (f"1,{quote}Z", "2", "3"),
        ("100", f"200,{quote}Z", "300"),
        ("1", "2", f"3,{quote}Z"),
    ]
    for record, (a, b, c) in zip(records, expected_rows):
        assert record.field['A'] == a
        assert record.field['B'] == b
        assert record.field['C'] == c
@sdc_min_version('3.22.0')
@pytest.mark.parametrize('csv_parser', ALL_PARSERS)
def test_skip_lines_with_header(sdc_builder, sdc_executor, csv_parser):
work_dir = _prepare_work_dir(sdc_executor, SIMPLE_CSV)
# Create Pipeline
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('Directory', type='origin')
origin.file_name_pattern = '*.csv'
origin.files_directory = work_dir
origin.data_format = 'DELIMITED'
origin.csv_parser = csv_parser
origin.header_line = 'WITH_HEADER'
origin.lines_to_skip = 1
wiretap = builder.add_wiretap()
origin >> wiretap.destination
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline)
sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 1)
sdc_executor.stop_pipeline(pipeline)
records = wiretap.output_records
assert len(records) == 1
assert records[0].field['1'] == "10"
assert records[0].field['2'] == "20"
assert | |
<filename>tests/test_graphqlws_subscription.py
import asyncio
import json
import sys
import warnings
from typing import List
import pytest
from parse import search
from gql import Client, gql
from gql.transport.exceptions import TransportServerError
from .conftest import MS, WebSocketServerHelper
# Marking all tests in this file with the websockets marker
pytestmark = pytest.mark.websockets
# Template for the server's "next" payload; braces are doubled so that
# str.format only substitutes {query_id} and {number}.
countdown_server_answer = (
    '{{"type":"next","id":"{query_id}","payload":{{"data":{{"number":{number}}}}}}}'
)

COUNTING_DELAY = 20 * MS        # delay between two countdown numbers
PING_SENDING_DELAY = 50 * MS    # interval between server keepalive pings
PONG_TIMEOUT = 100 * MS         # how long the server waits for a pong

# List which can be used to store received messages by the server
logged_messages: List[str] = []
def server_countdown_factory(keepalive=False, answer_pings=True):
    """Build a websocket server coroutine speaking the graphql-ws protocol.

    The returned coroutine expects a single "subscribe" message whose query
    contains "count: <n>", streams "next" messages counting down from <n>
    to 0, then sends "complete".

    Args:
        keepalive: when True, also send periodic pings to the client and
            stop pinging if no pong arrives within PONG_TIMEOUT.
        answer_pings: when False, ignore pings received from the client
            (used to exercise client-side ping timeout handling).
    """

    async def server_countdown_template(ws, path):
        # Imported here so the module can load even without websockets.
        import websockets

        logged_messages.clear()
        try:
            await WebSocketServerHelper.send_connection_ack(
                ws, payload="dummy_connection_ack_payload"
            )

            # The first client message must be the subscription request.
            result = await ws.recv()
            logged_messages.append(result)
            json_result = json.loads(result)
            assert json_result["type"] == "subscribe"
            payload = json_result["payload"]
            query = payload["query"]
            query_id = json_result["id"]

            # Extract the starting count from the GraphQL query text.
            count_found = search("count: {:d}", query)
            count = count_found[0]
            print(f" Server: Countdown started from: {count}")

            pong_received: asyncio.Event = asyncio.Event()

            async def counting_coro():
                # Stream "next" messages from count down to 0.
                print(" Server: counting task started")
                try:
                    for number in range(count, -1, -1):
                        await ws.send(
                            countdown_server_answer.format(
                                query_id=query_id, number=number
                            )
                        )
                        await asyncio.sleep(COUNTING_DELAY)
                finally:
                    print(" Server: counting task ended")

            print(" Server: starting counting task")
            counting_task = asyncio.ensure_future(counting_coro())

            async def keepalive_coro():
                # Periodically ping the client; give up when no pong in time.
                print(" Server: keepalive task started")
                try:
                    while True:
                        await asyncio.sleep(PING_SENDING_DELAY)
                        try:
                            # Send a ping
                            await WebSocketServerHelper.send_ping(
                                ws, payload="dummy_ping_payload"
                            )
                            # Wait for a pong
                            try:
                                await asyncio.wait_for(
                                    pong_received.wait(), PONG_TIMEOUT
                                )
                            except asyncio.TimeoutError:
                                print(
                                    "\n Server: No pong received in time!\n"
                                )
                                break
                            pong_received.clear()
                        except websockets.exceptions.ConnectionClosed:
                            break
                finally:
                    print(" Server: keepalive task ended")

            if keepalive:
                print(" Server: starting keepalive task")
                keepalive_task = asyncio.ensure_future(keepalive_coro())

            async def receiving_coro():
                # Dispatch "complete", "ping" and "pong" client messages.
                print(" Server: receiving task started")
                try:
                    nonlocal counting_task
                    while True:
                        try:
                            result = await ws.recv()
                            logged_messages.append(result)
                        except websockets.exceptions.ConnectionClosed:
                            break
                        json_result = json.loads(result)

                        answer_type = json_result["type"]

                        if answer_type == "complete" and json_result["id"] == str(
                            query_id
                        ):
                            print("Cancelling counting task now")
                            counting_task.cancel()
                            if keepalive:
                                print("Cancelling keep alive task now")
                                keepalive_task.cancel()

                        elif answer_type == "ping":
                            if answer_pings:
                                payload = json_result.get("payload", None)
                                await WebSocketServerHelper.send_pong(
                                    ws, payload=payload
                                )

                        elif answer_type == "pong":
                            pong_received.set()
                finally:
                    print(" Server: receiving task ended")
                    if keepalive:
                        keepalive_task.cancel()

            print(" Server: starting receiving task")
            receiving_task = asyncio.ensure_future(receiving_coro())

            try:
                print(" Server: waiting for counting task to complete")
                await counting_task
            except asyncio.CancelledError:
                print(" Server: Now counting task is cancelled")

            print(" Server: sending complete message")
            await WebSocketServerHelper.send_complete(ws, query_id)

            if keepalive:
                print(" Server: cancelling keepalive task")
                keepalive_task.cancel()
                try:
                    await keepalive_task
                except asyncio.CancelledError:
                    print(" Server: Now keepalive task is cancelled")

            print(" Server: waiting for client to close the connection")
            try:
                await asyncio.wait_for(receiving_task, 1000 * MS)
            except asyncio.TimeoutError:
                pass

            print(" Server: cancelling receiving task")
            receiving_task.cancel()

            try:
                await receiving_task
            except asyncio.CancelledError:
                print(" Server: Now receiving task is cancelled")

        except websockets.exceptions.ConnectionClosedOK:
            pass
        except AssertionError as e:
            print(f"\n Server: Assertion failed: {e!s}\n")
        finally:
            print(" Server: waiting for websocket connection to close")
            await ws.wait_closed()
        print(" Server: connection closed")

    return server_countdown_template
async def server_countdown(ws, path):
    # Plain countdown server: no keepalive pings, client pings are answered.
    await server_countdown_factory()(ws, path)
async def server_countdown_keepalive(ws, path):
    # Countdown server that additionally sends periodic keepalive pings.
    await server_countdown_factory(keepalive=True)(ws, path)
async def server_countdown_dont_answer_pings(ws, path):
    # Countdown server that ignores pings coming from the client.
    await server_countdown_factory(answer_pings=False)(ws, path)
# GraphQL subscription document template; braces are doubled so that
# str.format only substitutes {count}.
countdown_subscription_str = """
subscription {{
countdown (count: {count}) {{
number
}}
}}
"""
@pytest.mark.asyncio
@pytest.mark.parametrize("graphqlws_server", [server_countdown], indirect=True)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription(
    event_loop, client_and_graphqlws_server, subscription_str
):
    """Consume a full countdown: every number from 10 down to 0 arrives in order."""
    session, server = client_and_graphqlws_server
    expected = 10
    subscription = gql(subscription_str.format(count=expected))

    async for result in session.subscribe(subscription):
        number = result["number"]
        print(f"Number received: {number}")
        assert number == expected
        expected -= 1

    # The iterator ended after 0 was delivered.
    assert expected == -1
@pytest.mark.asyncio
@pytest.mark.parametrize("graphqlws_server", [server_countdown], indirect=True)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_break(
    event_loop, client_and_graphqlws_server, subscription_str
):
    """Breaking out of the async iterator halts the subscription mid-stream."""
    session, server = client_and_graphqlws_server
    count = 10
    subscription = gql(subscription_str.format(count=count))

    async for result in session.subscribe(subscription):
        number = result["number"]
        print(f"Number received: {number}")
        assert number == count

        if count > 5:
            count -= 1
            continue

        # Note: the following line is only necessary for pypy3 v3.6.1
        if sys.version_info < (3, 7):
            await session._generator.aclose()
        break

    # We stopped consuming once the counter reached 5.
    assert count == 5
@pytest.mark.asyncio
@pytest.mark.parametrize("graphqlws_server", [server_countdown], indirect=True)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_task_cancel(
    event_loop, client_and_graphqlws_server, subscription_str
):
    """Cancel the consuming task mid-countdown; the countdown must not finish."""
    session, server = client_and_graphqlws_server

    count = 10
    subscription = gql(subscription_str.format(count=count))

    async def consume():
        nonlocal count
        async for result in session.subscribe(subscription):
            number = result["number"]
            print(f"Number received: {number}")
            assert number == count
            count -= 1

    consumer_task = asyncio.ensure_future(consume())

    async def cancel_after_delay():
        # Let roughly half of the countdown go by before cancelling.
        await asyncio.sleep(5.5 * COUNTING_DELAY)
        consumer_task.cancel()

    canceller_task = asyncio.ensure_future(cancel_after_delay())

    await asyncio.gather(consumer_task, canceller_task)

    # The cancellation fired before the countdown could reach zero.
    assert count > 0
@pytest.mark.asyncio
@pytest.mark.parametrize("graphqlws_server", [server_countdown], indirect=True)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_close_transport(
    event_loop, client_and_graphqlws_server, subscription_str
):
    """Close the transport mid-countdown; the subscription must stop early."""
    session, server = client_and_graphqlws_server

    count = 10
    subscription = gql(subscription_str.format(count=count))

    async def consume():
        nonlocal count
        async for result in session.subscribe(subscription):
            number = result["number"]
            print(f"Number received: {number}")
            assert number == count
            count -= 1

    consumer_task = asyncio.ensure_future(consume())

    async def close_after_delay():
        # Let roughly half of the countdown go by before closing the transport.
        await asyncio.sleep(5.5 * COUNTING_DELAY)
        await session.transport.close()

    closer_task = asyncio.ensure_future(close_after_delay())

    await asyncio.gather(consumer_task, closer_task)

    # The transport was closed before the countdown could reach zero.
    assert count > 0
async def server_countdown_close_connection_in_middle(ws, path):
    """Countdown server that abruptly closes the connection halfway through."""
    await WebSocketServerHelper.send_connection_ack(ws)

    # Wait for and validate the client's subscribe message.
    subscribe_message = json.loads(await ws.recv())
    assert subscribe_message["type"] == "subscribe"

    query = subscribe_message["payload"]["query"]
    query_id = subscribe_message["id"]

    # Extract the requested count from the query text, then stop halfway.
    count_found = search("count: {:d}", query)
    count = count_found[0]
    stopping_before = count // 2
    print(f"Countdown started from: {count}, stopping server before {stopping_before}")

    for number in range(count, stopping_before, -1):
        await ws.send(countdown_server_answer.format(query_id=query_id, number=number))
        await asyncio.sleep(COUNTING_DELAY)

    print("Closing server while subscription is still running now")
    await ws.close()
    await ws.wait_closed()
    print("Server is now closed")
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "graphqlws_server", [server_countdown_close_connection_in_middle], indirect=True
)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_server_connection_closed(
    event_loop, client_and_graphqlws_server, subscription_str
):
    """An abrupt server-side close surfaces as ConnectionClosedOK on the client."""
    import websockets

    session, server = client_and_graphqlws_server

    count = 10
    subscription = gql(subscription_str.format(count=count))

    with pytest.raises(websockets.exceptions.ConnectionClosedOK):
        async for result in session.subscribe(subscription):
            number = result["number"]
            print(f"Number received: {number}")
            assert number == count
            count -= 1
@pytest.mark.asyncio
@pytest.mark.parametrize("graphqlws_server", [server_countdown], indirect=True)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_with_operation_name(
    event_loop, client_and_graphqlws_server, subscription_str
):
    """Subscribe with an explicit operation name and verify it is sent."""
    session, server = client_and_graphqlws_server

    expected = 10
    subscription = gql(subscription_str.format(count=expected))

    async for result in session.subscribe(
        subscription, operation_name="CountdownSubscription"
    ):
        number = result["number"]
        print(f"Number received: {number}")
        assert number == expected
        expected -= 1

    assert expected == -1

    # Check that the query contains the operationName
    assert '"operationName": "CountdownSubscription"' in logged_messages[0]
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "graphqlws_server", [server_countdown_keepalive], indirect=True
)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_with_keepalive(
    event_loop, client_and_graphqlws_server, subscription_str
):
    """Full countdown against a keep-alive server, then check stored payloads."""
    session, server = client_and_graphqlws_server

    expected = 10
    subscription = gql(subscription_str.format(count=expected))

    async for result in session.subscribe(subscription):
        number = result["number"]
        print(f"Number received: {number}")
        assert number == expected
        expected -= 1

    assert expected == -1

    # The transport should have recorded the payloads the server attached
    # to its ping and connection_ack messages.
    payloads = session.transport.payloads
    assert "ping" in payloads
    assert payloads["ping"] == "dummy_ping_payload"
    assert payloads["connection_ack"] == "dummy_connection_ack_payload"
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "graphqlws_server", [server_countdown_keepalive], indirect=True
)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_with_keepalive_with_timeout_ok(
    event_loop, graphqlws_server, subscription_str
):
    """A keep-alive timeout larger than the server cadence completes normally."""
    from gql.transport.websockets import WebsocketsTransport

    path = "/graphql"
    url = f"ws://{graphqlws_server.hostname}:{graphqlws_server.port}{path}"

    # Generous timeout relative to the server's counting delay: no error expected.
    transport = WebsocketsTransport(url=url, keep_alive_timeout=(5 * COUNTING_DELAY))
    client = Client(transport=transport)

    expected = 10
    subscription = gql(subscription_str.format(count=expected))

    async with client as session:
        async for result in session.subscribe(subscription):
            number = result["number"]
            print(f"Number received: {number}")
            assert number == expected
            expected -= 1

    assert expected == -1
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "graphqlws_server", [server_countdown_keepalive], indirect=True
)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_with_keepalive_with_timeout_nok(
    event_loop, graphqlws_server, subscription_str
):
    """A keep-alive timeout shorter than the server cadence raises an error."""
    from gql.transport.websockets import WebsocketsTransport

    path = "/graphql"
    url = f"ws://{graphqlws_server.hostname}:{graphqlws_server.port}{path}"

    # Timeout shorter than the server's counting delay: must time out.
    transport = WebsocketsTransport(url=url, keep_alive_timeout=(COUNTING_DELAY / 2))
    client = Client(transport=transport)

    expected = 10
    subscription = gql(subscription_str.format(count=expected))

    async with client as session:
        with pytest.raises(TransportServerError) as exc_info:
            async for result in session.subscribe(subscription):
                number = result["number"]
                print(f"Number received: {number}")
                assert number == expected
                expected -= 1

    assert "No keep-alive message has been received" in str(exc_info.value)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "graphqlws_server", [server_countdown_keepalive], indirect=True
)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_with_ping_interval_ok(
    event_loop, graphqlws_server, subscription_str
):
    """Client-initiated pings with a generous pong timeout complete normally."""
    from gql.transport.websockets import WebsocketsTransport

    path = "/graphql"
    url = f"ws://{graphqlws_server.hostname}:{graphqlws_server.port}{path}"

    # The server answers pings here, so these intervals should never trip.
    transport = WebsocketsTransport(
        url=url,
        ping_interval=(5 * COUNTING_DELAY),
        pong_timeout=(4 * COUNTING_DELAY),
    )
    client = Client(transport=transport)

    expected = 10
    subscription = gql(subscription_str.format(count=expected))

    async with client as session:
        async for result in session.subscribe(subscription):
            number = result["number"]
            print(f"Number received: {number}")
            assert number == expected
            expected -= 1

    assert expected == -1
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "graphqlws_server", [server_countdown_dont_answer_pings], indirect=True
)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_with_ping_interval_nok(
    event_loop, graphqlws_server, subscription_str
):
    """Client-initiated pings against a silent server must raise an error."""
    from gql.transport.websockets import WebsocketsTransport

    path = "/graphql"
    url = f"ws://{graphqlws_server.hostname}:{graphqlws_server.port}{path}"

    # This server variant never answers pings, so the pong wait must fail.
    transport = WebsocketsTransport(url=url, ping_interval=(5 * COUNTING_DELAY))
    client = Client(transport=transport)

    expected = 10
    subscription = gql(subscription_str.format(count=expected))

    async with client as session:
        with pytest.raises(TransportServerError) as exc_info:
            async for result in session.subscribe(subscription):
                number = result["number"]
                print(f"Number received: {number}")
                assert number == expected
                expected -= 1

    assert "No pong received" in str(exc_info.value)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "graphqlws_server", [server_countdown_keepalive], indirect=True
)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_manual_pings_with_payload(
    event_loop, graphqlws_server, subscription_str
):
    """Send a manual ping per received item and verify the echoed pong payload."""
    from gql.transport.websockets import WebsocketsTransport

    path = "/graphql"
    url = f"ws://{graphqlws_server.hostname}:{graphqlws_server.port}{path}"

    transport = WebsocketsTransport(url=url)
    client = Client(transport=transport)

    expected = 10
    subscription = gql(subscription_str.format(count=expected))

    async with client as session:
        async for result in session.subscribe(subscription):
            number = result["number"]
            print(f"Number received: {number}")

            # Manually ping with a payload, then wait for the matching pong.
            ping_payload = {"count_received": expected}
            await transport.send_ping(payload=ping_payload)
            await asyncio.wait_for(transport.pong_received.wait(), 10000 * MS)
            transport.pong_received.clear()
            assert transport.payloads["pong"] == ping_payload

            assert number == expected
            expected -= 1

    assert expected == -1
@pytest.mark.asyncio
@pytest.mark.parametrize(
"graphqlws_server", [server_countdown_keepalive], indirect=True
)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_graphqlws_subscription_manual_pong_answers_with_payload(
event_loop, graphqlws_server, subscription_str
):
from gql.transport.websockets import WebsocketsTransport
path = "/graphql"
url = f"ws://{graphqlws_server.hostname}:{graphqlws_server.port}{path}"
transport = WebsocketsTransport(url=url, answer_pings=False)
client |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.