| text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
"""
Interactor
This class can be used to simplify managing callback resources.
Callbacks are often used by interactors with vtk, and they are
hard to keep track of.
Use multiple inheritance to inherit from this class to get access
to the convenience methods.
Observers for vtk events can be added through
AddObserver(obj, eventName, callbackFunction) and when it is time
to clean up, just call cleanUpCallbacks().
:Authors:
Berend Klein Haneveld
"""
class Interactor(object):
"""
Interactor
"""
def __init__(self):
super(Interactor, self).__init__()
def AddObserver(self, obj, eventName, callbackFunction, priority=None):
"""
Creates a callback and stores the callback so that later
on the callbacks can be properly cleaned up.
"""
if not hasattr(self, "_callbacks"):
self._callbacks = []
if priority is not None:
callback = obj.AddObserver(eventName, callbackFunction, priority)
else:
callback = obj.AddObserver(eventName, callbackFunction)
self._callbacks.append((obj, callback))
def cleanUpCallbacks(self):
"""
Cleans up the vtkCallBacks
"""
if not hasattr(self, "_callbacks"):
return
for obj, callback in self._callbacks:
obj.RemoveObserver(callback)
self._callbacks = []
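# A minimal usage sketch (the subclass name, `widget`, and the handler below
# are hypothetical, for illustration only):
#
#     class SliceViewer(Interactor):
#         def setup(self, widget):
#             self.AddObserver(widget, "LeftButtonPressEvent", self.onLeftButtonPressed)
#         def close(self):
#             self.cleanUpCallbacks()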
| berendkleinhaneveld/Registrationshop | ui/Interactor.py | Python | mit | 1,240 | ["VTK"] | f7096f40b557d4ea2c2df176fc8fe62d6dffa338b4f6bb26c76c9d80ba661a0c |
import os
import mdtraj as md
import mdtraj.utils.fah
n_runs = 1
n_clones = 500 # To do: look this up via glob
project = 10466
codename = {10466:"T4", 10467:"src", 10468:"abl", 10469:"EGFR"}[project]
min_num_frames = 800
stride = 4
input_data_path = "/data/choderalab/fah/analysis/%d/concatenated_trajectories/" % project
output_data_path = "/data/choderalab/fah/analysis/%d/protein_trajectories/" % project
top_filename = "/data/choderalab/fah/analysis/FAHNVT/%s/equil_npt/equil_npt_final_step.pdb" % codename
trj0 = md.load(top_filename)
top, bonds = trj0.top.to_dataframe()
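# Chain 0 is assumed to hold the protein; selecting only its atoms strips
# water and other solvent from the trajectories.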
atom_indices = top.index[top.chainID == 0].values
trj0.restrict_atoms(atom_indices)
trj0.save(os.path.join(output_data_path, "../", "protein.pdb"))
for run in range(n_runs):
for clone in range(n_clones):
print(run, clone)
in_filename = os.path.join(input_data_path, "run%d-clone%d.h5" % (run, clone))
if not os.path.exists(in_filename):
continue
if len(md.formats.HDF5TrajectoryFile(in_filename)) < min_num_frames:
continue
trj = md.load(in_filename, atom_indices=atom_indices, stride=stride)
out_filename = os.path.join(output_data_path, "run%d-clone%d.h5" % (run, clone))
trj.save(out_filename)
| kyleabeauchamp/fah-projects | code/analysis/strip_water.py | Python | gpl-2.0 | 1,265 | ["MDTraj"] | 979b368378a8fdb0921fbe20db51e0b10d3d7c9e3d54d9f4c990547bb674be67 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send Conditional CLI commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_conditional_command
author: "Dave Kasberg (@dkasberg)"
short_description: Execute a single command based on condition on devices running Lenovo CNOS
description:
- This module allows you to modify the running configuration of a switch. It provides a way to
execute a single CNOS command on a network device by evaluating the current running configuration
and executing the command only if the specific settings have not been already configured.
The CNOS command is passed as an argument of the method.
This module functions the same as the cnos_command module.
The only exception is that the following inventory variable can be specified
["condition = <flag string>"]
When this inventory variable is specified as the variable of a task, the command is executed for
the network element that matches the flag string. Usually, commands are executed across a group
of network devices. When there is a requirement to skip the execution of the command on one or
more devices, it is recommended to use this module.
This module uses SSH to manage network device configuration.
For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_conditional_command.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
clicommand:
description:
- This specifies the CLI command as an attribute to this method. The command is passed using
double quotes. The variables can be placed directly on to the CLI commands or can be invoked
from the vars directory.
required: true
default: Null
condition:
description:
- If you specify condition=false in the inventory file against any device, the command execution
is skipped for that device.
required: true
default: Null
flag:
description:
- If a task needs to be executed, you have to set the flag the same as it is specified in the
inventory for that device.
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_conditional_command. These are written in the main.yml file of the tasks directory.
---
- name: Applying CLI template on VLAG Tier1 Leaf Switch1
cnos_conditional_command:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_conditional_command_{{ inventory_hostname }}_output.txt"
condition: "{{ hostvars[inventory_hostname]['condition']}}"
flag: leaf_switch2
command: "spanning-tree mode enable"
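
A matching inventory entry might look like the following (a sketch; the host
address, credentials, and device type are hypothetical):

[leaf_switches]
10.241.107.39 username=<username> password=<password> deviceType=g8272_cnos condition=leaf_switch2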
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Command Applied"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except ImportError:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
clicommand=dict(required=True),
outputfile=dict(required=True),
condition=dict(required=True),
flag=dict(required=True),
host=dict(required=True),
deviceType=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True), ), supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
condition = module.params['condition']
flag = module.params['flag']
cliCommand = module.params['clicommand']
outputfile = module.params['outputfile']
deviceType = module.params['deviceType']
hostIP = module.params['host']
output = ""
if not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required for this module')
if (condition != flag):
module.exit_json(changed=True, msg="Command Skipped for this value")
return " "
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
# Send the CLI command
output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "(config)#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="CLI Command executed and results saved in file ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| Russell-IO/ansible | lib/ansible/modules/network/cnos/cnos_conditional_command.py | Python | gpl-3.0 | 7,305 | ["VisIt"] | 743a3c982ded1069c9bb47ad4b9854a914adc89fdc378f064b1607fbdb4d5841 |
from algernon.neuron import Neuron
class Perceptron:
def __init__(self, n_inputs):
self.neuron = Neuron(n_inputs)
def train(self, in_set, des_out_set, max_epoch=500, des_error=0.1, learning_rate=0.1):
for epoch in range(max_epoch):
error = 0
error_weights = [0] * len(self.neuron.weights)
for x_set, des_out in zip(in_set, des_out_set):
y = self.neuron.transfer(self.neuron.activate(x_set))
y_error = -(y - des_out)
for i, x in enumerate(x_set):
error_weights[i] += (x * y_error)
# the bias (last weight) is updated once per sample, after the input loop
error_weights[-1] += y_error
error += (y_error ** 2) / 2.0
for i, error_weight in enumerate(error_weights):
self.neuron.weights[i] += (error_weight * learning_rate)
self.log(epoch, error, error_weights, self.neuron.weights)
if error <= des_error:
break
def go(self, inputs):
return self.neuron.transfer(self.neuron.activate(inputs))
def log(self, epoch, error, error_weights, weights):
print("EPOCH #%d" % epoch)
print(" Error: %f" % error)
print(" Error weights: (last is bias)")
for e in error_weights:
print(" %f" % e)
print(" Weights: (last is bias)")
for e in weights:
print(" %f" % e)
| andrea96/algernon | algernon/perceptron.py | Python | gpl-3.0 | 1,414 | ["NEURON"] | 256e798e2c30a59caebd37045ba9c25aae4ccd0e001ad1420a84e449806da252 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2009 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
import platform
from distutils.core import setup
version = '0.9.2'
if platform.system() in ('Linux','Unix'):
man1pages = [('share/man/man1', [
'csv2ods/csv2ods.1',
'mailodf/mailodf.1',
'odf2xhtml/odf2xhtml.1',
'odf2mht/odf2mht.1',
'odf2xml/odf2xml.1',
'odfimgimport/odfimgimport.1',
'odflint/odflint.1',
'odfmeta/odfmeta.1',
'odfoutline/odfoutline.1',
'odfuserfield/odfuserfield.1',
'xml2odf/xml2odf.1'])]
else:
man1pages = []
# Currently no other data files to add
datafiles = [] + man1pages
setup(name='odfpy',
version=version,
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Office/Business',
'Topic :: Software Development :: Libraries :: Python Modules',
],
description='Python API and tools to manipulate OpenDocument files',
long_description = (
"""
Odfpy is a library to read and write OpenDocument v. 1.1 files.
The main focus has been to prevent the programmer from creating invalid
documents. It has checks that raise an exception if the programmer adds
an invalid element, adds an attribute unknown to the grammar, forgets to
add a required attribute or adds text to an element that doesn't allow it.
These checks and the API itself were generated from the RelaxNG
schema, and then hand-edited. Therefore the API is complete and can
handle all ODF constructions.
In addition to the API, there are a few scripts:
- csv2odf - Create OpenDocument spreadsheet from comma separated values
- mailodf - Email ODF file as HTML archive
- odf2xhtml - Convert ODF to (X)HTML
- odf2mht - Convert ODF to HTML archive
- odf2xml - Create OpenDocument XML file from OD? package
- odfimgimport - Import external images
- odflint - Check ODF file for problems
- odfmeta - List or change the metadata of an ODF file
- odfoutline - Show outline of OpenDocument
- odfuserfield - List or change the user-field declarations in an ODF file
- xml2odf - Create OD? package from OpenDocument in XML form
Visit http://odfpy.forge.osor.eu/ for documentation and examples."""
),
author='Soren Roug',
author_email='soren.roug@eea.europa.eu',
url='http://opendocumentfellowship.com/development/projects/odfpy',
packages=['odf'],
scripts=[
'csv2ods/csv2ods',
'mailodf/mailodf',
'odf2xhtml/odf2xhtml',
'odf2mht/odf2mht',
'odf2xml/odf2xml',
'odfimgimport/odfimgimport',
'odflint/odflint',
'odfmeta/odfmeta',
'odfoutline/odfoutline',
'odfuserfield/odfuserfield',
'xml2odf/xml2odf'],
data_files=datafiles
)
| agiacomolli/odfpy | setup.py | Python | apache-2.0 | 3,949 | ["VisIt"] | 559aa63807e2a966131dfde0db6a036bfccb52a19d856470daf264da104b0f45 |
# -*- coding: utf-8 -*-
#
# structural_plasticity.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Structural Plasticity example
-----------------------------
This example shows a simple network of two populations where structural
plasticity is used. The network has 1000 neurons, 80% excitatory and
20% inhibitory. The simulation starts without any connectivity. A set of
homeostatic rules are defined, according to which structural plasticity will
create and delete synapses dynamically during the simulation until a desired
level of electrical activity is reached. The model of structural plasticity
used here corresponds to the formulation presented in [1]_.
At the end of the simulation, a plot of the evolution of the connectivity
in the network and the average calcium concentration in the neurons is created.
References
~~~~~~~~~~
.. [1] Butz, M., and van Ooyen, A. (2013). A simple rule for dendritic spine and axonal bouton formation can
account for cortical reorganization after focal retinal lesions. PLoS Comput. Biol. 9 (10), e1003259.
"""
####################################################################################
# First, we import all the necessary modules.
import nest
import numpy
import matplotlib.pyplot as plt
import sys
####################################################################################
# We define general simulation parameters
class StructuralPlasticityExample:
def __init__(self):
# simulated time (ms)
self.t_sim = 200000.0
# simulation step (ms).
self.dt = 0.1
self.number_excitatory_neurons = 800
self.number_inhibitory_neurons = 200
# Structural_plasticity properties
self.update_interval = 10000.0
self.record_interval = 1000.0
# rate of background Poisson input
self.bg_rate = 10000.0
self.neuron_model = 'iaf_psc_exp'
####################################################################################
# In this implementation of structural plasticity, neurons grow
# connection points called synaptic elements. Synapses can be created
# between compatible synaptic elements. The growth of these elements is
# guided by homeostatic rules, defined as growth curves.
# Here we specify the growth curves for synaptic elements of excitatory
# and inhibitory neurons.
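# As a sketch of the "gaussian" rule (reproduced from the NEST documentation
# as background; the formula is an assumption, not part of this file):
#   dz/dt = nu * (2 * exp(-((Ca - xi) / zeta)**2) - 1)
# with xi = (eta + eps) / 2 and zeta = (eta - eps) / (2 * sqrt(ln 2)), so
# synaptic elements grow while the calcium trace Ca lies between eta and
# eps and retract outside that band.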
# Excitatory synaptic elements of excitatory neurons
self.growth_curve_e_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.05, # Ca2+
}
# Inhibitory synaptic elements of excitatory neurons
self.growth_curve_e_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_e_e['eps'], # Ca2+
}
# Excitatory synaptic elements of inhibitory neurons
self.growth_curve_i_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0004, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.2, # Ca2+
}
# Inhibitory synaptic elements of inhibitory neurons
self.growth_curve_i_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_i_e['eps'] # Ca2+
}
# Now we specify the neuron model.
self.model_params = {'tau_m': 10.0, # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
't_ref': 2.0, # absolute refractory period (ms)
'E_L': -65.0, # resting membrane potential (mV)
'V_th': -50.0, # spike threshold (mV)
'C_m': 250.0, # membrane capacitance (pF)
'V_reset': -65.0 # reset potential (mV)
}
self.nodes_e = None
self.nodes_i = None
self.mean_ca_e = []
self.mean_ca_i = []
self.total_connections_e = []
self.total_connections_i = []
####################################################################################
# We initialize variables for the postsynaptic currents of the
# excitatory, inhibitory, and external synapses. These values were
# calculated from a PSP amplitude of 1 for excitatory synapses,
# -1 for inhibitory synapses and 0.11 for external synapses.
self.psc_e = 585.0
self.psc_i = -585.0
self.psc_ext = 6.2
def prepare_simulation(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
####################################################################################
# We set global kernel parameters. Here we define the resolution
# for the simulation, which is also the time resolution for the update
# of the synaptic elements.
nest.resolution = self.dt
####################################################################################
# Set Structural Plasticity synaptic update interval which is how often
# the connectivity will be updated inside the network. It is important
# to notice that synaptic elements and connections change on different
# time scales.
nest.structural_plasticity_update_interval = self.update_interval
####################################################################################
# Now we define Structural Plasticity synapses. In this example we create
# two synapse models, one for excitatory and one for inhibitory synapses.
# Then we define that excitatory synapses can only be created between a
# pre-synaptic element called `Axon_ex` and a postsynaptic element
# called `Den_ex`. In a similar manner, synaptic elements for inhibitory
# synapses are defined.
nest.CopyModel('static_synapse', 'synapse_ex')
nest.SetDefaults('synapse_ex', {'weight': self.psc_e, 'delay': 1.0})
nest.CopyModel('static_synapse', 'synapse_in')
nest.SetDefaults('synapse_in', {'weight': self.psc_i, 'delay': 1.0})
nest.structural_plasticity_synapses = {
'synapse_ex': {
'synapse_model': 'synapse_ex',
'post_synaptic_element': 'Den_ex',
'pre_synaptic_element': 'Axon_ex'
},
'synapse_in': {
'synapse_model': 'synapse_in',
'post_synaptic_element': 'Den_in',
'pre_synaptic_element': 'Axon_in'
}
}
def create_nodes(self):
"""
Assign growth curves to synaptic elements
"""
synaptic_elements = {
'Den_ex': self.growth_curve_e_e,
'Den_in': self.growth_curve_e_i,
'Axon_ex': self.growth_curve_e_e,
}
synaptic_elements_i = {
'Den_ex': self.growth_curve_i_e,
'Den_in': self.growth_curve_i_i,
'Axon_in': self.growth_curve_i_i,
}
####################################################################################
# Then it is time to create two populations: one with 80% of the total
# network size as excitatory neurons and another with the remaining 20%
# as inhibitory neurons.
self.nodes_e = nest.Create('iaf_psc_alpha',
self.number_excitatory_neurons,
{'synaptic_elements': synaptic_elements})
self.nodes_i = nest.Create('iaf_psc_alpha',
self.number_inhibitory_neurons,
{'synaptic_elements': synaptic_elements_i})
self.nodes_e.synaptic_elements = synaptic_elements
self.nodes_i.synaptic_elements = synaptic_elements_i
def connect_external_input(self):
"""
We create and connect the Poisson generator for external input
"""
noise = nest.Create('poisson_generator')
noise.rate = self.bg_rate
nest.Connect(noise, self.nodes_e, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
nest.Connect(noise, self.nodes_i, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
####################################################################################
# In order to save the amount of average calcium concentration in each
# population through time we create the function ``record_ca``. Here we use
# the value of `Ca` for every neuron in the network and then
# store the average.
def record_ca(self):
ca_e = self.nodes_e.Ca  # Calcium concentration
self.mean_ca_e.append(numpy.mean(ca_e))
ca_i = self.nodes_i.Ca  # Calcium concentration
self.mean_ca_i.append(numpy.mean(ca_i))
####################################################################################
# In order to save the state of the connectivity in the network through time
# we create the function ``record_connectivity``. Here we retrieve the number
# of connected pre-synaptic elements of each neuron. The total amount of
# excitatory connections is equal to the total amount of connected excitatory
# pre-synaptic elements. The same applies for inhibitory connections.
def record_connectivity(self):
syn_elems_e = self.nodes_e.synaptic_elements
syn_elems_i = self.nodes_i.synaptic_elements
self.total_connections_e.append(sum(neuron['Axon_ex']['z_connected']
for neuron in syn_elems_e))
self.total_connections_i.append(sum(neuron['Axon_in']['z_connected']
for neuron in syn_elems_i))
####################################################################################
# We define a function to plot the recorded values
# at the end of the simulation.
def plot_data(self):
fig, ax1 = plt.subplots()
ax1.axhline(self.growth_curve_e_e['eps'],
linewidth=4.0, color='#9999FF')
ax1.plot(self.mean_ca_e, 'b',
label='Ca Concentration Excitatory Neurons', linewidth=2.0)
ax1.axhline(self.growth_curve_i_e['eps'],
linewidth=4.0, color='#FF9999')
ax1.plot(self.mean_ca_i, 'r',
label='Ca Concentration Inhibitory Neurons', linewidth=2.0)
ax1.set_ylim([0, 0.275])
ax1.set_xlabel("Time in [s]")
ax1.set_ylabel("Ca concentration")
ax2 = ax1.twinx()
ax2.plot(self.total_connections_e, 'm',
label='Excitatory connections', linewidth=2.0, linestyle='--')
ax2.plot(self.total_connections_i, 'k',
label='Inhibitory connections', linewidth=2.0, linestyle='--')
ax2.set_ylim([0, 2500])
ax2.set_ylabel("Connections")
ax1.legend(loc=1)
ax2.legend(loc=4)
plt.savefig('StructuralPlasticityExample.eps', format='eps')
####################################################################################
# It is time to specify how we want to perform the simulation. In this
# function we first enable structural plasticity in the network and then we
# simulate in steps. On each step we record the calcium concentration and the
# connectivity. At the end of the simulation, the plot of connections and
# calcium concentration through time is generated.
def simulate(self):
if nest.NumProcesses() > 1:
sys.exit("For simplicity, this example only works " +
"for a single process.")
nest.EnableStructuralPlasticity()
print("Starting simulation")
sim_steps = numpy.arange(0, self.t_sim, self.record_interval)
for i, step in enumerate(sim_steps):
nest.Simulate(self.record_interval)
self.record_ca()
self.record_connectivity()
if i % 20 == 0:
print("Progress: " + str(i / 2) + "%")
print("Simulation finished successfully")
####################################################################################
# Finally we take all the functions that we have defined and create the sequence
# for our example. We prepare the simulation, create the nodes for the network,
# connect the external input and then simulate. Please note that as we are
# simulating 200 biological seconds in this example, it will take a few minutes
# to complete.
if __name__ == '__main__':
example = StructuralPlasticityExample()
# Prepare simulation
example.prepare_simulation()
example.create_nodes()
example.connect_external_input()
# Start simulation
example.simulate()
example.plot_data()
| sanjayankur31/nest-simulator | pynest/examples/structural_plasticity.py | Python | gpl-2.0 | 13,737 | ["Gaussian", "NEURON"] | 3e17930b3e2ad43cc2473abc96a368b3a804e39f115471f477b00ba48aecbadc |
from pathlib import Path
import numpy as np
import pytest
import psi4
from psi4.driver.p4util.solvers import davidson_solver, hamiltonian_solver
from psi4.driver.procrouting.response.scf_products import (TDRSCFEngine,
TDUSCFEngine)
from .utils import *
## marks
# reference type
UHF = pytest.mark.unrestricted
RHF_singlet = pytest.mark.restricted_singlet
RHF_triplet = pytest.mark.restricted_triplet
# functional types
hf = pytest.mark.hf
lda = pytest.mark.lda
gga = pytest.mark.gga
hyb_gga = pytest.mark.hyb_gga
hyb_gga_lrc = pytest.mark.hyb_gga_lrc
# response type
RPA = pytest.mark.RPA
TDA = pytest.mark.TDA
@pytest.fixture
def tddft_systems():
psi4.core.clean()
# Canonical unrestricted system
ch2 = psi4.geometry("""0 3
C 0.000000 0.000000 0.159693
H -0.000000 0.895527 -0.479080
H -0.000000 -0.895527 -0.479080
no_reorient
no_com
""")
# Canonical restricted system
h2o = psi4.geometry("""0 1
O 0.000000 0.000000 0.135446
H -0.000000 0.866812 -0.541782
H -0.000000 -0.866812 -0.541782
no_reorient
no_com
""")
return {'UHF': ch2, 'RHF': h2o}
@pytest.fixture
def wfn_factory(tddft_systems):
def _build_wfn(ref, func, basis, nosym):
if ref.startswith('RHF'):
mol = tddft_systems['RHF']
else:
mol = tddft_systems['UHF']
psi4.set_options({'reference': 'UHF'})
if nosym:
mol.reset_point_group('c1')
psi4.set_options({'scf_type': 'pk', 'e_convergence': 8, 'd_convergence': 8, 'save_jk': True})
e, wfn = psi4.energy("{}/{}".format(func, basis), return_wfn=True, molecule=mol)
return wfn
return _build_wfn
@pytest.fixture
def solver_funcs():
return {'TDA': davidson_solver, 'RPA': hamiltonian_solver}
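# Background note (standard TDSCF theory, not taken from this file): in the
# TDA the response equations reduce to a Hermitian eigenproblem, solved with
# the Davidson solver, while full RPA keeps the paired (A, B) block structure
# that the Hamiltonian solver handles.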
@pytest.fixture
def engines():
return {
'RHF-1': lambda w, p: TDRSCFEngine(w, ptype=p.lower(), triplet=False),
'RHF-3': lambda w, p: TDRSCFEngine(w, ptype=p.lower(), triplet=True),
'UHF': lambda w, p: TDUSCFEngine(w, ptype=p.lower())
}
@pytest.fixture
def expected():
"""Fixture holding expected values"""
return {
"UHF-SVWN-RPA-cc-pvdz": [{
"e": 0.18569065156695178,
"sym": "B2"
}, {
"e": 0.2371470529055981,
"sym": "A2"
}, {
"e": 0.2606702942260291,
"sym": "B1"
}, {
"e": 0.304699657491157,
"sym": "A2"
}, {
"e": 0.3264956806549558,
"sym": "A1"
}, {
"e": 0.37615871505650317,
"sym": "B2"
}, {
"e": 0.39731897494137147,
"sym": "A1"
}, {
"e": 0.4074250386237938,
"sym": "B2"
}, {
"e": 0.44300573265041665,
"sym": "B2"
}, {
"e": 0.4471437063618376,
"sym": "A1"
}],
"UHF-SVWN-TDA-cc-pvdz": [{
"e": 0.18742889452032843,
"sym": "B2"
}, {
"e": 0.2379445132034474,
"sym": "A2"
}, {
"e": 0.2614052806756598,
"sym": "B1"
}, {
"e": 0.3049128035615499,
"sym": "A2"
}, {
"e": 0.3271020444759012,
"sym": "A1"
}, {
"e": 0.37727956939219004,
"sym": "B2"
}, {
"e": 0.4011813287341809,
"sym": "A1"
}, {
"e": 0.40830334743110247,
"sym": "B2"
}, {
"e": 0.44456757885588194,
"sym": "B2"
}, {
"e": 0.44881580053474746,
"sym": "A1"
}],
"RHF-1-SVWN-RPA-cc-pvdz": [{
"e": 0.22569596402035155,
"sym": "B1"
}, {
"e": 0.2900256530242798,
"sym": "A2"
}, {
"e": 0.32071133729636214,
"sym": "A1"
}, {
"e": 0.38457063497252675,
"sym": "B2"
}, {
"e": 0.4422266470138081,
"sym": "B2"
}, {
"e": 0.5495126190664035,
"sym": "A1"
}, {
"e": 0.6878003195644223,
"sym": "A2"
}, {
"e": 0.7199890511259994,
"sym": "B1"
}, {
"e": 0.798062986738022,
"sym": "B2"
}, {
"e": 0.804067826031505,
"sym": "A1"
}],
"RHF-1-SVWN-TDA-cc-pvdz": [{
"e": 0.2272761848870576,
"sym": "B1"
}, {
"e": 0.290485019555299,
"sym": "A2"
}, {
"e": 0.32546669962547287,
"sym": "A1"
}, {
"e": 0.38741503253259757,
"sym": "B2"
}, {
"e": 0.44712165676834864,
"sym": "B2"
}, {
"e": 0.5652927781399749,
"sym": "A1"
}, {
"e": 0.6879399669898522,
"sym": "A2"
}, {
"e": 0.721392875244794,
"sym": "B1"
}, {
"e": 0.8013924753548491,
"sym": "B2"
}, {
"e": 0.8054973746760368,
"sym": "A1"
}],
"RHF-3-SVWN-RPA-cc-pvdz": [{
"e": 0.19919602757891613,
"sym": "B1"
}, {
"e": 0.2697069526242387,
"sym": "A2"
}, {
"e": 0.2731025900215326,
"sym": "A1"
}, {
"e": 0.3404714479946839,
"sym": "B2"
}, {
"e": 0.39101279120353993,
"sym": "B2"
}, {
"e": 0.4437113196420621,
"sym": "A1"
}, {
"e": 0.6798587909761624,
"sym": "A2"
}, {
"e": 0.6987920419186496,
"sym": "B1"
}, {
"e": 0.7436776643975973,
"sym": "B2"
}, {
"e": 0.7614459618174199,
"sym": "A1"
}],
"RHF-3-SVWN-TDA-cc-pvdz": [{
"e": 0.19988323990932086,
"sym": "B1"
}, {
"e": 0.2701736690197542,
"sym": "A2"
}, {
"e": 0.2748922820263834,
"sym": "A1"
}, {
"e": 0.3421398672353456,
"sym": "B2"
}, {
"e": 0.39202707250403035,
"sym": "B2"
}, {
"e": 0.44628377221576965,
"sym": "A1"
}, {
"e": 0.6799727138758552,
"sym": "A2"
}, {
"e": 0.6990125378535389,
"sym": "B1"
}, {
"e": 0.7447874939365398,
"sym": "B2"
}, {
"e": 0.7633679513832042,
"sym": "A1"
}],
"UHF-HF-RPA-cc-pvdz": [{
"e": 0.2445704160468683,
"sym": "B2"
}, {
"e": 0.2878574429978692,
"sym": "A2"
}, {
"e": 0.3179110389232691,
"sym": "B1"
}, {
"e": 0.3547301851175197,
"sym": "A2"
}, {
"e": 0.3879221731828428,
"sym": "A1"
}, {
"e": 0.4038089052916107,
"sym": "B2"
}, {
"e": 0.43529939972603865,
"sym": "A1"
}, {
"e": 0.4508039388809985,
"sym": "B2"
}, {
"e": 0.4834961361605727,
"sym": "B2"
}, {
"e": 0.515831865012076,
"sym": "A1"
}],
"UHF-HF-TDA-cc-pvdz": [{
"e": 0.24880026306449304,
"sym": "B2"
}, {
"e": 0.28968755925744966,
"sym": "A2"
}, {
"e": 0.32054964027744337,
"sym": "B1"
}, {
"e": 0.35741288565867185,
"sym": "A2"
}, {
"e": 0.39502214228627547,
"sym": "A1"
}, {
"e": 0.41144173957102564,
"sym": "B2"
}, {
"e": 0.4445528791268893,
"sym": "A1"
}, {
"e": 0.4535932124573471,
"sym": "B2"
}, {
"e": 0.48482278670215617,
"sym": "B2"
}, {
"e": 0.5203446818128086,
"sym": "A1"
}],
"RHF-1-HF-RPA-cc-pvdz": [{
"e": 0.27878771020942616,
"sym": "B1"
}, {
"e": 0.333654448674359,
"sym": "A2"
}, {
"e": 0.3755413264388134,
"sym": "A1"
}, {
"e": 0.42481114308980833,
"sym": "B2"
}, {
"e": 0.45601499280888025,
"sym": "B2"
}, {
"e": 0.5776368755615227,
"sym": "A1"
}, {
"e": 0.8037995559773897,
"sym": "A2"
}, {
"e": 0.839747743228828,
"sym": "B1"
}, {
"e": 0.9021958669217016,
"sym": "B2"
}, {
"e": 0.9229188098690396,
"sym": "A1"
}],
"RHF-1-HF-TDA-cc-pvdz": [{
"e": 0.282253171319435,
"sym": "B1"
}, {
"e": 0.33726690707429396,
"sym": "A2"
}, {
"e": 0.3798850963561309,
"sym": "A1"
}, {
"e": 0.4300699711369161,
"sym": "B2"
}, {
"e": 0.45887776503019195,
"sym": "B2"
}, {
"e": 0.5918368137683881,
"sym": "A1"
}, {
"e": 0.8045639418850057,
"sym": "A2"
}, {
"e": 0.8421474739868723,
"sym": "B1"
}, {
"e": 0.9055290304707769,
"sym": "B2"
}, {
"e": 0.9250906948276983,
"sym": "A1"
}],
"RHF-3-HF-RPA-cc-pvdz": [{
"e": 0.2357358789223071,
"sym": "B1"
}, {
"e": 0.26804588324807327,
"sym": "A1"
}, {
"e": 0.3013517942130891,
"sym": "A2"
}, {
"e": 0.30942929529453067,
"sym": "B2"
}, {
"e": 0.4023793566470789,
"sym": "B2"
}, {
"e": 0.42341834376775817,
"sym": "A1"
}, {
"e": 0.7930871284740221,
"sym": "A2"
}, {
"e": 0.8015468225092716,
"sym": "B2"
}, {
"e": 0.8053062781991327,
"sym": "A1"
}, {
"e": 0.8117190349721608,
"sym": "B1"
}],
"RHF-3-HF-TDA-cc-pvdz": [{
"e": 0.2428836221449658,
"sym": "B1"
}, {
"e": 0.2954976271417805,
"sym": "A1"
}, {
"e": 0.30883763120257796,
"sym": "A2"
}, {
"e": 0.33788429569198375,
"sym": "B2"
}, {
"e": 0.4084209452630434,
"sym": "B2"
}, {
"e": 0.4426382394256013,
"sym": "A1"
}, {
"e": 0.7945056523218094,
"sym": "A2"
}, {
"e": 0.808852587818601,
"sym": "B2"
}, {
"e": 0.8142547382233867,
"sym": "B1"
}, {
"e": 0.8170954608512094,
"sym": "A1"
}],
"UHF-HCTH93-RPA-cc-pvdz": [{
"e": 0.1856355275832295,
"sym": "B2"
}, {
"e": 0.2451767798678136,
"sym": "A2"
}, {
"e": 0.26726679761146477,
"sym": "B1"
}, {
"e": 0.3089515541022707,
"sym": "A2"
}, {
"e": 0.3376895242828316,
"sym": "A1"
}, {
"e": 0.38421049161220766,
"sym": "B2"
}, {
"e": 0.41623385122261786,
"sym": "B2"
}, {
"e": 0.41855640840345093,
"sym": "A1"
}, {
"e": 0.45103913454488037,
"sym": "B2"
}, {
"e": 0.4544715212646558,
"sym": "A1"
}],
"UHF-HCTH93-TDA-cc-pvdz": [{
"e": 0.18766409018421026,
"sym": "B2"
}, {
"e": 0.24640420723869694,
"sym": "A2"
}, {
"e": 0.26811570696078824,
"sym": "B1"
}, {
"e": 0.3091904246984007,
"sym": "A2"
}, {
"e": 0.33847595978393646,
"sym": "A1"
}, {
"e": 0.3854121944573539,
"sym": "B2"
}, {
"e": 0.41763767534141255,
"sym": "B2"
}, {
"e": 0.4238887350955218,
"sym": "A1"
}, {
"e": 0.4524539834604195,
"sym": "B2"
}, {
"e": 0.4571064476865819,
"sym": "A1"
}],
"RHF-1-HCTH93-RPA-cc-pvdz": [{
"e": 0.2278274247242806,
"sym": "B1"
}, {
"e": 0.2894192892033345,
"sym": "A2"
}, {
"e": 0.3289064362097446,
"sym": "A1"
}, {
"e": 0.3884917876813066,
"sym": "B2"
}, {
"e": 0.4481984119170576,
"sym": "B2"
}, {
"e": 0.5517690274667697,
"sym": "A1"
}, {
"e": 0.6738466018181832,
"sym": "A2"
}, {
"e": 0.707226011428162,
"sym": "B1"
}, {
"e": 0.7881149451422703,
"sym": "B2"
}, {
"e": 0.7978351409386365,
"sym": "A1"
}],
"RHF-1-HCTH93-TDA-cc-pvdz": [{
"e": 0.22904382729841943,
"sym": "B1"
}, {
"e": 0.28974635817342015,
"sym": "A2"
}, {
"e": 0.3335662503004033,
"sym": "A1"
}, {
"e": 0.39102014106803623,
"sym": "B2"
}, {
"e": 0.4529464243816721,
"sym": "B2"
}, {
"e": 0.5679313794941492,
"sym": "A1"
}, {
"e": 0.6739678745823723,
"sym": "A2"
}, {
"e": 0.7085967611567234,
"sym": "B1"
}, {
"e": 0.7913415356561492,
"sym": "B2"
}, {
"e": 0.7994116868730944,
"sym": "A1"
}],
"RHF-3-HCTH93-RPA-cc-pvdz": [{
"e": 0.2006807002071702,
"sym": "B1"
}, {
"e": 0.26766736522651347,
"sym": "A2"
}, {
"e": 0.2785010654940702,
"sym": "A1"
}, {
"e": 0.3420810683193752,
"sym": "B2"
}, {
"e": 0.39419895746268907,
"sym": "B2"
}, {
"e": 0.4394557480987003,
"sym": "A1"
}, {
"e": 0.6662358171322572,
"sym": "A2"
}, {
"e": 0.6870212339278138,
"sym": "B1"
}, {
"e": 0.732255974970336,
"sym": "B2"
}, {
"e": 0.7517257660210537,
"sym": "A1"
}],
"RHF-3-HCTH93-TDA-cc-pvdz": [{
"e": 0.20200735074875364,
"sym": "B1"
}, {
"e": 0.26861182281428897,
"sym": "A2"
}, {
"e": 0.281727656007949,
"sym": "A1"
}, {
"e": 0.3449548653374312,
"sym": "B2"
}, {
"e": 0.3960474483835103,
"sym": "B2"
}, {
"e": 0.44406043820563673,
"sym": "A1"
}, {
"e": 0.6665408365088539,
"sym": "A2"
}, {
"e": 0.6875430743070515,
"sym": "B1"
}, {
"e": 0.7339868680592164,
"sym": "B2"
}, {
"e": 0.7549670562639251,
"sym": "A1"
}],
"UHF-LRC-wPBE-RPA-cc-pvdz": [{
"e": 0.20857445467620409,
"sym": "B2"
}, {
"e": 0.25750617756036887,
"sym": "A2"
}, {
"e": 0.29567402388969183,
"sym": "B1"
}, {
"e": 0.3353963665599838,
"sym": "A2"
}, {
"e": 0.3676659466310203,
"sym": "A1"
}, {
"e": 0.4059771153180213,
"sym": "B2"
}, {
"e": 0.42314272384914664,
"sym": "A1"
}, {
"e": 0.4309152055539914,
"sym": "B2"
}, {
"e": 0.47108588995855805,
"sym": "B2"
}, {
"e": 0.47426103142096276,
"sym": "A1"
}],
"UHF-LRC-wPBE-TDA-cc-pvdz": [{
"e": 0.2111138328596782,
"sym": "B2"
}, {
"e": 0.2588953019501709,
"sym": "A2"
}, {
"e": 0.296850002209101,
"sym": "B1"
}, {
"e": 0.33591453200697347,
"sym": "A2"
}, {
"e": 0.36902567156283717,
"sym": "A1"
}, {
"e": 0.40870758997839934,
"sym": "B2"
}, {
"e": 0.4285081249314508,
"sym": "A1"
}, {
"e": 0.4325872997269013,
"sym": "B2"
}, {
"e": 0.4722361437522301,
"sym": "B2"
}, {
"e": 0.4778293906339199,
"sym": "A1"
}],
"RHF-1-LRC-wPBE-RPA-cc-pvdz": [{
"e": 0.24823799843052555,
"sym": "B1"
}, {
"e": 0.3133577978678068,
"sym": "A2"
}, {
"e": 0.34407655653012253,
"sym": "A1"
}, {
"e": 0.4111771444491586,
"sym": "B2"
}, {
"e": 0.45620241435353615,
"sym": "B2"
}, {
"e": 0.5713821408751654,
"sym": "A1"
}, {
"e": 0.7121320459794471,
"sym": "A2"
}, {
"e": 0.7467646074860463,
"sym": "B1"
}, {
"e": 0.8237985372718415,
"sym": "B2"
}, {
"e": 0.8347057361843613,
"sym": "A1"
}],
"RHF-1-LRC-wPBE-TDA-cc-pvdz": [{
"e": 0.25019673731879144,
"sym": "B1"
}, {
"e": 0.3137951148053371,
"sym": "A2"
}, {
"e": 0.3480087340356468,
"sym": "A1"
}, {
"e": 0.4139259937707775,
"sym": "B2"
}, {
"e": 0.4595355779026114,
"sym": "B2"
}, {
"e": 0.5866661840952361,
"sym": "A1"
}, {
"e": 0.7122790432693733,
"sym": "A2"
}, {
"e": 0.7482823545045337,
"sym": "B1"
}, {
"e": 0.8268119817153274,
"sym": "B2"
}, {
"e": 0.8363484308992859,
"sym": "A1"
}],
"RHF-3-LRC-wPBE-RPA-cc-pvdz": [{
"e": 0.21493576239775788,
"sym": "B1"
}, {
"e": 0.28128298920592243,
"sym": "A1"
}, {
"e": 0.2873576522121203,
"sym": "A2"
}, {
"e": 0.34026197685653914,
"sym": "B2"
}, {
"e": 0.40093878320580273,
"sym": "B2"
}, {
"e": 0.44324092831429845,
"sym": "A1"
}, {
"e": 0.7026102965244811,
"sym": "A2"
}, {
"e": 0.7221756358136507,
"sym": "B1"
}, {
"e": 0.7560327866158897,
"sym": "B2"
}, {
"e": 0.7725589569358363,
"sym": "A1"
}],
"RHF-3-LRC-wPBE-TDA-cc-pvdz": [{
"e": 0.2164314598227564,
"sym": "B1"
}, {
"e": 0.28700485871629755,
"sym": "A1"
}, {
"e": 0.28869165261820007,
"sym": "A2"
}, {
"e": 0.34761184135284623,
"sym": "B2"
}, {
"e": 0.40273215014290165,
"sym": "B2"
}, {
"e": 0.44947361340716696,
"sym": "A1"
}, {
"e": 0.7029814646815448,
"sym": "A2"
}, {
"e": 0.722752600176611,
"sym": "B1"
}, {
"e": 0.7587595863440195,
"sym": "B2"
}, {
"e": 0.7765352336283384,
"sym": "A1"
}],
"UHF-PBE0-RPA-cc-pvdz": [{
"e": 0.2057925309643518,
"sym": "B2"
}, {
"e": 0.25788469558192867,
"sym": "A2"
}, {
"e": 0.27774770438319873,
"sym": "B1"
}, {
"e": 0.3209796073504773,
"sym": "A2"
}, {
"e": 0.34905608972637064,
"sym": "A1"
}, {
"e": 0.3920417722330229,
"sym": "B2"
}, {
"e": 0.4166160441764259,
"sym": "B2"
}, {
"e": 0.4181815653141393,
"sym": "A1"
}, {
"e": 0.4582640513447503,
"sym": "B2"
}, {
"e": 0.46098717614063206,
"sym": "A1"
}],
"UHF-PBE0-TDA-cc-pvdz": [{
"e": 0.20835395874131485,
"sym": "B2"
}, {
"e": 0.2593583434134383,
"sym": "A2"
}, {
"e": 0.2788171096674114,
"sym": "B1"
}, {
"e": 0.32146837333948175,
"sym": "A2"
}, {
"e": 0.3502541176392687,
"sym": "A1"
}, {
"e": 0.3942357067851706,
"sym": "B2"
}, {
"e": 0.41852333401321756,
"sym": "B2"
}, {
"e": 0.42350654214171385,
"sym": "A1"
}, {
"e": 0.45957232722509295,
"sym": "B2"
}, {
"e": 0.46426154077373694,
"sym": "A1"
}],
"RHF-1-PBE0-RPA-cc-pvdz": [{
"e": 0.24152757214539713,
"sym": "B1"
}, {
"e": 0.30322600965964747,
"sym": "A2"
}, {
"e": 0.3397842356642792,
"sym": "A1"
}, {
"e": 0.3989212454015664,
"sym": "B2"
}, {
"e": 0.450576093081613,
"sym": "B2"
}, {
"e": 0.5607321872200163,
"sym": "A1"
}, {
"e": 0.7111912633239198,
"sym": "A2"
}, {
"e": 0.7451917364838366,
"sym": "B1"
}, {
"e": 0.8210717375437115,
"sym": "B2"
}, {
"e": 0.8330520166726922,
"sym": "A1"
}],
"RHF-1-PBE0-TDA-cc-pvdz": [{
"e": 0.24295344585768072,
"sym": "B1"
}, {
"e": 0.3037111007164037,
"sym": "A2"
}, {
"e": 0.3439369091046927,
"sym": "A1"
}, {
"e": 0.40143489905930346,
"sym": "B2"
}, {
"e": 0.4543833228907001,
"sym": "B2"
}, {
"e": 0.5759280320661313,
"sym": "A1"
}, {
"e": 0.711338260613846,
"sym": "A2"
}, {
"e": 0.7466102603316238,
"sym": "B1"
}, {
"e": 0.8240815070549493,
"sym": "B2"
}, {
"e": 0.8345991631491648,
"sym": "A1"
}],
"RHF-3-PBE0-RPA-cc-pvdz": [{
"e": 0.20871042716938573,
"sym": "B1"
}, {
"e": 0.2750098798583243,
"sym": "A1"
}, {
"e": 0.2767775222696862,
"sym": "A2"
}, {
"e": 0.33116284461011086,
"sym": "B2"
}, {
"e": 0.3922953425581455,
"sym": "B2"
}, {
"e": 0.43251747601418633,
"sym": "A1"
}, {
"e": 0.7010631500480085,
"sym": "A2"
}, {
"e": 0.7203308198250776,
"sym": "B1"
}, {
"e": 0.752831920627748,
"sym": "B2"
}, {
"e": 0.7687590769912455,
"sym": "A1"
}],
"RHF-3-PBE0-TDA-cc-pvdz": [{
"e": 0.21080513855083327,
"sym": "B1"
}, {
"e": 0.27866276251298894,
"sym": "A2"
}, {
"e": 0.28137118757987817,
"sym": "A1"
}, {
"e": 0.3390676238758892,
"sym": "B2"
}, {
"e": 0.3944745773813006,
"sym": "B2"
}, {
"e": 0.4398416159847564,
"sym": "A1"
}, {
"e": 0.7015372163080204,
"sym": "A2"
}, {
"e": 0.7211099054616862,
"sym": "B1"
}, {
"e": 0.7557167424425485,
"sym": "B2"
}, {
"e": 0.7733747918949263,
"sym": "A1"
}],
"UHF-wB97X-RPA-cc-pvdz": [{
"e": 0.18313657365448505,
"sym": "B2"
}, {
"e": 0.24031484450350646,
"sym": "A2"
}, {
"e": 0.2859023790418515,
"sym": "B1"
}, {
"e": 0.3266610526061227,
"sym": "A2"
}, {
"e": 0.36160598335381505,
"sym": "A1"
}, {
"e": 0.4077778321196165,
"sym": "B2"
}, {
"e": 0.4338588262847624,
"sym": "B2"
}, {
"e": 0.4340278731681775,
"sym": "A1"
}, {
"e": 0.4666281971415478,
"sym": "B2"
}, {
"e": 0.48119562857322856,
"sym": "A1"
}],
"UHF-wB97X-TDA-cc-pvdz": [{
"e": 0.1852129103746918,
"sym": "B2"
}, {
"e": 0.24126665195577823,
"sym": "A2"
}, {
"e": 0.28685786142637143,
"sym": "B1"
}, {
"e": 0.3270946946114049,
"sym": "A2"
}, {
"e": 0.3624254932451533,
"sym": "A1"
}, {
"e": 0.4091853311706593,
"sym": "B2"
}, {
"e": 0.43516710216510507,
"sym": "B2"
}, {
"e": 0.4401539852258495,
"sym": "A1"
}, {
"e": 0.4679474978186349,
"sym": "B2"
}, {
"e": 0.484293596458422,
"sym": "A1"
}],
"RHF-1-wB97X-RPA-cc-pvdz": [{
"e": 0.2472126923332907,
"sym": "B1"
}, {
"e": 0.3131997757811362,
"sym": "A2"
}, {
"e": 0.34444772468718604,
"sym": "A1"
}, {
"e": 0.41038703401580556,
"sym": "B2"
}, {
"e": 0.45278105243050515,
"sym": "B2"
}, {
"e": 0.5674462884373929,
"sym": "A1"
}, {
"e": 0.7190997175219462,
"sym": "A2"
}, {
"e": 0.7518360139884983,
"sym": "B1"
}, {
"e": 0.8295240817144648,
"sym": "B2"
}, {
"e": 0.840034387944184,
"sym": "A1"
}],
"RHF-1-wB97X-TDA-cc-pvdz": [{
"e": 0.24888846143844873,
"sym": "B1"
}, {
"e": 0.31362606792192205,
"sym": "A2"
}, {
"e": 0.34848280029565865,
"sym": "A1"
}, {
"e": 0.41301093564098723,
"sym": "B2"
}, {
"e": 0.4563016375242363,
"sym": "B2"
}, {
"e": 0.5822268659394665,
"sym": "A1"
}, {
"e": 0.7192503897441206,
"sym": "A2"
}, {
"e": 0.7532949620910153,
"sym": "B1"
}, {
"e": 0.8325669256159359,
"sym": "B2"
}, {
"e": 0.8418130551522903,
"sym": "A1"
}],
"RHF-3-wB97X-RPA-cc-pvdz": [{
"e": 0.21928320724732356,
"sym": "B1"
}, {
"e": 0.29090028689934033,
"sym": "A2"
}, {
"e": 0.29346538960855156,
"sym": "A1"
}, {
"e": 0.3566558496155522,
"sym": "B2"
}, {
"e": 0.4047901122018677,
"sym": "B2"
}, {
"e": 0.45354911327036923,
"sym": "A1"
}, {
"e": 0.7135101455725047,
"sym": "A2"
}, {
"e": 0.7332151322871041,
"sym": "B1"
}, {
"e": 0.7665614675068497,
"sym": "B2"
}, {
"e": 0.7836168280705303,
"sym": "A1"
}],
"RHF-3-wB97X-TDA-cc-pvdz": [{
"e": 0.22053635914394396,
"sym": "B1"
}, {
"e": 0.29207259028650134,
"sym": "A2"
}, {
"e": 0.2973130436723683,
"sym": "A1"
}, {
"e": 0.36116866641628476,
"sym": "B2"
}, {
"e": 0.4065136554262517,
"sym": "B2"
}, {
"e": 0.4588630653011993,
"sym": "A1"
}, {
"e": 0.7138849886618163,
"sym": "A2"
}, {
"e": 0.7338067963790569,
"sym": "B1"
}, {
"e": 0.7689722230616385,
"sym": "B2"
}, {
"e": 0.7875931047630326,
"sym": "A1"
}]
}
@pytest.mark.tdscf
@pytest.mark.parametrize("ref,func,ptype,basis", [
pytest.param( 'UHF', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, UHF, RPA]), # G09 rev E.01
pytest.param( 'UHF', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, UHF, TDA]), # G09 rev E.01
pytest.param( 'RHF-1', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, RHF_singlet, RPA]), # G09 rev E.01
pytest.param( 'RHF-1', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, RHF_singlet, TDA]), # G09 rev E.01
pytest.param( 'RHF-3', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, RHF_triplet, RPA]), # G09 rev E.01
pytest.param( 'RHF-3', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, RHF_triplet, TDA]), # G09 rev E.01
pytest.param( 'UHF', 'HF', 'RPA', 'cc-pvdz', marks=[hf, UHF, RPA, pytest.mark.quick]), # G09 rev E.01
pytest.param( 'UHF', 'HF', 'TDA', 'cc-pvdz', marks=[hf, UHF, TDA, pytest.mark.quick]), # G09 rev E.01
pytest.param( 'RHF-1', 'HF', 'RPA', 'cc-pvdz', marks=[hf, RHF_singlet, RPA, pytest.mark.quick]), # G09 rev E.01
pytest.param( 'RHF-1', 'HF', 'TDA', 'cc-pvdz', marks=[hf, RHF_singlet, TDA, pytest.mark.quick]), # G09 rev E.01
pytest.param( 'RHF-3', 'HF', 'RPA', 'cc-pvdz', marks=[hf, RHF_triplet, RPA, pytest.mark.quick]), # G09 rev E.01
pytest.param( 'RHF-3', 'HF', 'TDA', 'cc-pvdz', marks=[hf, RHF_triplet, TDA, pytest.mark.quick]), # G09 rev E.01
pytest.param( 'UHF', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, UHF, RPA]), # G09 rev E.01
pytest.param( 'UHF', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, UHF, TDA]), # G09 rev E.01
pytest.param( 'RHF-1', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, RHF_singlet, RPA]), # G09 rev E.01
pytest.param( 'RHF-1', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, RHF_singlet, TDA]), # G09 rev E.01
pytest.param( 'RHF-3', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, RHF_triplet, RPA]), # G09 rev E.01
pytest.param( 'RHF-3', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, RHF_triplet, TDA]), # G09 rev E.01
pytest.param( 'UHF', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, UHF, RPA]), # G09 rev E.01
pytest.param( 'UHF', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, UHF, TDA]), # G09 rev E.01
pytest.param( 'RHF-1', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, RHF_singlet, RPA]), # G09 rev E.01
pytest.param( 'RHF-1', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, RHF_singlet, TDA]), # G09 rev E.01
pytest.param( 'RHF-3', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, RHF_triplet, RPA]), # G09 rev E.01
pytest.param( 'RHF-3', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, RHF_triplet, TDA]), # G09 rev E.01
pytest.param( 'UHF', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, UHF, RPA]), # G09 rev E.01
pytest.param( 'UHF', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, UHF, TDA]), # G09 rev E.01
pytest.param( 'RHF-1', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_singlet, RPA]), # G09 rev E.01
pytest.param( 'RHF-1', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_singlet, TDA]), # G09 rev E.01
pytest.param( 'RHF-3', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_triplet, RPA]), # G09 rev E.01
pytest.param( 'RHF-3', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_triplet, TDA]), # G09 rev E.01
]) # yapf: disable
def test_tdscf(ref, func, ptype, basis, expected, wfn_factory, solver_funcs, engines):
if (ref == 'RHF-1') or (func == "HF"):
# Everything works for RHF-singlet, and TDHF/CIS (func == "HF") works for RHF-triplet and UHF
pass
elif (ref == 'RHF-3'):
pytest.xfail("RKS Vx kernel only Spin Adapted for Singlet")
elif (ref == 'UHF' and func != 'SVWN'):
pytest.xfail("UKS Vx kernel bug for non-lda")
### setup
# Look up expected in fixture (easier to read)
exp_lookup = "{}-{}-{}-{}".format(ref, func, ptype, basis)
exp_low_per_sym = {'A1': 100, 'A2': 100, 'B1': 100, "B2": 100}
for x in expected[exp_lookup]:
exp_low_per_sym[x['sym']] = min(x['e'], exp_low_per_sym[x['sym']])
exp_energy_sorted = np.array([x['e'] for x in expected[exp_lookup]])
exp_energy_sorted.sort()
# get wfn (don't use symmetry b/c too slow)
wfn = wfn_factory(ref, func, basis, nosym=True)
# select solver function (TDA->davidson/RPA->hamiltonian)
solver = solver_funcs[ptype]
# build engine
engine = engines[ref](wfn, ptype)
# skipping the entrypoint, just call the solver
out = solver(
engine=engine,
guess=engine.generate_guess(16),
max_vecs_per_root=10,
nroot=4,
verbose=1,
maxiter=30,
e_tol=1.0e-7,
r_tol=1.0e-5,
schmidt_tol=1.0e-12)
test_vals = out[0]
stats = out[-1]
assert stats[-1]['done'], "Solver did not converge"
for i, my_v in enumerate(test_vals):
ref_v = exp_energy_sorted[i]
assert compare_values(ref_v, my_v, 4, "{}-{}-{}-ROOT-{}".format(ref, func, ptype, i + 1))
| CDSherrill/psi4 | tests/pytests/test_tdscf_excitations.py | Python | lgpl-3.0 | 35,391 | ["Psi4"] | 77cff648c99666abb74531063c2f9527542f35532057ce41df85610a3a2e520b |
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
| kurtraschke/camelot | camelot/view/wizard/pages/__init__.py | Python | gpl-2.0 | 1,068 | ["VisIt"] | cfd7852a26319b313f253bbf73dab217ce7575dfaa67e6b24cedcb3f6d2dc62d |
"""Country names to ISO3166_alpha2 codes mapping
Roughly generated by the following bash script on GNU/Linux:
while read cc name; do
[ ! "$cc" ] &&
continue
out=$(isoquery $cc | cut -f3 --complement);
[ ! "$out" ] &&
out="$cc"
[ "$(echo $out | cut -f3)" = "$name" ] &&
name=''
echo -e "$out\t$name" |
sed -r 's/\s+$//' |
sed -r "s/\t/': ['/" |
sed -r "s/\t/', '/g" |
sed -r "s/^/'/" |
sed -r 's/$/'"'"',],/'
done < input/cc.list # cc.list from jVectorMap; format: lines start with ISO3166_alpha2_code else copied as is
Certain details updated by hand.
"""
CC_EUROPE = {
'_0': ['Kosovo', 'Kosovo, Republic of'],
'-99': ['N. Cyprus', 'North Cyprus'],
'AD': ['AND', 'Andorra'],
'AL': ['ALB', 'Albania'],
'AT': ['AUT', 'Austria'],
'AX': ['ALA', 'Åland Islands', 'Aland'],
'BA': ['BIH', 'Bosnia and Herzegovina', 'Bosnia and Herz.'],
'BE': ['BEL', 'Belgium'],
'BG': ['BGR', 'Bulgaria'],
'BY': ['BLR', 'Belarus'],
'CH': ['CHE', 'Switzerland'],
'CY': ['CYP', 'Cyprus'],
'CZ': ['CZE', 'Czech Republic', 'Czech Rep.'],
'DE': ['DEU', 'Germany'],
'DK': ['DNK', 'Denmark'],
'DZ': ['DZA', 'Algeria'],
'EE': ['EST', 'Estonia'],
'EG': ['EGY', 'Egypt'],
'ES': ['ESP', 'Spain'],
'FI': ['FIN', 'Finland'],
'FO': ['FRO', 'Faroe Islands', 'Faeroe Is.'],
'FR': ['FRA', 'France'],
'GB': ['GBR', 'United Kingdom'],
'GE': ['GEO', 'Georgia'],
'GG': ['GGY', 'Guernsey'],
'GR': ['GRC', 'Greece'],
'HR': ['HRV', 'Croatia'],
'HU': ['HUN', 'Hungary'],
'IE': ['IRL', 'Ireland'],
'IL': ['ISR', 'Israel'],
'IM': ['IMN', 'Isle of Man'],
'IQ': ['IRQ', 'Iraq'],
'IS': ['ISL', 'Iceland'],
'IT': ['ITA', 'Italy'],
'JE': ['JEY', 'Jersey'],
'JO': ['JOR', 'Jordan'],
'LB': ['LBN', 'Lebanon'],
'LI': ['LIE', 'Liechtenstein'],
'LT': ['LTU', 'Lithuania'],
'LU': ['LUX', 'Luxembourg'],
'LV': ['LVA', 'Latvia'],
'LY': ['LBY', 'Libya'],
'MA': ['MAR', 'Morocco'],
'MD': ['MDA', 'Moldova, Republic of', 'Moldova'],
'ME': ['MNE', 'Montenegro'],
'MK': ['MKD', 'Macedonia, Republic of', 'Macedonia'],
'MT': ['MLT', 'Malta'],
'NL': ['NLD', 'Netherlands'],
'NO': ['NOR', 'Norway'],
'PL': ['POL', 'Poland'],
'PS': ['PSE', 'Palestine, State of', 'Palestine'],
'PT': ['PRT', 'Portugal'],
'RO': ['ROU', 'Romania'],
'RS': ['SRB', 'Serbia'],
'RU': ['RUS', 'Russian Federation', 'Russia'],
'SA': ['SAU', 'Saudi Arabia'],
'SE': ['SWE', 'Sweden'],
'SI': ['SVN', 'Slovenia'],
'SK': ['SVK', 'Slovakia'],
'SM': ['SMR', 'San Marino'],
'SY': ['SYR', 'Syrian Arab Republic', 'Syria'],
'TN': ['TUN', 'Tunisia'],
'TR': ['TUR', 'Turkey'],
'UA': ['UKR', 'Ukraine'],
}
CC_WORLD = {
# Does NOT include CC_EUROPE
'_1': ['Somaliland',],
'AE': ['ARE', 'United Arab Emirates'],
'AF': ['AFG', 'Afghanistan'],
'AM': ['ARM', 'Armenia'],
'AO': ['AGO', 'Angola'],
'AR': ['ARG', 'Argentina'],
'AU': ['AUS', 'Australia'],
'AZ': ['AZE', 'Azerbaijan'],
'BD': ['BGD', 'Bangladesh'],
'BF': ['BFA', 'Burkina Faso'],
'BI': ['BDI', 'Burundi'],
'BJ': ['BEN', 'Benin'],
'BN': ['BRN', 'Brunei Darussalam', 'Brunei'],
'BO': ['BOL', 'Bolivia, Plurinational State of', 'Bolivia'],
'BR': ['BRA', 'Brazil'],
'BS': ['BHS', 'Bahamas'],
'BT': ['BTN', 'Bhutan'],
'BW': ['BWA', 'Botswana'],
'BZ': ['BLZ', 'Belize'],
'CA': ['CAN', 'Canada'],
'CD': ['COD', 'Congo, The Democratic Republic of the', 'Dem. Rep. Congo'],
'CF': ['CAF', 'Central African Republic', 'Central African Rep.'],
'CG': ['COG', 'Congo'],
'CI': ['CIV', "Côte d'Ivoire"],
'CL': ['CHL', 'Chile'],
'CM': ['CMR', 'Cameroon'],
'CN': ['CHN', 'China'],
'CO': ['COL', 'Colombia'],
'CR': ['CRI', 'Costa Rica'],
'CU': ['CUB', 'Cuba'],
'DJ': ['DJI', 'Djibouti'],
'DO': ['DOM', 'Dominican Republic', 'Dominican Rep.'],
'EC': ['ECU', 'Ecuador'],
'EH': ['ESH', 'Western Sahara', 'W. Sahara'],
'ER': ['ERI', 'Eritrea'],
'ET': ['ETH', 'Ethiopia'],
'FJ': ['FJI', 'Fiji'],
'FK': ['FLK', 'Falkland Islands [Malvinas]', 'Falkland Is.'],
'GA': ['GAB', 'Gabon'],
'GH': ['GHA', 'Ghana'],
'GL': ['GRL', 'Greenland'],
'GM': ['GMB', 'Gambia'],
'GN': ['GIN', 'Guinea'],
'GQ': ['GNQ', 'Equatorial Guinea', 'Eq. Guinea'],
'GT': ['GTM', 'Guatemala'],
'GW': ['GNB', 'Guinea-Bissau'],
'GY': ['GUY', 'Guyana'],
'HN': ['HND', 'Honduras'],
'HT': ['HTI', 'Haiti'],
'ID': ['IDN', 'Indonesia'],
'IN': ['IND', 'India'],
'IR': ['IRN', 'Iran, Islamic Republic of', 'Iran'],
'JM': ['JAM', 'Jamaica'],
'JP': ['JPN', 'Japan'],
'KE': ['KEN', 'Kenya'],
'KG': ['KGZ', 'Kyrgyzstan'],
'KH': ['KHM', 'Cambodia'],
'KP': ['PRK', "Korea, Democratic People's Republic of", 'Dem. Rep. Korea', 'North Korea'],
'KR': ['KOR', 'Korea, Republic of', 'Korea', 'South Korea'],
'KW': ['KWT', 'Kuwait'],
'KZ': ['KAZ', 'Kazakhstan'],
'LA': ['LAO', "Lao People's Democratic Republic", 'Lao PDR'],
'LK': ['LKA', 'Sri Lanka'],
'LR': ['LBR', 'Liberia'],
'LS': ['LSO', 'Lesotho'],
'MG': ['MDG', 'Madagascar'],
'ML': ['MLI', 'Mali'],
'MM': ['MMR', 'Myanmar'],
'MN': ['MNG', 'Mongolia'],
'MR': ['MRT', 'Mauritania'],
'MW': ['MWI', 'Malawi'],
'MX': ['MEX', 'Mexico'],
'MY': ['MYS', 'Malaysia'],
'MZ': ['MOZ', 'Mozambique'],
'NA': ['NAM', 'Namibia'],
'NC': ['NCL', 'New Caledonia'],
'NE': ['NER', 'Niger'],
'NG': ['NGA', 'Nigeria'],
'NI': ['NIC', 'Nicaragua'],
'NP': ['NPL', 'Nepal'],
'NZ': ['NZL', 'New Zealand'],
'OM': ['OMN', 'Oman'],
'PA': ['PAN', 'Panama'],
'PE': ['PER', 'Peru'],
'PG': ['PNG', 'Papua New Guinea'],
'PH': ['PHL', 'Philippines'],
'PK': ['PAK', 'Pakistan'],
'PR': ['PRI', 'Puerto Rico'],
'PY': ['PRY', 'Paraguay'],
'QA': ['QAT', 'Qatar'],
'RW': ['RWA', 'Rwanda'],
'SB': ['SLB', 'Solomon Islands', 'Solomon Is.'],
'SD': ['SDN', 'Sudan'],
'SL': ['SLE', 'Sierra Leone'],
'SN': ['SEN', 'Senegal'],
'SO': ['SOM', 'Somalia'],
'SR': ['SUR', 'Suriname'],
'SS': ['SSD', 'South Sudan', 'S. Sudan'],
'SV': ['SLV', 'El Salvador'],
'SZ': ['SWZ', 'Swaziland'],
'TD': ['TCD', 'Chad'],
'TF': ['ATF', 'French Southern Territories', 'Fr. S. Antarctic Lands'],
'TG': ['TGO', 'Togo'],
'TH': ['THA', 'Thailand'],
'TJ': ['TJK', 'Tajikistan'],
'TL': ['TLS', 'Timor-Leste'],
'TM': ['TKM', 'Turkmenistan'],
'TT': ['TTO', 'Trinidad and Tobago'],
'TW': ['TWN', 'Taiwan, Province of China', 'Taiwan'],
'TZ': ['TZA', 'Tanzania, United Republic of', 'Tanzania'],
'UG': ['UGA', 'Uganda'],
'US': ['USA', 'United States', 'United States of America'],
'UY': ['URY', 'Uruguay'],
'UZ': ['UZB', 'Uzbekistan'],
'VE': ['VEN', 'Venezuela, Bolivarian Republic of', 'Venezuela'],
'VN': ['VNM', 'Viet Nam', 'Vietnam'],
'VU': ['VUT', 'Vanuatu'],
'YE': ['YEM', 'Yemen'],
'ZA': ['ZAF', 'South Africa'],
'ZM': ['ZMB', 'Zambia'],
'ZW': ['ZWE', 'Zimbabwe'],
}
CC_WORLD.update(CC_EUROPE)
CC_USA = {
'US-AK': ['AK', 'Alaska'],
'US-AL': ['AL', 'Alabama'],
'US-AR': ['AR', 'Arkansas'],
'US-AZ': ['AZ', 'Arizona'],
'US-CA': ['CA', 'California'],
'US-CO': ['CO', 'Colorado'],
'US-CT': ['CT', 'Connecticut'],
'US-DC': ['DC', 'District of Columbia'],
'US-DE': ['DE', 'Delaware'],
'US-FL': ['FL', 'Florida'],
'US-GA': ['GA', 'Georgia'],
'US-HI': ['HI', 'Hawaii'],
'US-IA': ['IA', 'Iowa'],
'US-ID': ['ID', 'Idaho'],
'US-IL': ['IL', 'Illinois'],
'US-IN': ['IN', 'Indiana'],
'US-KS': ['KS', 'Kansas'],
'US-KY': ['KY', 'Kentucky'],
'US-LA': ['LA', 'Louisiana'],
'US-MA': ['MA', 'Massachusetts'],
'US-MD': ['MD', 'Maryland'],
'US-ME': ['ME', 'Maine'],
'US-MI': ['MI', 'Michigan'],
'US-MN': ['MN', 'Minnesota'],
'US-MO': ['MO', 'Missouri'],
'US-MS': ['MS', 'Mississippi'],
'US-MT': ['MT', 'Montana'],
'US-NC': ['NC', 'North Carolina'],
'US-ND': ['ND', 'North Dakota'],
'US-NE': ['NE', 'Nebraska'],
'US-NH': ['NH', 'New Hampshire'],
'US-NJ': ['NJ', 'New Jersey'],
'US-NM': ['NM', 'New Mexico'],
'US-NV': ['NV', 'Nevada'],
'US-NY': ['NY', 'New York'],
'US-OH': ['OH', 'Ohio'],
'US-OK': ['OK', 'Oklahoma'],
'US-OR': ['OR', 'Oregon'],
'US-PA': ['PA', 'Pennsylvania'],
'US-RI': ['RI', 'Rhode Island'],
'US-SC': ['SC', 'South Carolina'],
'US-SD': ['SD', 'South Dakota'],
'US-TN': ['TN', 'Tennessee'],
'US-TX': ['TX', 'Texas'],
'US-UT': ['UT', 'Utah'],
'US-VA': ['VA', 'Virginia'],
'US-VT': ['VT', 'Vermont'],
'US-WA': ['WA', 'Washington'],
'US-WI': ['WI', 'Wisconsin'],
'US-WV': ['WV', 'West Virginia'],
'US-WY': ['WY', 'Wyoming'],
}
def _invert_mapping(mapping):
return {v: k for k in mapping for v in mapping[k]}
INV_CC_EUROPE = _invert_mapping(CC_EUROPE)
INV_CC_WORLD = _invert_mapping(CC_WORLD)
INV_CC_USA = _invert_mapping(CC_USA)
SET_CC_EUROPE = set(INV_CC_EUROPE.keys()) | set(INV_CC_EUROPE.values())
SET_CC_USA = set(INV_CC_USA.keys()) | set(INV_CC_USA.values())
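# Minimal usage sketch (illustrative, not part of the original module): the
# inverted maps resolve any recorded alias to its ISO 3166 key, and the sets
# give O(1) membership tests over codes and names alike.
assert INV_CC_WORLD['Brazil'] == 'BR'
assert INV_CC_USA['Texas'] == 'US-TX'
assert 'Alaska' in SET_CC_USA and 'US-AK' in SET_CC_USA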
|
kernc/orange3-text
|
orangecontrib/text/country_codes.py
|
Python
|
bsd-2-clause
| 9,422
|
[
"BWA"
] |
1f112e0a0a4dfdaacd2165e63cb670c5395f5ea8b66b5ac8e06b120553d97169
|
import numpy as np
from utils import *
from ilqr import LQR
# implements guided policy search algorithm
# based on work of S Levine, P Abbeel
#
class GPS( object ):
def __init__( self, params ):
self.x_len = params[ 'x_len' ]
self.u_len = params[ 'u_len' ]
self.num_gaussians = params[ 'num_gaussians' ]
def estimate_linear_controllers( self, xu_train ):
# estimate a linear Gaussian controller of the form x_{t+1} = x_t + K*x_t + noise
estimated_linear_controllers = []
# create X_dynamics with organisation:
# [num rollouts] [time step] [xt+1 xt u]
XU_dynamics = np.concatenate( (xu_train[ :, 1:, :self.x_len], xu_train[ :, :-1, :]), axis=2 )
# swap rollout and time axes
xu = np.swapaxes( XU_dynamics, 0, 1 )
# for each timestep
for i in range( xu.shape[ 0 ] ):
lin_reg = Lin_Gaussian_Estimator( self.x_len )
lin_reg.fit( xu[i, :, :], self.x_len, num_gaussians=self.num_gaussians )
estimated_linear_controllers.append( lin_reg )
# copy last controller to ensure we can span whole time range
estimated_linear_controllers.append( lin_reg )
return estimated_linear_controllers
def train( self, xu_train, o_train, system_cost, gps_params ):
"""
x_train - training state data
o_train - training image data
objective_f - objective function
x_train dimensions: [ num_rollouts ][ num_time steps ][ x_vec, u_vec ]
o_train dimensions: [ num_rollouts ][ num_time steps ][ width, height ]
returns Policy
"""
# assert( x_train.shape[0] == o_train.shape[0] )
# assert( x_train.shape[1] == o_train.shape[1] )
# TODO - better error handling
# TODO - add diagnostics and info capture
# estimate linear controllers
estimated_linear_controllers = self.estimate_linear_controllers( xu_train )
lqr = LQR( self.x_len, self.u_len )
# TODO run loop for K times or when error improvement is less than epsilon
for k in range( gps_params[ 'K'] ):
# optimise supervised learning
# TODO:
# sample x, o from stored states and their corresponding observations
# minimise objective function
# use SGD to train
# Questions:
# how is covariance Cti estimated = this is covariance of p( ut | xt )
# how is importance sampling utilized?
# how is training data prepared?
# optimise linear controllers using LQR
linear_controllers = lqr.LQR( xu_train, estimated_linear_controllers, system_cost, 1.0 ) # TODO add lagrange multipliers
# update Lagrange multipliers
# NOTE: the supervised-learning step above is still a TODO, so no trained
# policy exists yet; return a placeholder to keep the method runnable.
policy = None
return policy
def merge_data( x_train, o_train, x_run, o_run ):
pass
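# Hedged usage sketch (shapes are hypothetical; Lin_Gaussian_Estimator comes
# from the star-imported utils module). Only the dynamics-fitting step is
# fully implemented above, so that is all this exercises:
# gps = GPS({'x_len': 4, 'u_len': 2, 'num_gaussians': 3})
# xu_train = np.random.randn(10, 100, 6)  # [rollout][timestep][x_vec + u_vec]
# controllers = gps.estimate_linear_controllers(xu_train)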
|
marcino239/gps
|
gps.py
|
Python
|
gpl-3.0
| 2,556
|
[
"Gaussian"
] |
032940e6d1b4868a88e458d36a04876e59c7b7cda1eb7fb3ed071e17fb3af392
|
#!/usr/bin/python
## A PYTHON script to convert CRYSTAL output to Qwalk input,
## based on Qwalk crystal2qmc
##
## Author:
## Huihuo Zheng,
## Department of Physics, UIUC
## hzheng8@illinois.edu / zhenghh04@gmail.com
## Date: 02-02-2013
#############################################################################
from numpy import *
import os, sys
upf=1
if len(sys.argv)<2:
print "Usage: crystal2qmc_wrap.py CRYSTALOUTPUT"
exit()
f = open(sys.argv[1], "r")
print "read crystal output from file "+sys.argv[1]
def readstr(string, f):
if string in f.read():
f.seek(0)
line=f.readline()
while line.find(string)==-1:
line=f.readline()
return line
else:
return "NOT FOUND"
print "Finding number of k-points ..."
line = readstr("NUMBER OF K POINTS IN THE IBZ", f)
if (line !="NOT FOUND"):
item = line.split()
nkpts = int(item[len(item)-1])
print "NUMBER OF K POINTS: ", nkpts
else:
print "I don't know the number of k points"
exit()
nline=(nkpts+3)/4
line = readstr("K POINTS COORDINATES", f)
kr=[]
ki=[]
m=0
n=0
if (line !="NOT FOUND"):
for i in range(nline):
items = f.readline().split()
m = 0
while ( n < nkpts and m < len(items)/4):
if items[4*m].find("R")!=-1:
kr.append(4*i+m)
else:
ki.append(4*i+m)
n = n+1
m = m+1
line = readstr("WEIGHT OF K POINTS - MONKHORST", f)
if (line != "NOT FOUND"):
n = 0
kweights = []
kw = open("kweights.dat", "w")
while ( n < nkpts):
items = f.readline().split()
for d in items:
kweights.append(float(d))
n +=1
kw.write("%s\n" %float(d))
kw.close()
else:
print "I don't find the weights for k"
if len(sys.argv)>2 and sys.argv[2]=="weights":
print "Generate k-points weights only, to kweights.dat\nnow exiting..."
exit()
else:
print "Generating qwalk input..."
print "real k-point index", kr
print "complex k-point index", ki
#line = readstr("SUMMED SPIN DENSITY", f)
#if (line !="NOT FOUND"):
# items = line.split()
# spin = float(items[len(items)-1])
# print "TOTAL SPIN DENSITY: ", spin
'''
if ( spin < 0.1 ):
print "Treated as singlet states"
upf = 1
else:
print "Treated as doublet..."
upf = 2
'''
# upf = int(spin) + 1
j=0
for i in kr:
fout=open("crystal2qmc.inp", "w")
# fout.write("%s" %upf+"\n"+"%s" %j)
fout.write("%s" %j)
fout.close()
os.system(". ~/.bashrc; runcrystal2qmc -o qwalk_%s %s < crystal2qmc.inp" %(i, sys.argv[1]))
os.system("cp qwalk_%s.jast2 qwalk.jast2; rm qwalk_%s.jast2" %(i, i))
j +=1
j=0
for i in ki:
fout=open("crystal2qmc.inp", "w")
# fout.write("%s" %upf+"\n"+"%s" %j)
fout.write("%s" %j) # mirrors the real-k loop above; the index write was missing here
fout.close()
os.system(". ~/.bashrc; runcrystal2qmc -c -o qwalk_%s %s < crystal2qmc.inp" %(i, sys.argv[1]))
j +=1
os.system("cp qwalk_%s.jast2 qwalk.jast2; rm qwalk_%s.jast2" %(i, i))
exit()
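# Example invocations (illustrative file name). The optional second argument
# "weights" stops after writing kweights.dat, as handled above:
#   python crystal2qmc_wrap.py crystal_run.out
#   python crystal2qmc_wrap.py crystal_run.out weights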
|
bbusemeyer/mainline
|
utils/crystal2qmc_wrap.py
|
Python
|
gpl-2.0
| 3,044
|
[
"CRYSTAL"
] |
1e4f28a903ddf3b03291b35b4f73fa841e8c29ecb7e37319f230c5f92e356575
|
#-----------------------------------------------------------------------------
# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import zmq
from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
class TestMultipart(BaseZMQTestCase):
def test_router_dealer(self):
router, dealer = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
msg1 = b'message1'
dealer.send(msg1)
ident = self.recv(router)
more = router.rcvmore
self.assertEqual(more, True)
msg2 = self.recv(router)
self.assertEqual(msg1, msg2)
more = router.rcvmore
self.assertEqual(more, False)
def test_basic_multipart(self):
a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
msg = [ b'hi', b'there', b'b']
a.send_multipart(msg)
recvd = b.recv_multipart()
self.assertEqual(msg, recvd)
if have_gevent:
class TestMultipartGreen(GreenTest, TestMultipart):
pass
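# A minimal standalone sketch of the multipart pattern these tests exercise
# (endpoint name is arbitrary; inproc keeps everything in-process):
#
#   ctx = zmq.Context.instance()
#   a, b = ctx.socket(zmq.PAIR), ctx.socket(zmq.PAIR)
#   a.bind('inproc://multipart-demo')
#   b.connect('inproc://multipart-demo')
#   a.send_multipart([b'hi', b'there'])
#   assert b.recv_multipart() == [b'hi', b'there']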
|
IsCoolEntertainment/debpkg_python-pyzmq
|
zmq/tests/test_multipart.py
|
Python
|
lgpl-3.0
| 1,579
|
[
"Brian"
] |
0246b65f097b062e907d4645fa278d6c0c7c6e3d6611eeb59c657726e70e89d4
|
import glob
files = glob.glob('*.cc')
files += glob.glob('*.h')
#rep_list = ['ci_tol.h', 'civect.h', 'ciwave.h', 'globaldefs.h odometer.h', 'slaterd.h', 'structs.h']
rep_list = ['globaldefs.h', 'odometer.h']
for fn in files:
with open(fn, 'r') as f:
data = f.read()
for string in rep_list:
fname = '#include "**REP**"'
rname = '#include "psi4/detci/**REP**"'
fname = fname.replace('**REP**', string)
rname = rname.replace('**REP**', string)
data = data.replace(fname, rname)
with open(fn, 'w') as f:
f.write(data)
|
kannon92/psi4
|
psi4/src/psi4/detci/replacer.py
|
Python
|
gpl-2.0
| 594
|
[
"Psi4"
] |
f274103c5d0b496a489c5f3ffc9bb205bb70662238c761a7084fb4db0aa2f6c6
|
from rdkit import Chem
from copy import copy
from pipelines_utils import utils
from molvs import enumerate_tautomers_smiles,canonicalize_tautomer_smiles,Standardizer
from molvs.charge import Uncharger,Reionizer
from standardiser import standardise
standardizer = Standardizer()
def _spam(n):
out=[]
for perm in _getPerms(n):
elem = [ int(i) for i in list(perm) ]
out.append(elem)
return out
def _getPerms(n):
from itertools import permutations
for i in _getCandidates(n):
for perm in set(permutations(i)):
yield ''.join(perm)
def _getCandidates(n):
for i in range(0, n+1):
res = "1" * i + "0" * (n - i)
yield res
def enumerateTautomers(mol):
"""
Get all of the Tautomers of a given molecule
:param mol: the input molecule
:return: a list of Tautomers
"""
smiles = Chem.MolToSmiles(mol,isomericSmiles=True)
tauts = enumerate_tautomers_smiles(smiles)
##TODO Append Parent molecule name
return [Chem.MolFromSmiles(x) for x in tauts]
def getCanonTautomer(mol):
"""
Get the canonical tautomer form
:param mol: the input molecule
:return: a list of Tautomers
"""
smiles = Chem.MolToSmiles(mol,isomericSmiles=True)
x = canonicalize_tautomer_smiles(smiles)
return Chem.MolFromSmiles(x)
def enumerateStereoIsomers(mol):
out = []
chiralCentres = Chem.FindMolChiralCenters(mol, includeUnassigned=True)
#return the molecule object when no chiral centres were identified
if chiralCentres == []:
return [mol]
#All bit permutations with number of bits equals number of chiralCentres
elements = _spam(len(chiralCentres))
for isoId,element in enumerate(elements):
for centreId,i in enumerate(element):
atomId = chiralCentres[centreId][0]
if i == 0:
mol.GetAtomWithIdx(atomId).SetChiralTag(Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW)
elif i == 1:
mol.GetAtomWithIdx(atomId).SetChiralTag(Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW)
outmol = copy(mol)
utils.log("Enumerated ", Chem.MolToSmiles(mol, isomericSmiles=True))
out.append(outmol)
return out
def molVsStandardizer(mol):
return standardizer.standardize(mol)
def flatkinsonStandardizer(mol):
return standardise.run(mol)
STANDARD_MOL_METHODS = {"molvs": molVsStandardizer, "flatkinson": flatkinsonStandardizer}
def getNeutralMolecule(mol):
uncharger = Uncharger()
return uncharger.uncharge(mol)
def getReionisedMolecule(mol):
reioniser = Reionizer()
return reioniser.reionize(mol)
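# Minimal usage sketch (illustrative SMILES; relies only on the functions and
# imports above):
#   mol = Chem.MolFromSmiles('CC(=O)O')
#   tauts = enumerateTautomers(mol)
#   isomers = enumerateStereoIsomers(Chem.MolFromSmiles('CC(Cl)Br'))
#   neutral = getNeutralMolecule(Chem.MolFromSmiles('CC(=O)[O-]'))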
|
InformaticsMatters/pipelines
|
src/python/pipelines/rdkit/sanify_utils.py
|
Python
|
apache-2.0
| 2,643
|
[
"RDKit"
] |
f00b5e763b546e68c25898e6767e0e2b251507eba9f339f28ad0d0714b4e594e
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/11-ferrofluid/11-ferrofluid_part1.py",
equil_steps=200, equil_rounds=10)
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
system = tutorial.system
def test(self):
self.assertTrue(
int(np.sum(tutorial.n_clusters)) == len(tutorial.cluster_sizes))
for i in range(8):
self.assertLess(
tutorial.size_dist[0][i + 1],
tutorial.size_dist[0][i])
if __name__ == "__main__":
ut.main()
|
mkuron/espresso
|
testsuite/scripts/tutorials/test_11-ferrofluid_1.py
|
Python
|
gpl-3.0
| 1,339
|
[
"ESPResSo"
] |
bce301b3db84d34a1e00b6fb12a03429b3eea6a0888b4eceb21c2f90a0a264b1
|
from lib.keras_utils import *
from lib.utils import *
from parameters import *
# Explicit imports, in case the star imports above do not already re-export
# these modules:
import time
import numpy as np
import tensorflow as tf
EPS = 1e-10 # Epsilon
MIN_CP = -2. # Minimum power index of c
MAX_CP = 2. # Maximum power index of c
SCORE_THRES = 0.9 # Softmax score threshold to consider success of attacks
PROG_PRINT_STEPS = 50 # Print progress every certain steps
EARLYSTOP_STEPS = 5000 # Early stopping if no improvement for certain steps
class OptCarlini:
"""
This class implements adversarial examples generator modeled after Carlini
et al. (https://arxiv.org/abs/1608.04644). It also includes options to use
other losses, change of variable, optimizer with bounding box (L-BFGS-B,
etc.) and mask.
"""
def _setup_opt(self):
"""Used to setup optimization when c is updated"""
# obj_func = c * loss + l2-norm(d)
self.f = self.c * self.loss + self.norm
# Setup optimizer
if self.use_bound:
# Use Scipy optimizer with upper and lower bound [0, 1]
self.optimizer = ScipyOptimizerInterface(
self.f, var_list=self.var_list, var_to_bounds={
self.x_in: (0, 1)},
method="L-BFGS-B")
else:
# Use learning rate decay
global_step = tf.Variable(0, trainable=False)
# decay_lr = tf.train.exponential_decay(
# self.lr, global_step, 500, 0.95, staircase=True)
if self.decay:
lr = tf.train.inverse_time_decay(
self.lr, global_step, 10, 0.01)
else:
lr = self.lr
# Use Adam optimizer
self.optimizer = tf.train.AdamOptimizer(
learning_rate=lr, beta1=0.9, beta2=0.999, epsilon=1e-08)
self.opt = self.optimizer.minimize(
self.f, var_list=self.var_list, global_step=global_step)
def __init__(self, model, target=True, c=1, lr=0.01, init_scl=0.1,
use_bound=False, loss_op=0, k=5, var_change=True,
use_mask=True, decay=True):
"""
Initialize the optimizer. Default values of the parameters are
recommended and decided specifically for attacking traffic sign
recognizer trained on GTSRB dataset.
Parameters
----------
model : Keras model
Target model to attack
target : (optional) bool (default: True)
True if performing targeted attack; False, otherwise.
c : (optional) float (default: 1)
Constant balancing the objective function (f) between norm
of perturbation and loss (f = c * loss + norm). Larger c
means stronger attack but also more "visible" (stronger
perturbation).
lr : (optional) float (default: 0.01)
Learning rate of optimizer
init_scl : (optional) float (default: 0.1)
Standard deviation of Gaussian used to initialize objective
variable
use_bound : (optional) bool (default: False)
If True, optimizer with bounding box [0, 1] will be used.
Otherwise, Adam optimizer is used.
loss_op : (optional) int (default: 0)
Option for loss function to optimize over.
loss_op = 0: Carlini's l2-attack
loss_op = 1: cross-entropy loss
k : (optional) float (default: 5)
"Confidence threshold" used with loss_op = 0. Used to
control strength of attack. The higher the k the stronger
the attack.
var_change : (optional) bool (default: True)
If True, objective variable will be changed according to
Carlini et al. (which also gets rid of the need to use
any bounding) Otherwise, optimize directly on perturbation.
use_mask : (optional) bool (default: True)
if True, perturbation will be masked before applying to
the target sign. Mask must be specified when calling
optimize() and optimize_search().
"""
self.model = model
self.target = target
self.c = c
self.lr = lr
self.init_scl = init_scl
self.use_bound = use_bound
self.loss_op = loss_op
self.k = k
self.var_change = var_change
self.use_mask = use_mask
self.decay = decay
# Initialize variables
init_val = np.random.normal(scale=init_scl, size=INPUT_SHAPE)
self.x = K.placeholder(dtype='float32', shape=INPUT_SHAPE)
self.y = K.placeholder(dtype='float32', shape=(1, OUTPUT_DIM))
if self.use_mask:
self.m = K.placeholder(dtype='float32', shape=INPUT_SHAPE)
# If change of variable is specified
if var_change:
# Optimize on w instead of d
self.w = tf.Variable(initial_value=init_val, trainable=True,
dtype=tf.float32)
x_full = (0.5 + EPS) * (tf.tanh(self.w) + 1)
self.d = x_full - self.x
if self.use_mask:
self.d = tf.multiply(self.d, self.m)
self.x_in = self.x + self.d
self.var_list = [self.w]
else:
# Optimize directly on d (perturbation)
self.d = tf.Variable(initial_value=init_val, trainable=True,
dtype=tf.float32)
if self.use_mask:
self.d = tf.multiply(self.d, self.m)
self.x_in = self.x + self.d
self.var_list = [self.d]
model_output = self.model(self.x_in)
if loss_op == 0:
# Carlini l2-attack's loss
# Get 2 largest outputs
output_2max = tf.nn.top_k(model_output, 2)[0][0]
# Find z_i = max(Z[i != y])
i_y = tf.argmax(self.y, axis=1, output_type=tf.int32)[0]
i_max = tf.to_int32(tf.argmax(model_output, axis=1)[0])
z_i = tf.cond(tf.equal(i_y, i_max),
lambda: output_2max[1], lambda: output_2max[0])
if self.target:
# loss = max(max(Z[i != t]) - Z[t], -K)
self.loss = tf.maximum(z_i - model_output[0, i_y], -self.k)
else:
# loss = max(Z[y] - max(Z[i != y]), -K)
self.loss = tf.maximum(model_output[0, i_y] - z_i, -self.k)
elif loss_op == 1:
# Cross entropy loss, loss = -log(F(x_t))
self.loss = K.categorical_crossentropy(
self.y, model_output, from_logits=True)[0]
if not self.target:
# loss = log(F(x_y))
self.loss *= -1
else:
raise ValueError("Invalid loss_op")
# Regularization term with l2-norm
self.norm = tf.norm(self.d, ord='euclidean')
# Call a helper function to finalize and set up optimizer
self._setup_opt()
def optimize(self, x, y, weights_path, n_step=1000, prog=True, mask=None):
"""
Run optimization attack, produce adversarial example from a single
sample, x.
Parameters
----------
x : np.array
Original benign sample
y : np.array
One-hot encoded target label if <target> was set to True or
one-hot encoded true label, otherwise.
weights_path : str
Path to the model weights to load before optimizing
n_step : (optional) int
Number of steps to run optimization
prog : (optional) bool
True if progress should be printed
mask : (optional) np.array of 0 or 1, shape=(n_sample, height, width)
Mask to restrict gradient update on valid pixels
Returns
-------
x_adv : np.array, shape=IMG_SHAPE
Output adversarial example created from x
norm : float
l2-norm of the perturbation applied to x
"""
with tf.Session() as sess:
# Initialize variables and load weights
sess.run(tf.global_variables_initializer())
self.model.load_weights(weights_path)
# Ensure that shape is correct
x_ = np.copy(x).reshape(INPUT_SHAPE)
y_ = np.copy(y).reshape((1, OUTPUT_DIM))
if self.var_change:
# Initialize w = arctanh( 2(x + noise) - 1 )
init_rand = np.random.normal(
-self.init_scl, self.init_scl, size=INPUT_SHAPE)
# Clip values to remove numerical error atanh(1) or atanh(-1)
tanh_w = np.clip((x_ + init_rand) * 2 - 1, -1 + EPS, 1 - EPS)
self.w.load(np.arctanh(tanh_w))
# Include mask in feed_dict if mask is used
if self.use_mask:
m_ = np.repeat(
mask[np.newaxis, :, :, np.newaxis], N_CHANNEL, axis=3)
feed_dict = {self.x: x_, self.y: y_,
self.m: m_, K.learning_phase(): False}
else:
feed_dict = {self.x: x_, self.y: y_, K.learning_phase(): False}
# Set up some variables for early stopping for loss_op = 0
min_norm = float("inf")
min_d = None
earlystop_count = 0
# Start optimization
for step in range(n_step):
if self.use_bound:
self.optimizer.minimize(sess, feed_dict=feed_dict)
else:
sess.run(self.opt, feed_dict=feed_dict)
# Keep track of "best" solution
if self.loss_op == 0:
norm = sess.run(self.norm, feed_dict=feed_dict)
loss = sess.run(self.loss, feed_dict=feed_dict)
# Save working adversarial example with smallest norm
if loss == -self.k:
if norm < min_norm:
min_norm = norm
min_d = sess.run(self.d, feed_dict=feed_dict)
# Reset early stopping counter
earlystop_count = 0
else:
earlystop_count += 1
# Early stop if no improvement
if earlystop_count > EARLYSTOP_STEPS:
print(step, min_norm)
break
# Print progress
if (step % PROG_PRINT_STEPS == 0) and prog:
f = sess.run(self.f, feed_dict=feed_dict)
norm = sess.run(self.norm, feed_dict=feed_dict)
loss = sess.run(self.loss, feed_dict=feed_dict)
print("Step: {}, norm={:.3f}, loss={:.3f}, obj={:.3f}".format(
step, norm, loss, f))
if min_d is not None:
x_adv = (x_ + min_d).reshape(IMG_SHAPE)
return x_adv, min_norm
else:
d = sess.run(self.d, feed_dict=feed_dict)
norm = sess.run(self.norm, feed_dict=feed_dict)
x_adv = (x_ + d).reshape(IMG_SHAPE)
return x_adv, norm
def optimize_search(self, x, y, weights_path, n_step=1000, search_step=10,
prog=True, mask=None):
"""
Run optimization attack, produce adversarial example from a single
sample, x. Does binary search on log_10(c) to find optimal value of c.
Parameters
----------
x : np.array
Original benign sample
y : np.array, shape=(OUTPUT_DIM,)
One-hot encoded target label if <target> was set to True or
one-hot encoded true label, otherwise.
weights_path : str
Path to the model weights to load before each optimization run
n_step : (optional) int
Number of steps to run optimization
search_step : (optional) int
Number of steps to search on c
prog : (optional) bool
True if progress should be printed
mask : (optional) np.array of 0 or 1, shape=(n_sample, height, width)
Mask to restrict gradient update on valid pixels
Returns
-------
x_adv_suc : np.array, shape=IMG_SHAPE
Successful adversarial example created from x. None if fail.
norm_suc : float
Perturbation magnitude of x_adv_suc. None if fail.
"""
# Declare min-max of search line [1e-2, 1e2] for c = 1e(cp)
cp_lo = MIN_CP
cp_hi = MAX_CP
x_adv_suc = None
norm_suc = None
start_time = time.time()
# Binary search on cp
for c_step in range(search_step):
# Update c
cp = (cp_lo + cp_hi) / 2
self.c = 10 ** cp
self._setup_opt()
# Run optimization with new c
x_adv, norm = self.optimize(
x, y, weights_path, n_step=n_step, prog=False, mask=mask)
# Evaluate result
y_pred = self.model.predict(x_adv.reshape(INPUT_SHAPE))[0]
score = softmax(y_pred)[np.argmax(y)]
if self.target:
if score > SCORE_THRES:
# Attack succeeded, decrease cp to lower norm
cp_hi = cp
x_adv_suc = np.copy(x_adv)
norm_suc = norm
else:
# Attack failed, increase cp for stronger attack
cp_lo = cp
else:
if score > 1 - SCORE_THRES:
# Attack failed
cp_lo = cp
else:
# Attack succeeded
cp_hi = cp
x_adv_suc = np.copy(x_adv)
norm_suc = norm
if prog:
print("c_Step: {}, c={:.4f}, score={:.3f}".format(
c_step, self.c, score))
print("Finished in {:.2f}s".format(time.time() - start_time))
return x_adv_suc, norm_suc
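# Hedged usage sketch (model, weights file and inputs are hypothetical; shapes
# come from parameters.py via the star import):
#   model = ...                                   # any Keras classifier
#   attack = OptCarlini(model, target=True, loss_op=0, use_mask=False)
#   x_adv, norm = attack.optimize(x, y_target, 'weights.h5', n_step=1000)
#   x_adv, norm = attack.optimize_search(x, y_target, 'weights.h5')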
|
chawins/aml
|
lib/OptCarlini.py
|
Python
|
mit
| 14,164
|
[
"Gaussian"
] |
5ac50621474a019a2311600f05a2a8145181b8fd242e1ad4e83117182697cf9d
|
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Author: wangtaize@baidu.com
# Date: 2015-03-30
class ValidateException(Exception):
pass
def validate_init_service_group_req(request):
ret = {}
name = request.POST.get('name',None)
if not name:
raise ValidateException("name is required")
ret['name'] = name
# package validate logic
pkg_type_str = request.POST.get("pkgType",None)
if not pkg_type_str:
raise ValidateException("package type is required")
pkg_type = int(pkg_type_str)
ret["pkg_type"] = pkg_type
pkg_src = request.POST.get("pkgSrc",None)
if not pkg_src:
raise ValidateException("pkgSrc is required")
ret['pkg_src'] = pkg_src
ret['start_cmd'] = request.POST.get("startCmd",None)
if not ret['start_cmd']:
raise ValidateException("startCmd is required")
#cpu validate logic
cpu_share_str = request.POST.get("cpuShare",None)
if not cpu_share_str:
raise ValidateException("cpu_share is required")
cpu_share =float(cpu_share_str)
ret['cpu_share'] = cpu_share
#memory validate logic
memory_limit_str = request.POST.get("memoryLimit",None)
if not memory_limit_str:
raise ValidateException("memoryLimit is required")
ret['memory_limit'] = int(memory_limit_str)
# replicate count
replicate_count_str = request.POST.get("replicate",None)
if not replicate_count_str:
raise ValidateException("replicate is required")
ret['replicate_count'] = int(replicate_count_str)
if ret['replicate_count'] < 0:
raise ValidateException("replicate_count is invalidate")
deploy_step_size_str = request.POST.get("deployStepSize",None)
if not deploy_step_size_str:
ret["deploy_step_size"] = -1
else:
ret["deploy_step_size"] = int(deploy_step_size_str)
one_task_per_host_str = request.POST.get("oneTaskPerHost","false")
if one_task_per_host_str == 'true':
ret['one_task_per_host'] = True
else:
ret['one_task_per_host'] = False
tag_str = request.POST.get("tag",None)
if not tag_str:
ret['tag'] = []
else:
ret['tag'] = [tag_str.strip()]
return ret
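# Hedged usage sketch: any object whose POST attribute is dict-like satisfies
# the interface above, so a hypothetical stub can stand in for a Django
# request when testing:
class _StubRequest(object):
    POST = {'name': 'web', 'pkgType': '1', 'pkgSrc': 'ftp://pkg/web.tar.gz',
            'startCmd': './run.sh', 'cpuShare': '0.5', 'memoryLimit': '1024',
            'replicate': '2'}
print(validate_init_service_group_req(_StubRequest()))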
|
linyvxiang/galaxy
|
console/backend/src/console/taskgroup/helper.py
|
Python
|
bsd-3-clause
| 2,337
|
[
"Galaxy"
] |
097f915307fa2ef7d9ac0b1a989aa52d7ecd5b8bb852e064a585ded1789e722b
|
'''
Created on 23/11/2009
@author: brian
'''
from scipysim.actors.signal import Ramp
from scipysim.actors.display import Plotter
from scipysim.actors import Channel, CompositeActor
class RampPlot(CompositeActor):
'''This example simulation connects a ramp source to a plotter.'''
def __init__(self):
'''Create the components'''
super(RampPlot, self).__init__()
connection = Channel()
src = Ramp(connection)
dst = Plotter(connection, xlabel='Time (s)', ylabel='Amplitude', title='Ramp Plot')
self.components = [src, dst]
if __name__ == '__main__':
RampPlot().run()
|
hardbyte/scipy-sim
|
scipysim/models/scripted_ramp_plot.py
|
Python
|
gpl-3.0
| 628
|
[
"Brian"
] |
8988cf150b76ed0abedbf280d740c67f498c53c85892154cc39ed478c3dedae9
|
# -*- coding: utf-8 -*-
import math
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
import pickle as pkl
import cv2
import skimage
import random
import tensorflow.python.platform
from tensorflow.python.ops import rnn
from keras.preprocessing import sequence
from collections import Counter
from collections import defaultdict
import itertools
test_image_path='./data/acoustic-guitar-player.jpg'
vgg_path='./data/vgg16-20160129.tfmodel'
n=50000-2
def map_lambda():
return n+1
def rev_map_lambda():
return "<UNK>"
def load_text(n,capts,num_samples=None):
# fname = 'Oxford_English_Dictionary.txt'
# txt = []
# with open(fname,'rb') as f:
# txt = f.readlines()
# txt = [x.decode('utf-8').strip() for x in txt]
# txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
# List of words
# word_list = [x.split(' ', 1)[0].strip() for x in txt]
# # List of definitions
# def_list = [x.split(' ', 1)[1].strip()for x in txt]
with open('./training_data/training_data.pkl','rb') as raw:
word_list,dl=pkl.load(raw)
def_list=[]
# def_list=[' '.join(defi) for defi in def_list]
i=0
wd={}
while i<len( dl):
defi=dl[i]
if len(defi)>0:
def_list+=[' '.join(defi).lower()]
i+=1
word=word_list[i-1].lower()
word_list[i-1]=word
if word not in wd:
wd[word]=[]
wd[word].append(def_list[-1])
else:
dl.pop(i)
word_list.pop(i)
maxlen=0
minlen=100
for defi in def_list:
minlen=min(minlen,len(defi.split()))
maxlen=max(maxlen,len(defi.split()))
print(minlen)
print(maxlen)
maxlen=30
# # Initialize the "CountVectorizer" object, which is scikit-learn's
# # bag of words tool.
# vectorizer = CountVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# max_features = None, \
# token_pattern='\\b\\w+\\b') # Keep single character words
_map,rev_map=get_one_hot_map(word_list,def_list,n,captlist=capts)
# pkl.dump(_map,open('mapaoh.pkl','wb'))
# pkl.dump(rev_map,open('rev_mapaoh.pkl','wb'))
_map=pkl.load(open('mapaoh.pkl','rb'))
rev_map=pkl.load(open('rev_mapaoh.pkl','rb'))
if num_samples is None: # default to using all captions (the original test was inverted)
num_samples=len(capts)
# X = map_one_hot(word_list[:num_samples],_map,1,n)
# y = (36665, 56210)
# print _map
if capts is not None:
# y,mask,auxsent,auxmask,auxword,auxchoices = map_one_hot(capts[:num_samples],_map,maxlen,n,aux=True,wd=wd)
# np.save('maskmainaux',mask)
# np.save('ycoh',y)
# np.save('yaux',auxsent)
# np.save('maskaux',auxmask)
# np.save('Xaux',auxword)
# np.save('caux',auxchoices)
# exit()
print(capts)
y=np.load('ycoh.npy')#,'r')
auxmask=np.load('maskaux.npy')#,'r')
mask=np.load('maskmainaux.npy')#,'r')
auxword=np.load('Xaux.npy')#,'r')
auxsent=np.load('yaux.npy')#,'r')
auxchoices=np.load('caux.npy')#,'r')
else:
# np.save('X',X)
# np.save('yc',y)
# np.save('maskc',mask)
mask=np.load('maskaoh.npy','r')
y=np.load('yaoh.npy','r')
X=np.load('Xaoh.npy')#,'r')
print (np.max(y))
if capts is not None:
return X, y,mask,rev_map,auxsent,auxmask,auxword,auxchoices
return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n,captlist=None):
# words={}
# for line in to_def:
# if line:
# words[line.split()[0]]=1
# counts=defaultdict(int)
# uniq=defaultdict(int)
# for line in corpus:
# for word in line.split():
# if word not in words:
# counts[word]+=1
# words=list(words.keys())
words=[]
counts=defaultdict(int)
uniq=defaultdict(int)
for line in to_def+corpus:
for word in line.split():
if word not in words:
counts[word]+=1
_map=defaultdict(map_lambda)
rev_map=defaultdict(rev_map_lambda)
# words=words[:25000]
for i in counts.values():
uniq[i]+=1
# print (len(words))
counts2=defaultdict(int)
if captlist is not None:
for line in captlist:
for word in line.split():
if word=='#START#':
continue
counts2[word.lower()]+=1
print(len(counts.keys()), len(counts2.keys()))
words=list(map(lambda z:z[0],reversed(sorted(counts2.items(),key=lambda x:x[1]))))[:n-len(words)]
# random.shuffle(words)
words=words[:3000]
for word in words:
if word in counts:
del counts[word]
print(len(counts.keys()), len(counts2.keys()))
words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
print (len(words))
i=0
# random.shuffle(words)
# for num_bits in range(binary_dim):
# for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
# bitmap=np.zeros(binary_dim)
# bitmap[np.array(bit_config)]=1
# num=bitmap*(2** np.arange(binary_dim ))
# num=np.sum(num).astype(np.uint32)
# word=words[i]
# _map[word]=num
# rev_map[num]=word
# i+=1
# if i>=len(words):
# break
# if i>=len(words):
# break
_map['#START#']=0
for word in words:
i+=1
_map[word]=i
rev_map[i]=word
rev_map[n+1]='<UNK>'
if zero_end_tok:
rev_map[0]='.'
else:
rev_map[0]='Start'
rev_map[n+2]='End'
# print (list(reversed(sorted(uniq.items()))))
print (len(list(uniq.items())))
# print rev_map
return _map,rev_map
def map_word_emb(corpus,_map):
### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
rtn=[]
rtn2=[]
for word in corpus:
mapped=_map[word]
rtn.append(mapped)
if get_rand_vec:
mapped_rand=random.choice(list(_map.keys()))
while mapped_rand==word:
mapped_rand=random.choice(list(_map.keys()))
mapped_rand=_map[mapped_rand]
rtn2.append(mapped_rand)
if get_rand_vec:
return np.array(rtn),np.array(rtn2)
return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n,aux=None,wd=None):
if maxlen==1:
if not form2:
total_not=0
rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
for l,_line in enumerate(corpus):
line=_line.lower()
if len(line)==0:
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==n+1: # unknown-word index; the hard-coded 75001 was stale
total_not+=1
rtn[l,mapped]=1
print (total_not,len(corpus))
return rtn
else:
total_not=0
rtn=np.zeros([len(corpus)],dtype=np.float32)
for l,_line in enumerate(corpus):
line=_line.lower()
if len(line)==0:
rtn[l]=n+2 # index form of the end slot; rtn[l,-1]=1 crashed on this 1-D array
else:
mapped=_map[line]
if mapped==n+1: # unknown-word index; the hard-coded 75001 was stale
total_not+=1
rtn[l]=mapped
print (total_not,len(corpus))
return rtn
else:
if form2:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
print (rtn.shape)
mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
print (mask.shape)
mask[:,1]=1.0
totes=0
nopes=0
wtf=0
rtn3=[]
rtn3=np.zeros([len(corpus),5,maxlen+2],dtype=np.int32)
rtn4=[]
rtn4=np.zeros([len(corpus),5,maxlen+2],dtype=np.float32)
rtn5=[]
rtn5=np.zeros([len(corpus),5,1],dtype=np.int32)
rtn6=[]
rtn6=np.zeros([len(corpus),5,1],dtype=np.float32)
for l,_line in enumerate(corpus):
x=0
line=_line.lower().split()
auxlist=[]
auxmask=[]
auxword=[]
auxchoices=[]
for i in range(min(len(line),maxlen-1)):
# if line[i] not in _map:
# nopes+=1
mapped=_map[line[i]]
rtn[l,i+1]=mapped
y=0
if not (wd is None) and mapped!=n+1 and line[i] in wd:
tempsent=np.zeros([1,maxlen+2],dtype=np.int32)
_sent=random.choice(wd[line[i]])
tempmask=np.zeros([1,maxlen+2],dtype=np.float32)
tempword=np.ones([1,1],dtype=np.int32)
tempmask[0,1]=1.0
tempword*=mapped
tempchoice=np.ones([1,1],dtype=np.float32)
sent=_sent.split()
for j in range(min(len(sent),maxlen-1)):
m2=_map[sent[j]]
tempsent[0,j+1]=m2
tempmask[0,j+1]=1.0
y=j+1
tempsent[0,y]=0
tempmask[0,y]=1.0
auxlist.append(tempsent)
auxmask.append(tempmask)
auxword.append(tempword)
auxchoices.append(tempchoice)
if mapped==n+1:
wtf+=1
mask[l,i+1]=1.0
totes+=1
x=i+1
to_app=n+2
if zero_end_tok:
to_app=0
rtn[l,x+1]=to_app
mask[l,x+1]=1.0
ilist=np.arange(len(auxlist))
if len(auxlist)>=5:
random.shuffle(ilist)
auxlist=np.concatenate(auxlist)
auxlist=auxlist[ilist[:5]]
elif len(auxlist)>0:
# print auxlist
# print [x.shape for x in auxlist]
auxlist+=[np.zeros([5-len(auxlist),maxlen+2],dtype=np.int32)]
# print auxlist
# print [x.shape for x in auxlist]
auxlist=np.concatenate(auxlist)
else:
auxlist=np.zeros([5,maxlen+2],dtype=np.int32)
# rtn3.append(auxlist)
rtn3[l,:,:]=auxlist
# print rtn3
if len(auxmask)>=5:
auxmask=np.concatenate(auxmask)
auxmask=auxmask[ilist[:5]]
elif len(auxmask)>0:
auxmask+=[np.zeros([5-len(auxmask),maxlen+2],dtype=np.float32)]
auxmask=np.concatenate(auxmask)
else:
auxmask=np.zeros([5,maxlen+2],dtype=np.float32)
# rtn4.append(auxmask)
# print l
rtn4[l,:,:]=auxmask
if len(auxword)>=5:
auxword=np.concatenate(auxword)
auxword=auxword[ilist[:5]]
elif len(auxlist)>0:
auxword+=[np.zeros([5-len(auxword),1],dtype=np.int32)]
auxword=np.concatenate(auxword)
else:
auxword=np.zeros([5,1],dtype=np.int32)
# rtn5.append(auxword)
rtn5[l,:,:]=auxword
if len(auxchoices)>=5:
auxchoices=np.concatenate(auxchoices)
auxchoices=auxchoices[ilist[:5]]
elif len(auxlist)>0:
auxchoices+=[np.zeros([5-len(auxchoices),1],dtype=np.float32)]
auxchoices=np.concatenate(auxchoices)
else:
auxchoices=np.zeros([5,1],dtype=np.float32)
# rtn6.append(auxchoices)
rtn6[l,:,:]=auxchoices
print (nopes,totes,wtf)
if not (aux is None):
# print np.array(rtn6)[-1],np.array(rtn4)[-1]
# return rtn,mask,np.array(rtn3),np.array(rtn4),np.array(rtn5),np.array(rtn6)
return rtn,mask,rtn3,rtn4,rtn5,rtn6
else:
return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
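# Worked example (illustrative numbers): with fan_in=4096, fan_out=256 and the
# default constant=1e-4, the bound is 1e-4*sqrt(6/4352) ~= 3.7e-6, so weights
# start very close to zero; pass constant=1 to recover the usual Xavier range.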
class Caption_Generator():
def __init__(self, dim_in, dim_embed, dim_hidden, batch_size, n_lstm_steps, n_words, init_b=None,from_image=False,n_input=None,n_lstm_input=None,n_z=None):
self.dim_in = dim_in
self.dim_embed = dim_embed
self.dim_hidden = dim_hidden
self.batch_size = batch_size
self.n_lstm_steps = n_lstm_steps
self.n_words = n_words
self.n_input = n_input
self.n_lstm_input=n_lstm_input
self.n_z=n_z
if from_image:
with open(vgg_path,'rb') as f:
fileContent = f.read()
graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)
self.images = tf.placeholder("float32", [1, 224, 224, 3])
tf.import_graph_def(graph_def, input_map={"images":self.images})
self.graph = tf.get_default_graph() # keep on self so get_caption can reach it
self.sess = tf.InteractiveSession(graph=self.graph)
self.from_image=from_image
# declare the variables to be used for our word embeddings
self.word_embedding = tf.Variable(tf.random_uniform([self.n_z, self.dim_embed], -0.1, 0.1), name='word_embedding')
self.embedding_bias = tf.Variable(tf.zeros([dim_embed]), name='embedding_bias')
# declare the LSTM itself
self.lstm = tf.contrib.rnn.BasicLSTMCell(dim_hidden)
self.dlstm = tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
# declare the variables to be used to embed the image feature embedding to the word embedding space
self.img_embedding = tf.Variable(tf.random_uniform([dim_in, dim_hidden], -0.1, 0.1), name='img_embedding')
self.img_embedding_bias = tf.Variable(tf.zeros([dim_hidden]), name='img_embedding_bias')
# declare the variables to go from an LSTM output to a word encoding output
self.word_encoding = tf.Variable(tf.random_uniform([dim_hidden, self.n_input], -0.1, 0.1), name='word_encoding')
# initialize this bias variable from the preProBuildWordVocab output
# optional initialization setter for encoding bias variable
if init_b is not None:
self.word_encoding_bias = tf.Variable(init_b, name='word_encoding_bias')
else:
self.word_encoding_bias = tf.Variable(tf.zeros([self.n_input]), name='word_encoding_bias')
with tf.device('/cpu:0'):
self.embw=tf.Variable(xavier_init(self.n_input,self.n_z),name='embw')
self.embb=tf.Variable(tf.zeros([self.n_z]),name='embb')
self.all_encoding_weights=[self.embw,self.embb]
self.auxy_in=tf.placeholder(tf.int32,[self.batch_size,2,self.n_lstm_steps])
self.auxy=tf.reshape(self.auxy_in,[self.batch_size*2,-1])
self.Xaux_in=tf.placeholder(tf.int32,[self.batch_size,2,1])
self.Xaux=tf.reshape(self.Xaux_in,[self.batch_size*2])
self.auxmask_in=tf.placeholder(tf.float32,[self.batch_size,2,self.n_lstm_steps])
self.auxmask=tf.reshape(self.auxmask_in,[self.batch_size*2,-1])
self.auxchoices_in=tf.placeholder(tf.float32,[self.batch_size,2,1])
self.auxchoices=tf.reshape(self.auxchoices_in,[self.batch_size*2,-1])
self.flatauxchoices=tf.reshape(self.auxchoices,[-1])
def build_model(self):
# declaring the placeholders for our extracted image feature vectors, our caption, and our mask
# (describes how long our caption is with an array of 0/1 values of length `maxlen`
img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
caption_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])
self.output_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
network_weights = self._initialize_weights()
self.network_weights=network_weights
# getting an initial LSTM embedding from our image_imbedding
image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
flat_caption_placeholder=tf.reshape(caption_placeholder,[-1])
#leverage one-hot sparsity to lookup embeddings fast
embedded_input,KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],flat_caption_placeholder,logit=True,ret_z=False)
# embedded_input=tf.stop_gradient(embedded_input)
KLD_loss=tf.multiply(KLD_loss,tf.reshape(mask,[-1,1]))
KLD_loss=tf.reduce_sum(KLD_loss)
# KLD_loss=tf.stop_gradient(KLD_loss)
# with tf.device('/cpu:0'):
# word_embeddings=tf.nn.embedding_lookup(self.embw,flat_caption_placeholder)
# word_embeddings+=self.embb
word_embeddings=tf.reshape(embedded_input,[self.batch_size,self.n_lstm_steps,-1])
embedded_input=tf.reshape(embedded_input,[self.batch_size,self.n_lstm_steps,-1])
# embedded_input=tf.nn.l2_normalize(embedded_input,dim=-1)
#initialize lstm state
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
rnn_output=[]
total_loss=0
self.deb=[]
with tf.variable_scope("RNNcapt"):
# unroll lstm
for i in range(self.n_lstm_steps):
if i > 0:
# if this isn’t the first iteration of our LSTM we need to get the word_embedding corresponding
# to the (i-1)th word in our caption
current_embedding = word_embeddings[:,i-1,:]
else:
#if this is the first iteration of our LSTM we utilize the embedded image as our input
current_embedding = image_embedding
if i > 0:
# allows us to reuse the LSTM tensor variable on each iteration
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
if i>0:
logit=tf.matmul(out,self.word_encoding)+self.word_encoding_bias
# total_loss+=tf.reduce_sum(tf.reduce_sum(tf.square((tf.matmul(out,self.word_encoding)+self.word_encoding_bias)-embedded_input[:,i,:]),axis=-1)*mask[:,i])
# normed_embedding= tf.nn.l2_normalize(out, dim=-1)
# normed_target=tf.nn.l2_normalize(embedded_input[:,i,:],dim=-1)
# cos_sim=tf.multiply(normed_embedding,normed_target)
# cos_sim=(tf.reduce_sum(cos_sim,axis=-1))
# # cos_sim=tf.reshape(cos_sim,[self.batch_size,-1])
# cos_sim=tf.reduce_sum(cos_sim*mask[:,i])
# total_loss+=cos_sim
print(logit.shape)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=caption_placeholder[:,i])
xentropy = xentropy * mask[:,i]
xentropy=tf.reduce_sum(xentropy)
total_loss+=xentropy
#perform classification of output
# rnn_output=tf.concat(rnn_output,axis=1)
# rnn_output=tf.reshape(rnn_output,[self.batch_size*(self.n_lstm_steps),-1])
# encoded_output=tf.matmul(rnn_output,self.word_encoding)+self.word_encoding_bias
# encoded_output=tf
# #get loss
# # normed_embedding= tf.nn.l2_normalize(encoded_output, dim=-1)
# # normed_target=tf.nn.l2_normalize(embedded_input,dim=-1)
# # cos_sim=tf.multiply(normed_embedding,normed_target)[:,1:]
# # cos_sim=(tf.reduce_sum(cos_sim,axis=-1))
# # cos_sim=tf.reshape(cos_sim,[self.batch_size,-1])
# # cos_sim=tf.reduce_sum(cos_sim[:,1:]*mask[:,1:])
# # cos_sim=cos_sim/tf.reduce_sum(mask[:,1:])
# # self.exp_loss=tf.reduce_sum((-cos_sim))
# # # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
# # total_loss = tf.reduce_sum(-(cos_sim))
# mse=tf.reduce_sum(tf.reshape(tf.square(encoded_output-embedded_input),[self.batch_size,self.n_lstm_steps,-1]),axis=-1)[:,1:]*(mask[:,1:])
# mse=tf.reduce_sum(mse)/tf.reduce_sum(mask[:,1:])
#average over timeseries length
# total_loss=tf.reduce_sum(masked_xentropy)/tf.reduce_sum(mask[:,1:])
# total_loss=mse
self.print_loss=total_loss
total_loss+=KLD_loss
total_loss/=tf.reduce_sum(mask[:,1:])
self.print_loss=total_loss
total_loss+=self.get_aux_loss()
return total_loss, img, caption_placeholder, mask
def get_aux_loss(self):
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
network_weights=self.network_weights
seqlen=tf.cast(tf.reduce_sum(self.auxmask,reduction_indices=-1),tf.int32)
KLD_penalty=1e-3
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
if not same_embedding:
input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
else:
input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
state = self.dlstm.zero_state(self.batch_size*2, dtype=tf.float32)
loss = 0
self.debug=0
probs=[]
with tf.variable_scope("RNN"):
for i in range(self.n_lstm_steps):
if i > 0:
# current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.auxy[:,i-1])
current_embedding=tf.matmul(current_embedding,network_weights['LSTM']['affine_weight'])+network_weights['LSTM']['affine_bias']
# if transfertype2:
# current_embedding=tf.stop_gradient(current_embedding)
loss+=tf.reduce_sum(KLD_loss*self.auxmask[:,i]*self.flatauxchoices)*KLD_penalty
else:
current_embedding = input_embedding
if i > 0:
tf.get_variable_scope().reuse_variables()
out, state = self.dlstm(current_embedding, state)
if i > 0:
onehot=self.auxy[:,i]
logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
# if not use_ctc:
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy = xentropy * self.auxmask[:,i]*self.flatauxchoices
xentropy=tf.reduce_sum(xentropy)
# self.debug+=xentropy
loss += xentropy
self.deb.append(xentropy)
# else:
# probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1))
# if not use_ctc:
# loss_ctc=0
# self.debug=self.debug/tf.reduce_sum(self.mask[:,1:])
# else:
# probs=tf.concat(probs,axis=1)
# probs=ctc_loss.get_output_probabilities(probs,self.auxy[:,1:,:])
# loss_ctc=ctc_loss.loss(probs,self.auxy[:,1:,:],self.n_lstm_steps-2,self.batch_size,seqlen-1)
# self.debug=tf.reduce_sum(input_embedding_KLD_loss)/self.batch_size*KLD_penalty+loss_ctc
self.aux_loss = (loss / tf.reduce_sum(self.auxmask[:,1:]*self.auxchoices))
self.aux_KLD=tf.reduce_sum(input_embedding_KLD_loss*self.flatauxchoices)*KLD_penalty#+loss_ctc
return self.aux_loss+self.aux_KLD
def build_generator(self, maxlen, batchsize=1,from_image=False):
#same setup as `build_model` function
img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(batchsize,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(image_embedding, state)
previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(maxlen):
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(previous_word, state)
# get a get maximum probability word and it's encoding from the output of the LSTM
logit = tf.matmul(out, self.word_encoding) + self.word_encoding_bias
best_word = tf.argmax(logit, 1)
with tf.device("/cpu:0"):
# get the embedding of the best_word to use as input to the next iteration of our LSTM
previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
previous_word += self.embedding_bias
all_words.append(best_word)
self.img=img
self.all_words=all_words
return img, all_words
def _initialize_weights(self):
all_weights = dict()
trainability=True
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(self.n_z, self.n_lstm_input),name='affine_weight',trainable=trainability),
'affine_bias': tf.Variable(tf.zeros(self.n_lstm_input),name='affine_bias',trainable=trainability)}
with tf.device('/cpu:0'):
om=tf.Variable(xavier_init(self.n_input, self.n_z),name='out_mean',trainable=trainability)
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability),
'out_log_sigma': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_log_sigmab',trainable=trainability)}
all_weights['variational_encoding'] = {
'out_mean': om,
'out_log_sigma': tf.Variable(xavier_init(self.n_input, self.n_z),name='out_log_sigma',trainable=trainability)}
# self.no_reload+=all_weights['input_meaning'].values()
# self.var_embs=[]
# if transfertype2:
# self.var_embs=all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
# self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
# if lstm_stack>1:
# self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
all_weights['LSTM'] = {
'affine_weight': tf.Variable(xavier_init(self.n_z, self.n_lstm_input),name='affine_weight2'),
'affine_bias': tf.Variable(tf.zeros(self.n_lstm_input),name='affine_bias2'),
'encoding_weight': tf.Variable(xavier_init(self.n_lstm_input,self.n_input),name='encoding_weight'),
'encoding_bias': tf.Variable(tf.zeros(self.n_input),name='encoding_bias')
}
all_encoding_weights=[all_weights[x].values() for x in all_weights]
for w in all_encoding_weights:
self.all_encoding_weights+=w
all_weights['LSTM']['lstm']= self.dlstm
return all_weights
def _get_input_embedding(self, ve_weights, aff_weights):
print(self.Xaux.shape)
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.Xaux,lookup=True,sample=True)
embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
return embedding,vae_loss
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False,ret_z=True):
# x=tf.matmul(x,self.embw)+self.embb
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x,lookup=True)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.n_input))
# all_the_f_one_h.append(tf.one_hot(x,depth=self.n_input))
embedding=tf.matmul(z,self.word_embedding)+self.embedding_bias
if ret_z:
embedding=z
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False,sample=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla or sample:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
with tf.device('/cpu:0'):
mu=tf.nn.embedding_lookup(weights['out_mean'],x)
mu+=biases['out_mean']
if not vanilla or sample:
with tf.device('/cpu:0'):
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
logvar+=biases['out_log_sigma']
if not vanilla or sample:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla or sample:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print(logvar.shape, epsilon.shape, std.shape, z.shape, KLD.shape)
return z,KLD
def crop_image(self,x, target_height=227, target_width=227, as_float=True,from_path=True):
#image preprocessing to crop and resize image
image = (x)
if from_path==True:
image=cv2.imread(image)
if as_float:
image = image.astype(np.float32)
if len(image.shape) == 2:
image = np.tile(image[:,:,None], 3)
elif len(image.shape) == 4:
image = image[:,:,:,0]
height, width, rgb = image.shape
if width == height:
resized_image = cv2.resize(image, (target_height,target_width))
elif height < width:
resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
cropping_length = int((resized_image.shape[1] - target_height) / 2)
resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
else:
resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
cropping_length = int((resized_image.shape[0] - target_width) / 2)
resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
return cv2.resize(resized_image, (target_height, target_width))
def read_image(self,path=None):
# parses image from file path and crops/resizes
if path is None:
path=test_image_path
img = self.crop_image(path, target_height=224, target_width=224) # crop_image is a method of this class
if img.shape[2] == 4:
img = img[:,:,:3]
img = img[None, ...]
return img
def get_caption(self,x=None):
#gets caption from an image by feeding it through imported VGG16 graph
if self.from_image:
feat = self.read_image(x) # read_image is a method of this class
fc7 = self.sess.run(self.graph.get_tensor_by_name("import/Relu_1:0"), feed_dict={self.images:feat})
else:
fc7=np.load(x,'r')
generated_word_index= self.sess.run(self.all_words, feed_dict={self.img:fc7}) # build_generator stores the ops as self.all_words
generated_word_index = np.hstack(generated_word_index)
generated_words = [ixtoword[x] for x in generated_word_index]
punctuation = np.argmax(np.array(generated_words) == '.')+1
generated_words = generated_words[:punctuation]
generated_sentence = ' '.join(generated_words)
return (generated_sentence)
def get_data(annotation_path, feature_path):
#load training/validation data
annotations = pd.read_table(annotation_path, sep='\t', header=None, names=['image', 'caption'])
return np.load(feature_path,'r'), annotations['caption'].values
def preProBuildWordVocab(sentence_iterator, word_count_threshold=30): # function from Andrej Karpathy's NeuralTalk
#process and vectorize training/validation captions
print('preprocessing %d word vocab' % (word_count_threshold, ))
word_counts = {}
nsents = 0
for sent in sentence_iterator:
nsents += 1
for w in sent.lower().split(' '):
word_counts[w] = word_counts.get(w, 0) + 1
vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]
print('preprocessed words %d -> %d' % (len(word_counts), len(vocab)))
ixtoword = {}
ixtoword[0] = '.'
wordtoix = {}
wordtoix['#START#'] = 0
ix = 1
for w in vocab:
wordtoix[w] = ix
ixtoword[ix] = w
ix += 1
word_counts['.'] = nsents
bias_init_vector = np.array([1.0*word_counts[ixtoword[i]] for i in ixtoword])
bias_init_vector /= np.sum(bias_init_vector)
bias_init_vector = np.log(bias_init_vector)
bias_init_vector -= np.max(bias_init_vector)
return wordtoix, ixtoword, bias_init_vector.astype(np.float32)
dim_embed = 256
dim_hidden = 256
dim_in = 4096
batch_size = 5
momentum = 0.9
n_epochs = 3
def train(learning_rate=0.001, continue_training=False):
tf.reset_default_graph()
feats, captions = get_data(annotation_path, feature_path)
wordtoix, ixtoword, init_b = preProBuildWordVocab(captions)
np.save('data/ixtoword', ixtoword)
print ('num words:',len(ixtoword))
sess = tf.InteractiveSession()
n_words = len(wordtoix)
maxlen = 30
X, final_captions, captmask, _map, auxy,auxmask,Xaux,auxchoices = load_text(50000-2,captions)
running_decay=1
decay_rate=0.9999302192204246
# with tf.device('/gpu:0'):
caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, batch_size, maxlen+2, n_words, np.zeros(n_input).astype(np.float32),n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
loss, image, sentence, mask = caption_generator.build_model()
saver = tf.train.Saver(max_to_keep=100)
print([x.name for x in tf.trainable_variables()])
caption_generator.all_encoding_weights+=[x for x in tf.trainable_variables() if x.name.startswith('RNN/') ]
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
tf.global_variables_initializer().run()
tf.train.Saver(var_list=caption_generator.all_encoding_weights,max_to_keep=100).restore(sess,tf.train.latest_checkpoint('modelsvarvaedefoh'))
if continue_training:
saver.restore(sess,tf.train.latest_checkpoint(model_path))
losses=[[],[],[],[]]
for epoch in range(n_epochs):
if epoch==1:
for w in caption_generator.all_encoding_weights:
w.trainable=True
index = (np.arange(len(feats)).astype(int))
np.random.shuffle(index)
index=index[:]
i=0
for start, end in zip( range(0, len(index), batch_size), range(batch_size, len(index), batch_size)):
if i%1000==0 and i!=0:
break
#format data batch
current_feats = feats[index[start:end]]
current_captions = captions[index[start:end]]
current_caption_ind = [x for x in map(lambda cap: [wordtoix[word] for word in cap.lower().split(' ')[:-1] if word in wordtoix], current_captions)]
current_caption_matrix = sequence.pad_sequences(current_caption_ind, padding='post', maxlen=maxlen+1)
current_caption_matrix = np.hstack( [np.full( (len(current_caption_matrix),1), 0), current_caption_matrix] )
current_mask_matrix = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
nonzeros = np.array([x for x in map(lambda x: (x != 0).sum()+2, current_caption_matrix )])
current_capts=final_captions[index[start:end]]
for ind, row in enumerate(current_mask_matrix):
row[:nonzeros[ind]] = 1
current_mask_matrix=captmask[index[start:end]]
_, loss_value,total_loss,aux_KLD,aux_loss = sess.run([train_op, caption_generator.print_loss,loss,caption_generator.aux_KLD,caption_generator.aux_loss], feed_dict={
image: current_feats.astype(np.float32),
caption_generator.output_placeholder : current_caption_matrix.astype(np.int32),
mask : current_mask_matrix.astype(np.float32),
sentence : current_capts.astype(np.float32),
caption_generator.auxy_in:auxy[index[start:end]][:,:2],
caption_generator.Xaux_in:Xaux[index[start:end]][:,:2],
caption_generator.auxmask_in:auxmask[index[start:end]][:,:2],
caption_generator.auxchoices_in:auxchoices[index[start:end]][:,:2]
})
print("Current Cost: ", loss_value, "\t Epoch {}/{}".format(epoch, n_epochs), "\t Iter {}/{}".format(start,len(feats)))
losses[0].append(loss_value)
losses[1].append(aux_loss)
losses[2].append(aux_KLD)
losses[3].append(total_loss)
# losses.append(loss_value*running_decay)
# if epoch<9:
# if i%3==0:
# running_decay*=decay_rate
# else:
# if i%8==0:
# running_decay*=decay_rate
i+=1
print([x[-1] for x in losses])
# print deb
print("Saving the model from epoch: ", epoch)
pkl.dump(losses,open('losses/loss_e2e_aux_init.pkl','wb'))
saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)
learning_rate *= 0.95
def test(sess,image,generated_words,ixtoword,idx=0): # Naive greedy search
feats, captions = get_data(annotation_path, feature_path)
feat = np.array([feats[idx]])
saver = tf.train.Saver()
sanity_check= False
# sanity_check=True
if not sanity_check:
saved_path=tf.train.latest_checkpoint(model_path)
saver.restore(sess, saved_path)
else:
tf.global_variables_initializer().run()
generated_word_index= sess.run(generated_words, feed_dict={image:feat})
generated_word_index = np.hstack(generated_word_index)
generated_sentence = [ixtoword[x] for x in generated_word_index]
print(generated_sentence)
if __name__=='__main__':
model_path = './models/tensorflow_aux_init'
feature_path = './data/feats.npy'
annotation_path = './data/results_20130124.token'
import sys
feats, captions = get_data(annotation_path, feature_path)
n_input=50000
binary_dim=n_input
n_lstm_input=256
n_z=500
zero_end_tok=True
form2=True
vanilla=True
onehot=False
same_embedding=False
if sys.argv[1]=='train':
train()
elif sys.argv[1]=='test':
ixtoword = np.load('data/ixtoword.npy').tolist()
n_words = len(ixtoword)
maxlen=15
sess = tf.InteractiveSession()
batch_size=1
caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, 1, maxlen+2, n_words,n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
image, generated_words = caption_generator.build_generator(maxlen=maxlen)
test(sess,image,generated_words,ixtoword,1)
|
dricciardelli/vae2vec
|
capt_gen_aux_e2o.py
|
Python
|
mit
| 41,104
|
[
"Gaussian"
] |
ccabfa26e99e4123be00454cf648cfa02f383376ead3b3950acc19d168372463
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UFlow: Unsupervised Optical Flow.
This library provides a simple interface for training and inference.
"""
import math
import sys
import time
import gin
import tensorflow as tf
from uflow import uflow_model
from uflow import uflow_utils
@gin.configurable
class UFlow(object):
"""Simple interface with infer and train methods."""
def __init__(
self,
checkpoint_dir='',
summary_dir='',
optimizer='adam',
learning_rate=0.0002,
only_forward=False,
level1_num_layers=3,
level1_num_filters=32,
level1_num_1x1=0,
dropout_rate=.25,
build_selfsup_transformations=None,
fb_sigma_teacher=0.003,
fb_sigma_student=0.03,
train_with_supervision=False,
train_with_gt_occlusions=False,
smoothness_edge_weighting='gaussian',
teacher_image_version='original',
stop_gradient_mask=True,
selfsup_mask='gaussian',
normalize_before_cost_volume=True,
original_layer_sizes=False,
shared_flow_decoder=False,
channel_multiplier=1,
use_cost_volume=True,
use_feature_warp=True,
num_levels=5,
accumulate_flow=True,
occlusion_estimation='wang',
occ_weights=None,
occ_thresholds=None,
occ_clip_max=None,
smoothness_at_level=2,
use_bfloat16=False,
):
"""Instantiate a UFlow model.
Args:
checkpoint_dir: str, location to checkpoint model
summary_dir: str, location to write tensorboard summary
optimizer: str, identifier of which optimizer to use
learning_rate: float, learning rate to use for training
only_forward: bool, if True, only infer flow in one direction
level1_num_layers: int, pwc architecture property
level1_num_filters: int, pwc architecture property
level1_num_1x1: int, pwc architecture property
dropout_rate: float, how much dropout to use with pwc net
build_selfsup_transformations: list of functions which transform the flow
predicted from the raw images to be in the frame of images transformed
by geometric_augmentation_fn
fb_sigma_teacher: float, controls how much forward-backward flow
consistency is needed by the teacher model in order to supervise the
student
fb_sigma_student: float, controls how much forward-backward consistency is
needed by the student model in order to not receive supervision from the
teacher model
train_with_supervision: bool, whether to train with ground truth flow,
currently not supported
train_with_gt_occlusions: bool, if True, use ground truth occlusions
instead of predicted occlusions during training. Only works with Sintel
which has dense ground truth occlusions.
smoothness_edge_weighting: str, controls how smoothness penalty is
determined
teacher_image_version: str, which image to give to teacher model
stop_gradient_mask: bool, whether to stop the gradient of photometric loss
through the occlusion mask.
selfsup_mask: str, type of selfsupervision mask to use
normalize_before_cost_volume: bool, toggles pwc architecture property
original_layer_sizes: bool, toggles pwc architecture property
shared_flow_decoder: bool, toggles pwc architecture property
channel_multiplier: int, channel factor to use in pwc
use_cost_volume: bool, toggles pwc architecture property
use_feature_warp: bool, toggles pwc architecture property
num_levels: int, how many pwc pyramid layers to use
accumulate_flow: bool, toggles pwc architecture property
occlusion_estimation: which type of occlusion estimation to use
occ_weights: dict of string -> float indicating how to weight occlusions
occ_thresholds: dict of str -> float indicating thresholds to apply for
occlusions
occ_clip_max: dict of string -> float indicating how to clip occlusion
smoothness_at_level: int, which level to compute smoothness on
use_bfloat16: bool, whether to run in bfloat16 mode.
Returns:
Uflow object instance.
"""
self._only_forward = only_forward
self._build_selfsup_transformations = build_selfsup_transformations
self._fb_sigma_teacher = fb_sigma_teacher
self._fb_sigma_student = fb_sigma_student
self._train_with_supervision = train_with_supervision
self._train_with_gt_occlusions = train_with_gt_occlusions
self._smoothness_edge_weighting = smoothness_edge_weighting
self._smoothness_at_level = smoothness_at_level
self._teacher_flow_model = None
self._teacher_feature_model = None
self._teacher_image_version = teacher_image_version
self._stop_gradient_mask = stop_gradient_mask
self._selfsup_mask = selfsup_mask
self._num_levels = num_levels
self._feature_model = uflow_model.PWCFeaturePyramid(
level1_num_layers=level1_num_layers,
level1_num_filters=level1_num_filters,
level1_num_1x1=level1_num_1x1,
original_layer_sizes=original_layer_sizes,
num_levels=num_levels,
channel_multiplier=channel_multiplier,
pyramid_resolution='half',
use_bfloat16=use_bfloat16)
self._flow_model = uflow_model.PWCFlow(
dropout_rate=dropout_rate,
normalize_before_cost_volume=normalize_before_cost_volume,
num_levels=num_levels,
use_feature_warp=use_feature_warp,
use_cost_volume=use_cost_volume,
channel_multiplier=channel_multiplier,
accumulate_flow=accumulate_flow,
use_bfloat16=use_bfloat16,
shared_flow_decoder=shared_flow_decoder)
# By default, the teacher flow and feature models are the same as
# the student flow and feature models.
self._teacher_flow_model = self._flow_model
self._teacher_feature_model = self._feature_model
self._learning_rate = learning_rate
self._optimizer_type = optimizer
self._make_or_reset_optimizer()
# Set up checkpointing.
self._make_or_reset_checkpoint()
self.update_checkpoint_dir(checkpoint_dir)
# Set up tensorboard log files.
self.summary_dir = summary_dir
if self.summary_dir:
self.writer = tf.compat.v1.summary.create_file_writer(summary_dir)
self.writer.set_as_default()
self._occlusion_estimation = occlusion_estimation
if occ_weights is None:
occ_weights = {
'fb_abs': 1.0,
'forward_collision': 1.0,
'backward_zero': 10.0
}
self._occ_weights = occ_weights
if occ_thresholds is None:
occ_thresholds = {
'fb_abs': 1.5,
'forward_collision': 0.4,
'backward_zero': 0.25
}
self._occ_thresholds = occ_thresholds
if occ_clip_max is None:
occ_clip_max = {'fb_abs': 10.0, 'forward_collision': 5.0}
self._occ_clip_max = occ_clip_max
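# Usage sketch (illustrative, not part of the original file; assumes a trained
# checkpoint under './ckpt' and two [height, width, 3] float32 image tensors):
#   uflow = UFlow(checkpoint_dir='./ckpt')
#   uflow.restore()
#   flow = uflow.infer(image1, image2)                        # [h, w, 2]
#   flow, occ = uflow.infer(image1, image2, infer_occlusion=True)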
def set_teacher_models(self, teacher_feature_model, teacher_flow_model):
self._teacher_feature_model = teacher_feature_model
self._teacher_flow_model = teacher_flow_model
@property
def feature_model(self):
return self._feature_model
@property
def flow_model(self):
return self._flow_model
def update_checkpoint_dir(self, checkpoint_dir):
"""Changes the checkpoint directory for saving and restoring."""
self._manager = tf.train.CheckpointManager(
self._checkpoint, directory=checkpoint_dir, max_to_keep=1)
def restore(self, reset_optimizer=False, reset_global_step=False):
"""Restores a saved model from a checkpoint."""
status = self._checkpoint.restore(self._manager.latest_checkpoint)
try:
status.assert_existing_objects_matched()
except AssertionError as e:
print('Error while attempting to restore UFlow models:', e)
if reset_optimizer:
self._make_or_reset_optimizer()
self._make_or_reset_checkpoint()
if reset_global_step:
tf.compat.v1.train.get_or_create_global_step().assign(0)
def save(self):
"""Saves a model checkpoint."""
self._manager.save()
def _make_or_reset_optimizer(self):
if self._optimizer_type == 'adam':
self._optimizer = tf.compat.v1.train.AdamOptimizer(
self._learning_rate, name='Optimizer')
elif self._optimizer_type == 'sgd':
self._optimizer = tf.compat.v1.train.GradientDescentOptimizer(
self._learning_rate, name='Optimizer')
else:
raise ValueError('Optimizer "{}" not yet implemented.'.format(
self._optimizer_type))
@property
def optimizer(self):
return self._optimizer
def _make_or_reset_checkpoint(self):
self._checkpoint = tf.train.Checkpoint(
optimizer=self._optimizer,
feature_model=self._feature_model,
flow_model=self._flow_model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step())
# Use of tf.function breaks exporting the model, see b/138864493
def infer_no_tf_function(self,
image1,
image2,
input_height=None,
input_width=None,
resize_flow_to_img_res=True,
infer_occlusion=False):
"""Infer flow for two images.
Args:
image1: tf.tensor of shape [height, width, 3].
image2: tf.tensor of shape [height, width, 3].
input_height: height at which the model should be applied if different
from image height.
input_width: width at which the model should be applied if different from
image width
resize_flow_to_img_res: bool, if True, return the flow resized to the same
resolution as (image1, image2). If False, return flow at whatever
resolution the model natively predicts.
infer_occlusion: bool, if True, return both flow and a soft occlusion
mask, else return just flow.
Returns:
Optical flow for each pixel in image1 pointing to image2.
"""
results = self.batch_infer_no_tf_function(
tf.stack([image1, image2])[None],
input_height=input_height,
input_width=input_width,
resize_flow_to_img_res=resize_flow_to_img_res,
infer_occlusion=infer_occlusion)
# Remove batch dimension from all results.
if isinstance(results, (tuple, list)):
return [x[0] for x in results]
else:
return results[0]
def batch_infer_no_tf_function(self,
images,
input_height=None,
input_width=None,
resize_flow_to_img_res=True,
infer_occlusion=False):
"""Infers flow from two images.
Args:
images: tf.tensor of shape [batchsize, 2, height, width, 3].
input_height: height at which the model should be applied if different
from image height.
input_width: width at which the model should be applied if different from
image width
resize_flow_to_img_res: bool, if True, return the flow resized to the same
resolution as (image1, image2). If False, return flow at whatever
resolution the model natively predicts.
infer_occlusion: bool, if True, return both flow and a soft occlusion
mask, else return just flow.
Returns:
Optical flow for each pixel in image1 pointing to image2.
"""
batch_size, seq_len, orig_height, orig_width, image_channels = images.shape.as_list()
if input_height is None:
input_height = orig_height
if input_width is None:
input_width = orig_width
# Ensure a feasible computation resolution. If specified size is not
# feasible with the model, change it to a slightly higher resolution.
divisible_by_num = pow(2.0, self._num_levels)
if (input_height % divisible_by_num != 0 or
input_width % divisible_by_num != 0):
print('Cannot process images at a resolution of ' + str(input_height) +
'x' + str(input_width) + ', since the height and/or width is not a '
'multiple of ' + str(divisible_by_num) + '.')
# compute a feasible resolution
input_height = int(
math.ceil(float(input_height) / divisible_by_num) * divisible_by_num)
input_width = int(
math.ceil(float(input_width) / divisible_by_num) * divisible_by_num)
print('Inference will be run at a resolution of ' + str(input_height) +
'x' + str(input_width) + '.')
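# Illustrative numbers for the rounding above (not from the source): with
# num_levels=5 the resolution must be a multiple of 32, so a 436x1024 input
# (e.g. Sintel) would be processed at 448x1024.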
# Resize images to desired input height and width.
if input_height != orig_height or input_width != orig_width:
images = uflow_utils.resize(
images, input_height, input_width, is_flow=False)
# Flatten images by folding sequence length into the batch dimension, apply
# the feature network and undo the flattening.
images_flattened = tf.reshape(
images,
[batch_size * seq_len, input_height, input_width, image_channels])
# noinspection PyCallingNonCallable
features_flattened = self._feature_model(
images_flattened, split_features_by_sample=False)
features = [
tf.reshape(f, [batch_size, seq_len] + f.shape.as_list()[1:])
for f in features_flattened
]
features1, features2 = [[f[:, i] for f in features] for i in range(2)]
# Compute flow in frame of image1.
# noinspection PyCallingNonCallable
flow = self._flow_model(features1, features2, training=False)[0]
if infer_occlusion:
# noinspection PyCallingNonCallable
flow_backward = self._flow_model(features2, features1, training=False)[0]
occlusion_mask = self.infer_occlusion(flow, flow_backward)
occlusion_mask = uflow_utils.resize(
occlusion_mask, orig_height, orig_width, is_flow=False)
# Resize and rescale flow to original resolution. This always needs to be
# done because flow is generated at a lower resolution.
if resize_flow_to_img_res:
flow = uflow_utils.resize(flow, orig_height, orig_width, is_flow=True)
if infer_occlusion:
return flow, occlusion_mask
return flow
@tf.function
def infer(self,
image1,
image2,
input_height=None,
input_width=None,
resize_flow_to_img_res=True,
infer_occlusion=False):
return self.infer_no_tf_function(image1, image2, input_height, input_width,
resize_flow_to_img_res, infer_occlusion)
@tf.function
def batch_infer(self,
images,
input_height=None,
input_width=None,
resize_flow_to_img_res=True,
infer_occlusion=False):
return self.batch_infer_no_tf_function(images, input_height, input_width,
resize_flow_to_img_res,
infer_occlusion)
def infer_occlusion(self, flow_forward, flow_backward):
"""Gets a 'soft' occlusion mask from the forward and backward flow."""
flows = {
(0, 1, 'inference'): [flow_forward],
(1, 0, 'inference'): [flow_backward],
}
_, _, _, occlusion_masks, _, _ = uflow_utils.compute_warps_and_occlusion(
flows,
self._occlusion_estimation,
self._occ_weights,
self._occ_thresholds,
self._occ_clip_max,
occlusions_are_zeros=False)
occlusion_mask_forward = occlusion_masks[(0, 1, 'inference')][0]
return occlusion_mask_forward
def features_no_tf_function(self, image1, image2):
"""Runs the feature extractor portion of the model on image1 and image2."""
images = tf.stack([image1, image2])
# noinspection PyCallingNonCallable
return self._feature_model(images, split_features_by_sample=True)
@tf.function
def features(self, image1, image2):
"""Runs the feature extractor portion of the model on image1 and image2."""
return self.features_no_tf_function(image1, image2)
def train_step_no_tf_function(self,
batch,
weights=None,
plot_dir=None,
distance_metrics=None,
ground_truth_flow=None,
ground_truth_valid=None,
ground_truth_occlusions=None,
images_without_photo_aug=None,
occ_active=None):
"""Perform single gradient step."""
if weights is None:
weights = {
'smooth2': 2.0,
'edge_constant': 100.0,
'census': 1.0,
}
else:
# Support values and callables (e.g. to compute weights from global step).
weights = {k: v() if callable(v) else v for k, v in weights.items()}
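# A callable weight allows schedules, e.g. (an illustrative sketch; ramp_fn is
# a hypothetical user-supplied function of the global step):
#   weights = {'census': 1.0,
#              'selfsup': lambda: ramp_fn(tf.compat.v1.train.get_global_step())}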
losses, gradients, variables = self._loss_and_grad(
batch,
weights,
plot_dir,
distance_metrics=distance_metrics,
ground_truth_flow=ground_truth_flow,
ground_truth_valid=ground_truth_valid,
ground_truth_occlusions=ground_truth_occlusions,
images_without_photo_aug=images_without_photo_aug,
occ_active=occ_active)
self._optimizer.apply_gradients(
list(zip(gradients, variables)),
global_step=tf.compat.v1.train.get_or_create_global_step())
return losses
@tf.function
def train_step(self,
batch,
weights=None,
distance_metrics=None,
ground_truth_flow=None,
ground_truth_valid=None,
ground_truth_occlusions=None,
images_without_photo_aug=None,
occ_active=None):
"""Performs a train step on the batch."""
return self.train_step_no_tf_function(
batch,
weights,
distance_metrics=distance_metrics,
ground_truth_flow=ground_truth_flow,
ground_truth_valid=ground_truth_valid,
ground_truth_occlusions=ground_truth_occlusions,
images_without_photo_aug=images_without_photo_aug,
occ_active=occ_active)
def train(self,
data_it,
num_steps,
weights=None,
progress_bar=True,
plot_dir=None,
distance_metrics=None,
occ_active=None):
"""Trains flow from a data iterator for a number of gradient steps.
Args:
data_it: tf.data.Iterator that produces tensors of shape [b,3,h,w,3].
num_steps: int, number of gradient steps to train for.
weights: dictionary with weight for each loss.
progress_bar: boolean flag for continuous printing of a progress bar.
plot_dir: location to plot results or None
distance_metrics: dictionary of which type of distance metric to use for
photometric losses
occ_active: dictionary of which occlusion types are active
Returns:
a dict that contains all losses.
"""
# Log dictionary for storing losses of this epoch.
log = dict()
# Support constant lr values and callables (for learning rate schedules).
if callable(self._learning_rate):
log['learning-rate'] = self._learning_rate()
else:
log['learning-rate'] = self._learning_rate
start_time_data = time.time()
for _, batch in zip(range(num_steps), data_it):
stop_time_data = time.time()
if progress_bar:
sys.stdout.write('.')
sys.stdout.flush()
# Split batch into images, occlusion masks, and ground truth flow.
images, labels = batch
ground_truth_flow = labels.get('flow_uv', None)
ground_truth_valid = labels.get('flow_valid', None)
ground_truth_occlusions = labels.get('occlusions', None)
images_without_photo_aug = labels.get('images_without_photo_aug', None)
start_time_train_step = time.time()
# Use tf.function unless intermediate results have to be plotted.
if plot_dir is None:
# Perform a gradient step (optimized by tf.function).
losses = self.train_step(
images,
weights,
distance_metrics=distance_metrics,
ground_truth_flow=ground_truth_flow,
ground_truth_valid=ground_truth_valid,
ground_truth_occlusions=ground_truth_occlusions,
images_without_photo_aug=images_without_photo_aug,
occ_active=occ_active)
else:
# Perform a gradient step without tf.function to allow plotting.
losses = self.train_step_no_tf_function(
images,
weights,
plot_dir,
distance_metrics=distance_metrics,
ground_truth_flow=ground_truth_flow,
ground_truth_valid=ground_truth_valid,
ground_truth_occlusions=ground_truth_occlusions,
images_without_photo_aug=images_without_photo_aug,
occ_active=occ_active)
stop_time_train_step = time.time()
log_update = losses
# Compute time in ms.
log_update['data-time'] = (stop_time_data - start_time_data) * 1000
log_update['train-time'] = (stop_time_train_step -
start_time_train_step) * 1000
# Log losses and times.
for key in log_update:
if key in log:
log[key].append(log_update[key])
else:
log[key] = [log_update[key]]
if self.summary_dir:
tf.summary.scalar(key, log[key])
# Set start time for data gathering to measure data pipeline efficiency.
start_time_data = time.time()
for key in log:
log[key] = tf.reduce_mean(input_tensor=log[key])
if progress_bar:
sys.stdout.write('\n')
sys.stdout.flush()
return log
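# Training loop sketch (illustrative, not from the original file; assumes
# `data_it` yields (images, labels) batches as documented above):
#   for epoch in range(num_epochs):
#     log = uflow.train(data_it, num_steps=1000,
#                       weights={'census': 1.0, 'smooth2': 2.0})
#     print({k: float(v) for k, v in log.items()})
#     uflow.save()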
def _loss_and_grad(self,
batch,
weights,
plot_dir=None,
distance_metrics=None,
ground_truth_flow=None,
ground_truth_valid=None,
ground_truth_occlusions=None,
images_without_photo_aug=None,
occ_active=None):
"""Apply the model on the data in batch and compute the loss.
Args:
batch: tf.tensor of shape [b, seq, h, w, c] that holds a batch of image
sequences.
weights: dictionary with float entries per loss.
plot_dir: str, directory to plot images
distance_metrics: dict, which distance metrics to use,
ground_truth_flow: Tensor, optional ground truth flow for first image
ground_truth_valid: Tensor, indicates locations where gt flow is valid
ground_truth_occlusions: Tensor, optional ground truth occlusions for
computing loss. If None, predicted occlusions will be used.
images_without_photo_aug: optional images without any photometric
augmentation applied. Will be used for computing photometric losses if
provided.
occ_active: optional dict indicating which occlusion methods are active
Returns:
A tuple consisting of a tf.scalar that represents the total loss for the
current batch, a list of gradients, and a list of the respective
variables.
"""
with tf.GradientTape() as tape:
losses = self.compute_loss(
batch,
weights,
plot_dir,
distance_metrics=distance_metrics,
ground_truth_flow=ground_truth_flow,
ground_truth_valid=ground_truth_valid,
ground_truth_occlusions=ground_truth_occlusions,
images_without_photo_aug=images_without_photo_aug,
occ_active=occ_active)
variables = (
self._feature_model.trainable_variables +
self._flow_model.trainable_variables)
grads = tape.gradient(losses['total-loss'], variables)
return losses, grads, variables
def compute_loss(self,
batch,
weights,
plot_dir=None,
distance_metrics=None,
ground_truth_flow=None,
ground_truth_valid=None,
ground_truth_occlusions=None,
images_without_photo_aug=None,
occ_active=None):
"""Applies the model and computes losses for a batch of image sequences."""
# Compute only a supervised loss.
if self._train_with_supervision:
if ground_truth_flow is None:
raise ValueError('Need ground truth flow to compute supervised loss.')
flows = uflow_utils.compute_flow_for_supervised_loss(
self._feature_model, self._flow_model, batch=batch, training=True)
losses = uflow_utils.supervised_loss(weights, ground_truth_flow,
ground_truth_valid, flows)
losses = {key + '-loss': losses[key] for key in losses}
return losses
# Use possibly augmented images if non augmented version is not provided.
if images_without_photo_aug is None:
images_without_photo_aug = batch
flows, selfsup_transform_fns = uflow_utils.compute_features_and_flow(
self._feature_model,
self._flow_model,
batch=batch,
batch_without_aug=images_without_photo_aug,
training=True,
build_selfsup_transformations=self._build_selfsup_transformations,
teacher_feature_model=self._teacher_feature_model,
teacher_flow_model=self._teacher_flow_model,
teacher_image_version=self._teacher_image_version,
)
# Prepare images for unsupervised loss (prefer unaugmented images).
seq_len = int(batch.shape[1])
images = {i: images_without_photo_aug[:, i] for i in range(seq_len)}
# Warp stuff and compute occlusion.
warps, valid_warp_masks, _, not_occluded_masks, fb_sq_diff, fb_sum_sq = uflow_utils.compute_warps_and_occlusion(
flows,
occlusion_estimation=self._occlusion_estimation,
occ_weights=self._occ_weights,
occ_thresholds=self._occ_thresholds,
occ_clip_max=self._occ_clip_max,
occlusions_are_zeros=True,
occ_active=occ_active)
# Warp images and features.
warped_images = uflow_utils.apply_warps_stop_grad(images, warps, level=0)
# Compute losses.
losses = uflow_utils.compute_loss(
weights=weights,
images=images,
flows=flows,
warps=warps,
valid_warp_masks=valid_warp_masks,
not_occluded_masks=not_occluded_masks,
fb_sq_diff=fb_sq_diff,
fb_sum_sq=fb_sum_sq,
warped_images=warped_images,
only_forward=self._only_forward,
selfsup_transform_fns=selfsup_transform_fns,
fb_sigma_teacher=self._fb_sigma_teacher,
fb_sigma_student=self._fb_sigma_student,
plot_dir=plot_dir,
distance_metrics=distance_metrics,
smoothness_edge_weighting=self._smoothness_edge_weighting,
stop_gradient_mask=self._stop_gradient_mask,
selfsup_mask=self._selfsup_mask,
ground_truth_occlusions=ground_truth_occlusions,
smoothness_at_level=self._smoothness_at_level)
losses = {key + '-loss': losses[key] for key in losses}
return losses
|
google-research/google-research
|
uflow/uflow_net.py
|
Python
|
apache-2.0
| 27,801
|
[
"Gaussian"
] |
7f04cb2469e22a55b158426e9aa0b40b511d9098857f99116c773aa08c20253c
|
#!/usr/bin/env python2
# Make.py tool for managing packages and their auxiliary libs,
# auto-editing machine Makefiles, and building LAMMPS
# Syntax: Make.py -h (for help)
# Notes: needs python 2.7 (not Python 3)
import sys,os,commands,re,copy
# switch abbrevs
# switch classes = created class for each switch
# lib classes = auxiliary package libs
# build classes = build options with defaults
# make classes = makefile options with no defaults
# setargs = makefile settings
# actionargs = allowed actions (also lib-dir and machine)
abbrevs = "adhjmoprsv"
switchclasses = ("actions","dir","help","jmake","makefile",
"output","packages","redo","settings","verbose")
libclasses = ("atc","awpmd","colvars","cuda","gpu",
"meam","poems","qmmm","reax","voronoi")
buildclasses = ("intel","kokkos")
makeclasses = ("cc","mpi","fft","jpg","png")
setargs = ("gzip","#gzip","ffmpeg","#ffmpeg","smallbig","bigbig","smallsmall")
actionargs = ("lib-all","file","clean","exe")
# ----------------------------------------------------------------
# functions
# ----------------------------------------------------------------
# if flag = 1, print str and exit
# if flag = 0, print str as warning and do not exit
def error(str,flag=1):
if flag:
print "ERROR:",str
sys.exit()
else:
print "WARNING:",str
# store command-line args as sw = dict of key/value
# key = switch word, value = list of following args
# order = list of switches in order specified
# enforce no switch more than once
def parse_args(args):
narg = len(args)
sw = {}
order = []
iarg = 0
while iarg < narg:
if args[iarg][0] != '-': error("Arg %s is not a switch" % args[iarg])
switch = args[iarg][1:]
if switch in sw: error("Duplicate switch %s" % args[iarg])
order.append(switch)
first = iarg+1
last = first
while last < narg and args[last][0] != '-': last += 1
sw[switch] = args[first:last]
iarg = last
return sw,order
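# e.g. (illustrative): parse_args("-j 4 -a file exe".split()) returns
#   sw = {'j': ['4'], 'a': ['file', 'exe']} and order = ['j', 'a']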
# convert info in switches dict back to a string, in switch_order
def switch2str(switches,switch_order):
txt = ""
for switch in switch_order:
if txt: txt += ' '
txt += "-%s" % switch
txt += ' ' + ' '.join(switches[switch])
return txt
# check if compiler works with ccflags on dummy one-line tmpauto.cpp file
# return 1 if successful, else 0
# warn = 1 = print warning if not successful, warn = 0 = no warning
# NOTE: unrecognized -override-limits can leave a verride-limits file
# (the flag gets parsed as -o verride-limits)
def compile_check(compiler,ccflags,warn):
open("tmpauto.cpp",'w').write("int main(int, char **) {}\n")
str = "%s %s -c tmpauto.cpp" % (compiler,ccflags)
txt = commands.getoutput(str)
flag = 1
if txt or not os.path.isfile("tmpauto.o"):
flag = 0
if warn:
print str
if txt: print txt
else: print "compile produced no output"
os.remove("tmpauto.cpp")
if os.path.isfile("tmpauto.o"): os.remove("tmpauto.o")
return flag
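# e.g. (illustrative): compile_check("g++","-fopenmp",1) returns 1 if the
# dummy file compiles with -fopenmp, else prints diagnostics and returns 0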
# check if linker works with linkflags on tmpauto.o file
# return 1 if successful, else 0
# warn = 1 = print warning if not successful, warn = 0 = no warning
def link_check(linker,linkflags,warn):
open("tmpauto.cpp",'w').write("int main(int, char **) {}\n")
str = "%s %s -o tmpauto tmpauto.cpp" % (linker,linkflags)
txt = commands.getoutput(str)
flag = 1
if txt or not os.path.isfile("tmpauto"):
flag = 0
if warn:
print str
if txt: print txt
else: print "link produced no output"
os.remove("tmpauto.cpp")
if os.path.isfile("tmpauto"): os.remove("tmpauto")
return flag
# ----------------------------------------------------------------
# switch classes, one per single-letter switch
# ----------------------------------------------------------------
# actions
class Actions:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-a action1 action2 ...
possible actions = lib-all, lib-dir, file, clean, exe or machine
machine is a Makefile.machine suffix
actions can be specified in any order
each action can appear only once
lib-dir can appear multiple times for different dirs
some actions depend on installed packages
installed packages = currently installed + result of -p switch
actions are invoked in this order, independent of specified order
(1) lib-all or lib-dir = build auxiliary libraries
lib-all builds all auxiliary libs needed by installed packages
lib-dir builds a specific lib whether package installed or not
dir is any dir in lib directory (atc, cuda, meam, etc) except linalg
(2) file = create src/MAKE/MINE/Makefile.auto
use -m switch for Makefile.machine to start from,
else use existing Makefile.auto
adds settings needed for installed accelerator packages
existing Makefile.auto is NOT changed unless "file" action is specified
(3) clean = invoke "make clean-auto" to ensure a full build
useful if compiler flags have changed
(4) exe or machine = build LAMMPS
machine can be any existing Makefile.machine suffix
machine is converted to "exe" action, as well as:
"-m machine" is added if -m switch is not specified
"-o machine" is added if -o switch is not specified
if either "-m" or "-o" are specified, they are not overridden
does not invoke any lib builds, since libs could be previously built
exe always builds using src/MAKE/MINE/Makefile.auto
if file action also specified, it creates Makefile.auto
else if -m switch specified,
existing Makefile.machine is copied to create Makefile.auto
else Makefile.auto must already exist and is not changed
produces src/lmp_auto, or error message if unsuccessful
use -o switch to copy src/lmp_auto to new filename
"""
def check(self):
if not self.inlist: error("-a args are invalid")
alist = []
machine = 0
nlib = 0
for one in self.inlist:
if one in alist: error("An action is duplicated")
if one.startswith("lib-"):
lib = one[4:]
if lib != "all" and lib not in libclasses: error("Actions are invalid")
alist.insert(nlib,one)
nlib += 1
elif one == "file":
if nlib == 0: alist.insert(0,"file")
else: alist.insert(1,"file")
elif one == "clean":
if nlib == 0: alist.insert(0,"clean")
elif "file" not in alist: alist.insert(1,"clean")
else: alist.insert(2,"clean")
elif one == "exe":
if machine == 0: alist.append("exe")
else: error("Actions are invalid")
machine = 1
# one action can be unknown in case it is a machine (checked in setup)
elif machine == 0:
alist.append(one)
machine = 1
else: error("Actions are invalid")
self.alist = alist
# dedup list of actions concatenated from two lists
# current self.inlist = specified -a switch + redo command -a switch
# specified exe/machine action replaces redo exe/machine action
# operates on and replaces self.inlist
def dedup(self):
alist = []
exemachine = 0
for one in self.inlist:
if one == "exe" or (one not in actionargs and not one.startswith("lib-")):
if exemachine: continue
exemachine = 1
if one not in alist: alist.append(one)
self.inlist = alist
# if last action is unknown, assume machine and convert to exe
# only done if action is a suffix for an existing Makefile.machine
# return machine if conversion done, else None
def setup(self):
machine = self.alist[-1]
if machine in actionargs or machine.startswith("lib-"): return None
make = MakeReader(machine,2)
self.alist[-1] = "exe"
return machine
# build one or more auxiliary package libraries
def lib(self,suffix):
if suffix != "all":
print "building",suffix,"library ..."
str = "%s.build()" % suffix
exec(str)
else:
final = packages.final
for one in packages.lib:
if final[one]:
if "user" in one: pkg = one[5:]
else: pkg = one
print "building",pkg,"library ..."
str = "%s.build()" % pkg
exec(str)
# read Makefile.machine
# if caller = "file", edit via switches
# if caller = "exe", just read
# write out new Makefile.auto
def file(self,caller):
# if caller = "file", create from mpi or read from makefile.machine or auto
# if caller = "exe" and "file" action already invoked, read from auto
# if caller = "exe" and no "file" action, read from makefile.machine or auto
if caller == "file":
if makefile and makefile.machine == "none":
if cc and mpi: machine = "mpi"
else: error("Cannot create makefile unless -cc and -mpi are used")
elif makefile: machine = makefile.machine
else: machine = "auto"
elif caller == "exe" and "file" in self.alist:
machine = "auto"
elif caller == "exe" and "file" not in self.alist:
if makefile and makefile.machine == "none":
error("Cannot build with makefile = none")
elif makefile: machine = makefile.machine
else: machine = "auto"
make = MakeReader(machine,1)
# change makefile settings to user specifications
precompiler = ""
if caller == "file":
# add compiler/linker and default CCFLAGS,LINKFLAGS
# if cc.wrap, add wrapper setting for mpi = ompi/mpich
# precompiler = env variable setting for OpenMPI wrapper compiler
if cc:
make.setvar("CC",cc.compiler)
make.setvar("LINK",cc.compiler)
if cc.wrap:
if cc.wrap == "nvcc":
wrapper = os.path.abspath("../lib/kokkos/config/nvcc_wrapper")
else: wrapper = cc.wrap
abbrev = cc.abbrev
if abbrev == "mpi":
txt = commands.getoutput("mpicxx -show")
if "-lmpich" in txt:
make.addvar("CC","-cxx=%s" % wrapper)
make.addvar("LINK","-cxx=%s" % wrapper)
elif "-lmpi" in txt:
make.addvar("OMPI_CXX",wrapper,"cc")
precompiler = "env OMPI_CXX=%s " % wrapper
else: error("Could not add MPI wrapper compiler, " +
"did not recognize OpenMPI or MPICH")
make.setvar("CCFLAGS","-g")
make.addvar("CCFLAGS","-O3")
make.setvar("LINKFLAGS","-g")
make.addvar("LINKFLAGS","-O")
# add MPI settings
if mpi:
make.delvar("MPI_INC","*")
make.delvar("MPI_PATH","*")
make.delvar("MPI_LIB","*")
if mpi.style == "mpi":
make.addvar("MPI_INC","-DMPICH_SKIP_MPICXX")
make.addvar("MPI_INC","-DOMPI_SKIP_MPICXX=1")
elif mpi.style == "mpich":
make.addvar("MPI_INC","-DMPICH_SKIP_MPICXX")
make.addvar("MPI_INC","-DOMPI_SKIP_MPICXX=1")
if mpi.dir: make.addvar("MPI_INC","-I%s/include" % mpi.dir)
if mpi.dir: make.addvar("MPI_PATH","-L%s/lib" % mpi.dir)
make.addvar("MPI_LIB","-lmpich")
make.addvar("MPI_LIB","-lmpl")
make.addvar("MPI_LIB","-lpthread")
elif mpi.style == "ompi":
make.addvar("MPI_INC","-DMPICH_SKIP_MPICXX")
make.addvar("MPI_INC","-DOMPI_SKIP_MPICXX=1")
if mpi.dir: make.addvar("MPI_INC","-I%s/include" % mpi.dir)
if mpi.dir: make.addvar("MPI_PATH","-L%s/lib" % mpi.dir)
make.addvar("MPI_LIB","-lmpi")
make.addvar("MPI_LIB","-lmpi_cxx")
elif mpi.style == "serial":
make.addvar("MPI_INC","-I../STUBS")
make.addvar("MPI_PATH","-L../STUBS")
make.addvar("MPI_LIB","-lmpi_stubs")
# add accelerator package CCFLAGS and LINKFLAGS and variables
compiler = precompiler + ' '.join(make.getvar("CC"))
linker = precompiler + ' '.join(make.getvar("LINK"))
final = packages.final
if final["opt"]:
if compile_check(compiler,"-restrict",0):
make.addvar("CCFLAGS","-restrict")
if final["user-omp"]:
if compile_check(compiler,"-restrict",0):
make.addvar("CCFLAGS","-restrict")
if compile_check(compiler,"-fopenmp",1):
make.addvar("CCFLAGS","-fopenmp")
make.addvar("LINKFLAGS","-fopenmp")
if final["user-intel"]:
if intel.mode == "cpu":
if compile_check(compiler,"-fopenmp",1):
make.addvar("CCFLAGS","-fopenmp")
make.addvar("LINKFLAGS","-fopenmp")
make.addvar("CCFLAGS","-DLAMMPS_MEMALIGN=64")
if compile_check(compiler,"-restrict",1):
make.addvar("CCFLAGS","-restrict")
if compile_check(compiler,"-xHost",1):
make.addvar("CCFLAGS","-xHost")
make.addvar("LINKFLAGS","-xHost")
if compile_check(compiler,"-fno-alias",1):
make.addvar("CCFLAGS","-fno-alias")
if compile_check(compiler,"-ansi-alias",1):
make.addvar("CCFLAGS","-ansi-alias")
if compile_check(compiler,"-override-limits",1):
make.addvar("CCFLAGS","-override-limits")
make.delvar("CCFLAGS","-DLMP_INTEL_OFFLOAD")
make.delvar("LINKFLAGS","-offload")
elif intel.mode == "phi":
if compile_check(compiler,"-fopenmp",1):
make.addvar("CCFLAGS","-fopenmp")
make.addvar("LINKFLAGS","-fopenmp")
make.addvar("CCFLAGS","-DLAMMPS_MEMALIGN=64")
if compile_check(compiler,"-restrict",1):
make.addvar("CCFLAGS","-restrict")
if compile_check(compiler,"-xHost",1):
make.addvar("CCFLAGS","-xHost")
make.addvar("CCFLAGS","-DLMP_INTEL_OFFLOAD")
if compile_check(compiler,"-fno-alias",1):
make.addvar("CCFLAGS","-fno-alias")
if compile_check(compiler,"-ansi-alias",1):
make.addvar("CCFLAGS","-ansi-alias")
if compile_check(compiler,"-override-limits",1):
make.addvar("CCFLAGS","-override-limits")
if compile_check(compiler,'-offload-option,mic,compiler,' +
'"-fp-model fast=2 -mGLOB_default_function_attrs=' +
'\\"gather_scatter_loop_unroll=4\\""',1):
make.addvar("CCFLAGS",'-offload-option,mic,compiler,' +
'"-fp-model fast=2 -mGLOB_default_function_attrs=' +
'\\"gather_scatter_loop_unroll=4\\""')
if link_check(linker,"-offload",1):
make.addvar("LINKFLAGS","-offload")
if final["kokkos"]:
if kokkos.mode == "omp":
make.delvar("KOKKOS_DEVICES","*")
make.delvar("KOKKOS_ARCH","*")
make.addvar("KOKKOS_DEVICES","OpenMP","lmp")
elif kokkos.mode == "cuda":
make.delvar("KOKKOS_DEVICES","*")
make.delvar("KOKKOS_ARCH","*")
make.addvar("KOKKOS_DEVICES","Cuda, OpenMP","lmp")
if kokkos.arch[0] == "3":
make.addvar("KOKKOS_ARCH","Kepler" + kokkos.arch,"lmp")
elif kokkos.arch[0] == "2":
make.addvar("KOKKOS_ARCH","Fermi" + kokkos.arch,"lmp")
elif kokkos.mode == "phi":
make.delvar("KOKKOS_DEVICES","*")
make.delvar("KOKKOS_ARCH","*")
make.addvar("KOKKOS_DEVICES","OpenMP","lmp")
make.addvar("KOKKOS_ARCH","KNC","lmp")
# add LMP settings
if settings:
list = settings.inlist
for one in list:
if one == "gzip": make.addvar("LMP_INC","-DLAMMPS_GZIP")
elif one == "#gzip": make.delvar("LMP_INC","-DLAMMPS_GZIP")
elif one == "ffmpeg": make.addvar("LMP_INC","-DLAMMPS_FFMPEG")
elif one == "#ffmpeg": make.delvar("LMP_INC","-DLAMMPS_FFMPEG")
elif one == "smallbig":
make.delvar("LMP_INC","-DLAMMPS_BIGBIG")
make.delvar("LMP_INC","-DLAMMPS_SMALLSMALL")
elif one == "bigbig":
make.delvar("LMP_INC","-DLAMMPS_SMALLBIG")
make.delvar("LMP_INC","-DLAMMPS_SMALLSMALL")
make.addvar("LMP_INC","-DLAMMPS_BIGBIG")
elif one == "smallsmall":
make.delvar("LMP_INC","-DLAMMPS_SMALLBIG")
make.delvar("LMP_INC","-DLAMMPS_BIGBIG")
make.addvar("LMP_INC","-DLAMMPS_SMALLSMALL")
# add FFT, JPG, PNG settings
if fft:
make.delvar("FFT_INC","*")
make.delvar("FFT_PATH","*")
make.delvar("FFT_LIB","*")
if fft.mode == "none": make.addvar("FFT_INC","-DFFT_NONE")
else:
make.addvar("FFT_INC","-DFFT_%s" % fft.mode.upper())
make.addvar("FFT_LIB",fft.lib)
if fft.dir:
make.addvar("FFT_INC","-I%s/include" % fft.dir)
make.addvar("FFT_PATH","-L%s/lib" % fft.dir)
else:
if fft.incdir: make.addvar("FFT_INC","-I%s" % fft.incdir)
if fft.libdir: make.addvar("FFT_PATH","-L%s" % fft.libdir)
if jpg:
if jpg.on == 0:
make.delvar("LMP_INC","-DLAMMPS_JPEG")
make.delvar("JPG_LIB","-ljpeg")
else:
make.addvar("LMP_INC","-DLAMMPS_JPEG")
make.addvar("JPG_LIB","-ljpeg")
if jpg.dir:
make.addvar("JPG_INC","-I%s/include" % jpg.dir)
make.addvar("JPG_PATH","-L%s/lib" % jpg.dir)
else:
if jpg.incdir: make.addvar("JPG_INC","-I%s" % jpg.incdir)
if jpg.libdir: make.addvar("JPG_PATH","-L%s" % jpg.libdir)
if png:
if png.on == 0:
make.delvar("LMP_INC","-DLAMMPS_PNG")
make.delvar("JPG_LIB","-lpng")
else:
make.addvar("LMP_INC","-DLAMMPS_PNG")
make.addvar("JPG_LIB","-lpng")
if png.dir:
make.addvar("JPG_INC","-I%s/include" % png.dir)
make.addvar("JPG_PATH","-L%s/lib" % png.dir)
else:
if png.incdir: make.addvar("JPG_INC","-I%s" % png.incdir)
if png.libdir: make.addvar("JPG_PATH","-L%s" % png.libdir)
# set self.stubs if Makefile.auto uses STUBS lib in MPI settings
if "-lmpi_stubs" in make.getvar("MPI_LIB"): self.stubs = 1
else: self.stubs = 0
# write out Makefile.auto
# unless caller = "exe" and "file" action already invoked
if caller == "file" or "file" not in self.alist:
make.write("%s/MAKE/MINE/Makefile.auto" % dir.src,1)
print "Created src/MAKE/MINE/Makefile.auto"
# test full compile and link
# unless caller = "file" and "exe" action will be invoked later
if caller == "file" and "exe" in self.alist: return
compiler = precompiler + ' '.join(make.getvar("CC"))
ccflags = ' '.join(make.getvar("CCFLAGS"))
linker = precompiler + ' '.join(make.getvar("LINK"))
linkflags = ' '.join(make.getvar("LINKFLAGS"))
if not compile_check(compiler,ccflags,1):
error("Test of compilation failed")
if not link_check(linker,linkflags,1): error("Test of link failed")
# invoke "make clean-auto" to force clean before build
def clean(self):
str = "cd %s; make clean-auto" % dir.src
commands.getoutput(str)
if verbose: print "Performed make clean-auto"
# build LAMMPS using Makefile.auto and -j setting
# invoke self.file() first, to test makefile compile/link
# delete existing lmp_auto, so can detect if build fails
# build STUBS lib (if unbuilt) if Makefile.auto MPI settings need it
def exe(self):
self.file("exe")
commands.getoutput("cd %s; rm -f lmp_auto" % dir.src)
if self.stubs and not os.path.isfile("%s/STUBS/libmpi_stubs.a" % dir.src):
print "building serial STUBS library ..."
str = "cd %s/STUBS; make clean; make" % dir.src
txt = commands.getoutput(str)
if not os.path.isfile("%s/STUBS/libmpi_stubs.a" % dir.src):
print txt
error('Unsuccessful "make stubs"')
print "Created src/STUBS/libmpi_stubs.a"
if jmake: str = "cd %s; make -j %d auto" % (dir.src,jmake.n)
else: str = "cd %s; make auto" % dir.src
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/lmp_auto" % dir.src):
if not verbose: print txt
error('Unsuccessful "make auto"')
elif not output: print "Created src/lmp_auto"
# dir switch
class Dir:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
def help(self):
return """
-d dir
dir = LAMMPS home dir
if -d not specified, working dir must be lammps/src
"""
def check(self):
if self.inlist != None and len(self.inlist) != 1:
error("-d args are invalid")
# if inlist = None, check that cwd = lammps/src
# store cwd and lammps dir
# derive src,make,lib dirs from lammps dir
# check that they all exist
def setup(self):
self.cwd = os.getcwd()
if self.inlist == None: self.lammps = ".."
else: self.lammps = self.inlist[0]
self.lammps = os.path.realpath(self.lammps)
self.src = self.lammps + "/src"
self.make = self.lammps + "/src/MAKE"
self.lib = self.lammps + "/lib"
if not os.path.isdir(self.lammps): error("LAMMPS home dir is invalid")
if not os.path.isdir(self.src): error("LAMMPS src dir is invalid")
if not os.path.isdir(self.lib): error("LAMMPS lib dir is invalid")
# help switch
class Help:
def __init__(self,list): pass
def help(self):
return """
Syntax: Make.py switch args ...
switches can be listed in any order
help switch:
-h prints help and syntax for all other specified switches
switch for actions:
-a lib-all, lib-dir, clean, file, exe or machine
list one or more actions, in any order
machine is a Makefile.machine suffix
one-letter switches:
-d (dir), -j (jmake), -m (makefile), -o (output),
-p (packages), -r (redo), -s (settings), -v (verbose)
switches for libs:
-atc, -awpmd, -colvars, -cuda
-gpu, -meam, -poems, -qmmm, -reax, -voronoi
switches for build and makefile options:
-intel, -kokkos, -cc, -mpi, -fft, -jpg, -png
"""
# jmake switch
class Jmake:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-j N
use N procs for performing parallel make commands
used when building a lib or LAMMPS itself
if -j not specified, serial make commands run on single core
"""
def check(self):
if len(self.inlist) != 1: error("-j args are invalid")
if not self.inlist[0].isdigit(): error("-j args are invalid")
n = int(self.inlist[0])
if n <= 0: error("-j args are invalid")
self.n = n
# makefile switch
class Makefile:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-m machine
use Makefile.machine under src/MAKE as starting point to create Makefile.auto
if machine = "none", file action will create Makefile.auto from scratch
must use -cc and -mpi switches to specify compiler and MPI
if -m not specified, file/exe actions alter existing Makefile.auto
"""
def check(self):
if len(self.inlist) != 1: error("-m args are invalid")
self.machine = self.inlist[0]
# output switch
class Output:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-o machine
copy final src/lmp_auto to lmp_machine in working dir
if -o not specified, exe action only produces src/lmp_auto
"""
def check(self):
if len(self.inlist) != 1: error("-o args are invalid")
self.machine = self.inlist[0]
# packages switch
class Packages:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
def help(self):
return """
-p = package1 package2 ...
list of packages to install or uninstall in order specified
operates on set of packages currently installed
valid package names:
any LAMMPS standard or user package (type "make package" to see list)
prefix by yes/no to install/uninstall (see abbrevs)
yes-molecule, yes-user-atc, no-molecule, no-user-atc
can use LAMMPS categories (type "make package" to see list)
all = all standard and user packages (also none = no-all)
std (or standard) = all standard packages
user = all user packages
lib = all standard and user packages with auxiliary libs
can abbreviate package names and yes/no
omp = user-omp = yes-user-omp
^omp = ^user-omp = no-user-omp
user = yes-user, ^user = no-user
all = yes-all, ^all = none = no-all
when action performed, list is processed in order,
as if typed "make yes/no" for each
if "orig" or "original" is last package in list,
set of installed packages will be restored to original (current) list
after "build" action is performed
if -p not specified, currently installed packages are not changed
"""
def check(self):
if self.inlist != None and not self.inlist: error("-p args are invalid")
def setup(self):
# extract package lists from src/Makefile
# remove names from lib that there are not Make.py lib-classes for
# most don't actually have libs, so nothing to control from Make.py
make = MakeReader("%s/Makefile" % dir.src)
std = make.getvar("PACKAGE")
user = make.getvar("PACKUSER")
lib = make.getvar("PACKLIB")
lib.remove("kim")
lib.remove("kokkos")
lib.remove("user-molfile")
lib.remove("python")
lib.remove("user-quip")
all = std + user
# plist = command line args expanded to yes-package or no-package
plist = []
if self.inlist:
for one in self.inlist:
if one in std:
plist.append("yes-%s" % one)
elif one in user:
plist.append("yes-%s" % one)
elif "user-"+one in user:
plist.append("yes-user-%s" % one)
elif one == "std" or one == "standard" or one == "user" or \
one == "lib" or one == "all": plist.append("yes-%s" % one)
elif one.startswith("yes-"):
if one[4:] in std: plist.append("yes-%s" % one[4:])
elif one[4:] in user: plist.append("yes-%s" % one[4:])
elif "user-"+one[4:] in user: plist.append("yes-user-%s" % one[4:])
elif one == "yes-std" or one == "yes-standard" or \
one == "yes-user" or one == "yes-lib" or one == "yes-all":
plist.append("yes-%s" % one[4:])
else: error("Invalid package name %s" % one)
elif one.startswith("no-"):
if one[3:] in std: plist.append("no-%s" % one[3:])
elif one[3:] in user: plist.append("no-%s" % one[3:])
elif "user-"+one[3:] in user: plist.append("no-user-%s" % one[3:])
elif one == "no-std" or one == "no-standard" or one == "no-user" or \
one == "no-lib" or one == "no-all":
plist.append("no-%s" % one[3:])
else: error("Invalid package name %s" % one)
elif one.startswith('^'):
if one[1:] in std: plist.append("no-%s" % one[1:])
elif one[1:] in user: plist.append("no-%s" % one[1:])
elif "user-"+one[1:] in user: plist.append("no-user-%s" % one[1:])
elif one == "^std" or one == "^standard" or one == "^user" or \
one == "^lib" or one == "^all": plist.append("no-%s" % one[1:])
else: error("Invalid package name %s" % one)
elif one == "none": plist.append("no-all")
elif one == "orig": plist.append(one)
else: error("Invalid package name %s" % one)
if "orig" in plist and plist.index("orig") != len(plist)-1:
error('-p orig arg must be last')
if plist.count("orig") > 1: error('-p orig arg must be last')
# original = dict of all packages
# key = package name, value = 1 if currently installed, else 0
original = {}
str = "cd %s; make ps" % dir.src
output = commands.getoutput(str).split('\n')
pattern = "Installed\s+(\w+): package (\S+)"
for line in output:
m = re.search(pattern,line)
if not m: continue
pkg = m.group(2).lower()
if pkg not in all: error('Package list does not match "make ps" results')
if m.group(1) == "NO": original[pkg] = 0
elif m.group(1) == "YES": original[pkg] = 1
# final = dict of all packages after plist applied to original
# key = package name, value = 1 if installed, else 0
final = copy.deepcopy(original)
for i,one in enumerate(plist):
if "yes" in one:
pkg = one[4:]
yes = 1
else:
pkg = one[3:]
yes = 0
if pkg in all:
final[pkg] = yes
elif pkg == "std":
for pkg in std: final[pkg] = yes
elif pkg == "user":
for pkg in user: final[pkg] = yes
elif pkg == "lib":
for pkg in lib: final[pkg] = yes
elif pkg == "all":
for pkg in all: final[pkg] = yes
self.std = std
self.user = user
self.lib = lib
self.all = all
self.plist = plist
self.original = original
self.final = final
# install packages in plist
def install(self):
if self.plist: print "Installing packages ..."
for one in self.plist:
if one == "orig": continue
commands.getoutput("cd %s; make %s" % (dir.src,one))
if self.plist and verbose:
txt = commands.getoutput("cd %s; make ps" % dir.src)
print "Package status after installation:"
print txt
# restore packages to original list if requested
# order of re-install should not matter b/c of Depend.sh
def uninstall(self):
if not self.plist or self.plist[-1] != "orig": return
print "Restoring packages to original state ..."
commands.getoutput("cd %s; make no-all" % dir.src)
for one in self.all:
if self.original[one]:
commands.getoutput("cd %s; make yes-%s" % (dir.src,one))
if verbose:
txt = commands.getoutput("cd %s; make ps" % dir.src)
print "Restored package status:"
print txt
# redo switch
class Redo:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-r file label1 label2 ...
all args are optional
invoke Make.py commands from a file
other specified switches are merged with file commands (see below)
redo file format:
blank lines and lines starting with "#" are skipped
other lines are treated as commands
each command is a list of Make.py args, as if typed at command-line
commands can have leading label, followed by ":"
commands cannot contain a "-r" switch
if no args, execute previous command, which is stored in src/Make.py.last
if one arg, execute all commands from specified file
unlabeled or labeled commands are all executed
if multiple args, execute only matching labeled commands from file
if other switches are specified,
if file command does not have the switch, it is added
if file command has the switch, the specified switch replaces it
except if -a (action) switch is both specified and in the file command,
two sets of actions are merged and duplicates removed
if both switches have "exe or machine" action,
the specified exe/machine overrides the file exe/machine
"""
def check(self):
if len(self.inlist) == 0:
self.dir = 1
self.file = "Make.py.last"
self.labels = []
else:
self.dir = 0
self.file = self.inlist[0]
self.labels = self.inlist[1:]
# read redo file
# self.commands = list of commands to execute
def setup(self):
file = self.file
if not os.path.isfile(file): error("Redo file %s does not exist" % file)
lines = open(file,'r').readlines()
cmdlines = []
for line in lines:
line = line.strip()
if not line or line[0] == '#' : continue
cmdlines.append(line)
# if no labels, add all file commands to command list
# if labels, make a dict with key = label, value = command
# and discard unlabeled commands
dict = {}
commands = []
for line in cmdlines:
words = line.split()
if "-r" in words: error("Redo command cannot contain -r switch")
if words[0][-1] == ':': label = words[0][:-1]
else: label = None
if not self.labels:
if label: commands.append(' '.join(words[1:]))
else: commands.append(line)
else:
if not label: continue
dict[label] = ' '.join(words[1:])
# extract labeled commands from dict and add to command list
for label in self.labels:
if label not in dict: error("Redo label not in redo file")
commands.append(dict[label])
self.commands = commands
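# example redo file contents (illustrative):
#   # build serial and gpu variants
#   serial: -p none -a exe
#   gpu: -p gpu -gpu mode=double -a lib-gpu file exe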
# settings switch
class Settings:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-s set1 set2 ...
possible settings = gzip smallbig bigbig smallsmall
add each setting as LAMMPS setting to created Makefile.auto
if -s not specified, no settings are changed in Makefile.auto
"""
def check(self):
if not self.inlist: error("-s args are invalid")
for one in self.inlist:
if one not in setargs: error("-s args are invalid")
# verbose switch
class Verbose:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-v (no arguments)
produce verbose output as Make.py executes
if -v not specified, minimal output is produced
"""
def check(self):
if len(self.inlist): error("-v args are invalid")
# ----------------------------------------------------------------
# lib classes, one per LAMMPS auxiliary lib
# ----------------------------------------------------------------
# ATC lib
class ATC:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "g++"
self.lammpsflag = 0
def help(self):
return """
-atc make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = g++)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-atc args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-atc args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-atc args are invalid")
def build(self):
libdir = dir.lib + "/atc"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libatc.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/atc library")
else: print "Created lib/atc library"
# AWPMD lib
class AWPMD:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "mpicc"
self.lammpsflag = 0
def help(self):
return """
-awpmd make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = mpicc)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-awpmd args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-awpmd args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-awpmd args are invalid")
def build(self):
libdir = dir.lib + "/awpmd"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libawpmd.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/awpmd library")
else: print "Created lib/awpmd library"
# COLVARS lib
class COLVARS:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "g++"
self.lammpsflag = 0
def help(self):
return """
-colvars make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = g++)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-colvars args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-colvars args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-colvars args are invalid")
def build(self):
libdir = dir.lib + "/colvars"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libcolvars.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/colvars library")
else: print "Created lib/colvars library"
# CUDA lib
class CUDA:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.mode = "double"
self.arch = "31"
def help(self):
return """
-cuda mode=double arch=31
all args are optional and can be in any order
mode = double or mixed or single (def = double)
arch = M (def = 31)
M = 31 for Kepler
M = 20 for CC2.0 (GF100/110, e.g. C2050,GTX580,GTX470)
M = 21 for CC2.1 (GF104/114, e.g. GTX560, GTX460, GTX450)
M = 13 for CC1.3 (GF200, e.g. C1060, GTX285)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-cuda args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-cuda args are invalid")
if words[0] == "mode": self.mode = words[1]
elif words[0] == "arch": self.arch = words[1]
else: error("-cuda args are invalid")
if self.mode != "double" and self.mode != "mixed" and \
self.mode != "single":
error("-cuda args are invalid")
if not self.arch.isdigit(): error("-cuda args are invalid")
def build(self):
libdir = dir.lib + "/cuda"
commands.getoutput("cd %s; make clean" % libdir)
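    # map the precision keyword onto the numeric code expected by the
    # lib/cuda makefile: single = 1, double = 2, mixed = 3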
if self.mode == "double": n = 2
elif self.mode == "mixed": n = 3
elif self.mode == "single": n = 1
if jmake: str = "cd %s; make -j %d precision=%d arch=%s" % \
(libdir,jmake.n,n,self.arch)
    else: str = "cd %s; make precision=%d arch=%s" % \
         (libdir,n,self.arch)
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/liblammpscuda.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/cuda library")
else: print "Created lib/cuda library"
# GPU lib
class GPU:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "linux.double"
self.lammpsflag = self.modeflag = self.archflag = 0
def help(self):
return """
-gpu make=suffix lammps=suffix2 mode=double arch=N
all args are optional and can be in any order
make = use Makefile.suffix (def = linux.double)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
mode = double or mixed or single (def = CUDA_PREC in makefile)
arch = 31 (Kepler) or 21 (Fermi) (def = CUDA_ARCH in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-gpu args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-gpu args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
elif words[0] == "mode":
self.mode = words[1]
self.modeflag = 1
elif words[0] == "arch":
self.arch = words[1]
self.archflag = 1
else: error("-gpu args are invalid")
if self.modeflag and (self.mode != "double" and
self.mode != "mixed" and
self.mode != "single"):
error("-gpu args are invalid")
if self.archflag and not self.arch.isdigit():
error("-gpu args are invalid")
def build(self):
libdir = dir.lib + "/gpu"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.modeflag:
if self.mode == "double":
make.setvar("CUDA_PRECISION","-D_DOUBLE_DOUBLE")
elif self.mode == "mixed":
make.setvar("CUDA_PRECISION","-D_SINGLE_DOUBLE")
elif self.mode == "single":
make.setvar("CUDA_PRECISION","-D_SINGLE_SINGLE")
if self.archflag:
make.setvar("CUDA_ARCH","-arch=sm_%s" % self.arch)
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libgpu.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/gpu library")
else: print "Created lib/gpu library"
# MEAM lib
class MEAM:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "gfortran"
self.lammpsflag = 0
def help(self):
return """
-meam make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = gfortran)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-meam args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-meam args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-meam args are invalid")
def build(self):
libdir = dir.lib + "/meam"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
# do not use -j for MEAM build, parallel build does not work
str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libmeam.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/meam library")
else: print "Created lib/meam library"
# POEMS lib
class POEMS:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "g++"
self.lammpsflag = 0
def help(self):
return """
-poems make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = g++)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-poems args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-poems args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-poems args are invalid")
def build(self):
libdir = dir.lib + "/poems"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libpoems.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/poems library")
else: print "Created lib/poems library"
# QMMM lib
class QMMM:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "gfortran"
self.lammpsflag = 0
def help(self):
return """
-qmmm make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = gfortran)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-qmmm args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-qmmm args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-qmmm args are invalid")
def build(self):
libdir = dir.lib + "/qmmm"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libqmmm.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/qmmm library")
else: print "Created lib/qmmm library"
# REAX lib
class REAX:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "gfortran"
self.lammpsflag = 0
def help(self):
return """
-reax make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = gfortran)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-reax args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-reax args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-reax args are invalid")
def build(self):
libdir = dir.lib + "/reax"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libreax.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/reax library")
else: print "Created lib/reax library"
# VORONOI lib
class VORONOI:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.install = ""
def help(self):
return """
-voronoi install="-d dir -v version -g -b -i installdir -l incdir libdir"
arg is optional, only needed if want to run install.py script
install = args to use with lib/voronoi/install.py script
must enclose in quotes since install.py args have switches
install.py can download, build, install, setup links to the Voro++ library
see lib/voronoi/README for details on Voro++ and using install.py
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-voronoi args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-voronoi args are invalid")
if words[0] == "install": self.install = words[1]
else: error("-voronoi args are invalid")
def build(self):
if not self.install: return
libdir = dir.lib + "/voronoi"
cmd = "cd %s; python install.py %s" % (libdir,self.install)
txt = commands.getoutput(cmd)
if verbose: print txt
print "Created lib/voronoi library"
# ----------------------------------------------------------------
# build classes for intel, kokkos build options
# ----------------------------------------------------------------
# Intel class
class Intel:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.mode = "cpu"
def help(self):
return """
-intel mode
mode = cpu or phi (def = cpu)
build Intel package for CPU or Xeon Phi
"""
def check(self):
if self.inlist == None: return
if len(self.inlist) != 1: error("-intel args are invalid")
self.mode = self.inlist[0]
if self.mode != "cpu" and self.mode != "phi":
error("-intel args are invalid")
# Kokkos class
class Kokkos:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.mode = ""
self.archflag = 0
def help(self):
return """
-kokkos mode arch=N
mode is not optional, arch is optional
  mode = omp or cuda or phi (def = KOKKOS_DEVICES setting in Makefile)
build Kokkos package for omp or cuda or phi
set KOKKOS_DEVICES to "OpenMP" (omp, phi) or "Cuda, OpenMP" (cuda)
arch = 31 (Kepler) or 21 (Fermi) (def = -arch setting in Makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-kokkos args are invalid")
if self.inlist == None: return
if len(self.inlist) < 1: error("-kokkos args are invalid")
self.mode = self.inlist[0]
if self.mode != "omp" and self.mode != "cuda" and self.mode != "phi":
error("-kokkos args are invalid")
for one in self.inlist[1:]:
words = one.split('=')
if len(words) != 2: error("-kokkos args are invalid")
if words[0] == "arch":
self.arch = words[1]
self.archflag = 1
else: error("-kokkos args are invalid")
# ----------------------------------------------------------------
# makefile classes for CC, MPI, JPG, PNG, FFT settings
# ----------------------------------------------------------------
# Cc class
class Cc:
def __init__(self,list):
self.inlist = list[:]
self.compiler = self.abbrev = ""
self.wrap = ""
def help(self):
return """
-cc compiler wrap=wcompiler
change CC setting in makefile
compiler is required, all other args are optional
compiler = any string with g++ or icc or icpc
or mpi (or mpicxx, mpiCC, mpiicpc, etc)
can be compiler name or full path to compiler
mpi by itself is changed to mpicxx
wcompiler = compiler for mpi wrapper to use
use nvcc for building for Kokkos/cuda with provided nvcc_wrapper
"""
def check(self):
if len(self.inlist) < 1: error("-cc args are invalid")
self.compiler = self.inlist[0]
if self.compiler == "mpi":
self.compiler = "mpicxx"
self.abbrev = "mpi"
elif self.compiler.startswith("mpi"):
self.abbrev = "mpi"
elif self.compiler == "g++" or self.compiler == "icc" or \
self.compiler == "icpc":
self.abbrev = self.compiler
elif "mpi" in self.compiler: self.abbrev = "mpi"
elif "g++" in self.compiler: self.abbrev = "g++"
elif "icc" in self.compiler: self.abbrev = "icc"
elif "icpc" in self.compiler: self.abbrev = "icpc"
else: error("-cc args are invalid")
for one in self.inlist[1:]:
words = one.split('=')
if len(words) != 2: error("-cc args are invalid")
if words[0] == "wrap":
if self.abbrev != "mpi": error("-cc compiler is not a wrapper")
self.wrap = words[1]
else: error("-cc args are invalid")
# Mpi class
class Mpi:
def __init__(self,list):
self.inlist = list[:]
self.style = self.dir = ""
def help(self):
return """
-mpi style dir=path
change MPI settings in makefile
style is required, all other args are optional
style = mpi or mpich or ompi or serial
mpi = no MPI settings (assume compiler is MPI wrapper)
mpich = use explicit settings for MPICH
ompi = use explicit settings for OpenMPI
serial = use settings for src/STUBS library
dir = path for MPICH or OpenMPI directory
add -I and -L settings for include and lib sub-dirs
"""
def check(self):
if len(self.inlist) < 1: error("-mpi args are invalid")
self.style = self.inlist[0]
if self.style != "mpi" and self.style != "mpich" and \
self.style != "ompi" and self.style != "serial":
error("-mpi args are invalid")
for one in self.inlist[1:]:
words = one.split('=')
if len(words) != 2: error("-mpi args are invalid")
if words[0] == "dir": self.dir = words[1]
else: error("-mpi args are invalid")
# Fft class
class Fft:
def __init__(self,list):
self.inlist = list[:]
self.dir = self.incdir = self.libdir = ""
def help(self):
return """
-fft mode lib=libname dir=homedir idir=incdir ldir=libdir
change FFT settings in makefile
mode is required, all other args are optional
removes all current FFT variable settings
  mode = none or fftw or fftw3 or ...
adds -DFFT_MODE setting
lib = name of FFT library to link with (def is libname = mode)
    adds -llib setting, e.g. -lfftw3
dir = home dir for include and library files (def = none)
adds -Idir/include and -Ldir/lib settings
if set, overrides idir and ldir args
idir = dir for include file (def = none)
adds -Iidir setting
ldir = dir for library file (def = none)
adds -Lldir setting
"""
def check(self):
if not len(self.inlist): error("-fft args are invalid")
self.mode = self.inlist[0]
self.lib = "-l%s" % self.mode
for one in self.inlist[1:]:
words = one.split('=')
if len(words) != 2: error("-fft args are invalid")
if words[0] == "lib": self.lib = "-l%s" % words[1]
elif words[0] == "dir": self.dir = words[1]
elif words[0] == "idir": self.incdir = words[1]
elif words[0] == "ldir": self.libdir = words[1]
else: error("-fft args are invalid")
# Jpg class
class Jpg:
def __init__(self,list):
self.inlist = list[:]
self.on = 1
self.dir = self.incdir = self.libdir = ""
def help(self):
return """
-jpg flag dir=homedir idir=incdir ldir=libdir
change JPG settings in makefile
all args are optional, flag must come first if specified
flag = yes or no (def = yes)
include or exclude JPEG support
adds/removes -DLAMMPS_JPEG and -ljpeg settings
dir = home dir for include and library files (def = none)
adds -Idir/include and -Ldir/lib settings
if set, overrides idir and ldir args
idir = dir for include file (def = none)
adds -Iidir setting
ldir = dir for library file (def = none)
adds -Lldir setting
"""
def check(self):
for i,one in enumerate(self.inlist):
if one == "no" and i == 0: self.on = 0
elif one == "yes" and i == 0: self.on = 1
else:
words = one.split('=')
if len(words) != 2: error("-jpeg args are invalid")
if words[0] == "dir": self.dir = words[1]
elif words[0] == "idir": self.incdir = words[1]
elif words[0] == "ldir": self.libdir = words[1]
else: error("-jpeg args are invalid")
# Png class
class Png:
def __init__(self,list):
self.inlist = list[:]
self.on = 1
self.dir = self.incdir = self.libdir = ""
def help(self):
return """
-png flag dir=homedir idir=incdir ldir=libdir
change PNG settings in makefile
all args are optional, flag must come first if specified
flag = yes or no (def = yes)
include or exclude PNG support
adds/removes -DLAMMPS_PNG and -lpng settings
dir = home dir for include and library files (def = none)
adds -Idir/include and -Ldir/lib settings
if set, overrides idir and ldir args
idir = dir for include file (def = none)
adds -Iidir setting
ldir = dir for library file (def = none)
adds -Lldir setting
"""
def check(self):
for i,one in enumerate(self.inlist):
if one == "no" and i == 0: self.on = 0
elif one == "yes" and i == 0: self.on = 1
else:
words = one.split('=')
if len(words) != 2: error("-png args are invalid")
if words[0] == "dir": self.dir = words[1]
elif words[0] == "idir": self.incdir = words[1]
elif words[0] == "ldir": self.libdir = words[1]
else: error("-png args are invalid")
# ----------------------------------------------------------------
# auxiliary classes
# ----------------------------------------------------------------
# read, tweak, and write a Makefile
class MakeReader:
# read a makefile
# flag = 0 if file is full path name
# flag = 1,2 if file is suffix for any Makefile.machine under src/MAKE
# look for this file in same order that src/Makefile does
# if flag = 1, read the file
# if flag = 2, just check if file exists
def __init__(self,file,flag=0):
if flag == 0:
if not os.path.isfile(file): error("Makefile %s does not exist" % file)
lines = open(file,'r').readlines()
else:
mfile = "%s/MAKE/MINE/Makefile.%s" % (dir.src,file)
if not os.path.isfile(mfile):
mfile = "%s/MAKE/Makefile.%s" % (dir.src,file)
if not os.path.isfile(mfile):
mfile = "%s/MAKE/OPTIONS/Makefile.%s" % (dir.src,file)
if not os.path.isfile(mfile):
mfile = "%s/MAKE/MACHINES/Makefile.%s" % (dir.src,file)
if not os.path.isfile(mfile):
error("Makefile.%s does not exist" % file)
if flag == 1: lines = open(mfile,'r').readlines()
else: return
# scan lines of makefile
# if not a variable line, just copy to newlines
# if a variable line, concatenate any continuation lines
# convert variable to var dict entry: key = name, value = list of words
# discard any portion of value string with a comment char
# varinfo = list of variable info: (name, name with whitespace for print)
# add index into varinfo to newlines
# ccindex = index of "CC =" line, to add OMPI var before it
# lmpindex = index of "LAMMPS-specific settings" line to add KOKKOS vars before it
var = {}
varinfo = []
newlines = []
pattern = "(\S+\s+=\s+)(.*)"
conditional = 0
multiline = 0
self.ccindex = self.lmpindex = 0
for line in lines:
line = line[:-1]
if "CC =" in line: self.ccindex = len(newlines)
if "LAMMPS-specific settings" in line: self.lmpindex = len(newlines)
if "ifeq" in line:
conditional = 1
continue
if conditional:
if "endif" in line:
conditional = 0
continue
if multiline:
if '#' in line: line = line[:line.find('#')]
morevalues = line.split()
values = values[:-1] + morevalues
if values[-1] != '\\':
var[name] = values
multiline = 0
newlines.append(str(len(varinfo)))
varinfo.append((name,namewhite))
continue
varflag = 1
if len(line.strip()) == 0: varflag = 0
elif line.lstrip()[0] == '#': varflag = 0
else:
m = re.match(pattern,line)
if not m: varflag = 0
if varflag:
namewhite = m.group(1)
name = namewhite.split()[0]
if name in var:
error("Makefile variable %s appears more than once" % name)
remainder = m.group(2)
if '#' in remainder: remainder = remainder[:remainder.find('#')]
values = remainder.split()
if values and values[-1] == '\\': multiline = 1
else:
var[name] = values
newlines.append(str(len(varinfo)))
varinfo.append((name,namewhite))
else:
newlines.append(line)
self.var = var
self.varinfo = varinfo
self.lines = newlines
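  # illustrative example (not part of the original source): a makefile with
  #   CC =            mpicxx
  #   CCFLAGS =       -g -O3    # optimization flags
  # parses to var = {"CC": ["mpicxx"], "CCFLAGS": ["-g", "-O3"]}; the comment
  # is discarded and the original "NAME = " whitespace is kept in varinfo so
  # write() can reproduce the makefile's formatting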
# return list of values associated with var
# return None if var not defined
def getvar(self,var):
if var in self.var: return self.var[var]
else: return None
# set var to single value
# if var not defined, error
def setvar(self,var,value):
if var not in self.var: error("Variable %s not in makefile" % var)
self.var[var] = [value]
# add value to var
# do not add if value already defined by var
# if var not defined,
# create new variable using "where"
# where="cc", line before "CC =" line, use ":="
# where="lmp", 2 lines before "LAMMPS-specific settings" line, use "="
def addvar(self,var,value,where=""):
if var in self.var:
if value not in self.var[var]: self.var[var].append(value)
else:
if not where:
error("Variable %s with value %s is not in makefile" % (var,value))
if where == "cc":
if not self.ccindex: error("No 'CC =' line in makefile to add variable")
index = self.ccindex
varwhite = "%s :=\t\t" % var
elif where == "lmp":
if not self.lmpindex: error("No 'LAMMPS-specific settings line' " +
"in makefile to add variable")
index = self.lmpindex - 2
varwhite = "%s =\t\t" % var
self.var[var] = [value]
varwhite = "%s =\t\t" % var
self.lines.insert(index,str(len(self.varinfo)))
self.varinfo.append((var,varwhite))
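  # illustrative example (not part of the original source; OMPI_CC is a
  # hypothetical variable name): addvar("OMPI_CC","mpicc",where="cc") inserts
  # a new "OMPI_CC :=" line just above the "CC =" line, while
  # addvar("CCFLAGS","-fopenmp") appends to an already-defined variable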
# if value = None, remove entire var
# no need to update lines or varinfo, write() will ignore deleted vars
# else remove value from var
# value can have trailing '*' to remove wildcard match
# if var or value not defined, ignore it
def delvar(self,var,value=None):
#if var == "KOKKOS_DEVICES":
# print self.var,value
if var not in self.var: return
if not value:
del self.var[var]
#print "AGAIN",self.var
elif value and value[-1] != '*':
if value not in self.var[var]: return
self.var[var].remove(value)
else:
value = value[:-1]
values = self.var[var]
dellist = []
for i,one in enumerate(values):
if one.startswith(value): dellist.append(i)
while dellist: values.pop(dellist.pop())
self.var[var] = values
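  # illustrative example (not part of the original source):
  # delvar("KOKKOS_DEVICES") drops the variable entirely, while
  # delvar("CCFLAGS","-DFFT*") removes only the values starting with "-DFFT"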
# write stored makefile lines to file, using vars that may have been updated
# do not write var if not in dict, since has been deleted
# wrap var values into multiple lines if needed
# file = 1 if this is Makefile.auto, change 1st line to use "auto"
def write(self,file,flag=0):
fp = open(file,'w')
for i,line in enumerate(self.lines):
if not line.isdigit():
if flag and i == 0:
line = "# auto = makefile auto-generated by Make.py"
print >>fp,line
else:
index = int(line)
name = self.varinfo[index][0]
txt = self.varinfo[index][1]
if name not in self.var: continue
values = self.var[name]
print >>fp,"%s%s" % (txt,' '.join(values))
# ----------------------------------------------------------------
# main program
# ----------------------------------------------------------------
# parse command-line args
# switches dict: key = switch letter, value = list of args
# switch_order = list of switches in order
# will possibly be merged with redo file args below
cmd_switches,cmd_switch_order = parse_args(sys.argv[1:])
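# illustrative example (not part of the original source):
# "Make.py -v -a lib-gpu exe" parses to
# cmd_switches = {'v': [], 'a': ['lib-gpu','exe']} with
# cmd_switch_order = ['v','a']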
if "v" in cmd_switches:
print "Command-line parsing:"
for switch in cmd_switch_order:
print " %s: %s" % (switch,' '.join(cmd_switches[switch]))
# check for redo switch, process redo file
# redolist = list of commands to execute
redoflag = 0
redolist = []
if 'r' in cmd_switches and 'h' not in cmd_switches:
redoflag = 1
redo = Redo(cmd_switches['r'])
redo.check()
redo.setup()
redolist = redo.commands
redoindex = 0
del redo
if not redolist: error("No commands to execute from redo file")
# loop over Make.py commands
# if no redo switch, loop once for command-line command
# if redo, loop over one or more commands from redo file
while 1:
# if redo:
# parse next command from redo file
# use command-line switches to add/replace file command switches
# do not add -r, since already processed
  # and don't want the -r switch to appear in Make.py.last file
# if -a in both: concatenate, de-dup,
# specified exe/machine action replaces file exe/machine action
# print resulting new command
# else just use command-line switches
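  # illustrative example (not part of the original source): if a redo-file
  # command contains "-a lib-atc exe" and the command line adds "-a exe -v",
  # the two -a lists are concatenated and de-duped to one "lib-atc" and one
  # "exe" action, and the -v switch is carried over into the re-run command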
if redoflag:
if redoindex == len(redolist): break
args = redolist[redoindex].split()
switches,switch_order = parse_args(args)
redoindex += 1
for switch in cmd_switches:
if switch == 'r': continue
if switch == 'a' and switch in switches:
tmp = Actions(cmd_switches[switch] + switches[switch])
tmp.dedup()
switches[switch] = tmp.inlist
continue
if switch not in switches: switch_order.append(switch)
switches[switch] = cmd_switches[switch]
argstr = switch2str(switches,switch_order)
print "Redo command: Make.py",argstr
else:
switches = cmd_switches
switch_order = cmd_switch_order
# initialize all class variables to None
for one in switchclasses: exec("%s = None" % one)
for one in libclasses: exec("%s = None" % one)
for one in buildclasses: exec("%s = None" % one)
for one in makeclasses: exec("%s = None" % one)
# classes = dictionary of created classes
# key = switch, value = class instance
classes = {}
for switch in switches:
if len(switch) == 1 and switch in abbrevs:
i = abbrevs.index(switch)
capitalized = switchclasses[i][0].upper() + switchclasses[i][1:]
txt = '%s = classes["%s"] = %s(switches["%s"])' % \
(switchclasses[i],switch,capitalized,switch)
exec(txt)
elif switch in libclasses:
i = libclasses.index(switch)
txt = '%s = classes["%s"] = %s(switches["%s"])' % \
(libclasses[i],switch,libclasses[i].upper(),switch)
exec(txt)
elif switch in buildclasses:
i = buildclasses.index(switch)
capitalized = buildclasses[i][0].upper() + buildclasses[i][1:]
txt = '%s = classes["%s"] = %s(switches["%s"])' % \
(buildclasses[i],switch,capitalized,switch)
exec(txt)
elif switch in makeclasses:
i = makeclasses.index(switch)
capitalized = makeclasses[i][0].upper() + makeclasses[i][1:]
txt = '%s = classes["%s"] = %s(switches["%s"])' % \
(makeclasses[i],switch,capitalized,switch)
exec(txt)
else: error("Unknown command-line switch -%s" % switch)
# print help messages and exit
if help or (actions and "-h" in actions.inlist) or not switches:
if not help: help = Help(None)
print help.help()
for switch in switch_order:
if switch == "h": continue
print classes[switch].help()[1:]
sys.exit()
# create needed default classes if not specified with switch
# dir and packages plus lib and build classes so defaults are set
if not dir: dir = Dir(None)
if not packages: packages = Packages(None)
for one in libclasses:
txt = "if not %s: %s = %s(None)" % (one,one,one.upper())
exec(txt)
for one in buildclasses:
capitalized = one[0].upper() + one[1:]
txt = "if not %s: %s = %s(None)" % (one,one,capitalized)
exec(txt)
# error check on args for all classes
for switch in classes: classes[switch].check()
# prep for action
# actions.setup() detects if last action = machine
# if yes, induce addition of "-m" and "-o" switches
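  # illustrative example (not part of the original source): "Make.py -a g++"
  # ends in a machine suffix, so it is treated like
  # "Make.py -a exe -m g++ -o g++": build with Makefile.g++ and copy the
  # result to lmp_g++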
dir.setup()
packages.setup()
if actions:
machine = actions.setup()
if machine:
switches['a'][-1] = "exe"
if 'm' not in switches:
switches['m'] = [machine]
switch_order.insert(-1,'m')
makefile = classes['m'] = Makefile(switches['m'])
makefile.check()
if 'o' not in switches:
switches['o'] = [machine]
switch_order.insert(-1,'o')
      output = classes['o'] = Output(switches['o'])
output.check()
# perform actions
packages.install()
if actions:
for action in actions.alist:
print "Action %s ..." % action
if action.startswith("lib-"): actions.lib(action[4:])
elif action == "file": actions.file("file")
elif action == "clean": actions.clean()
elif action == "exe": actions.exe()
packages.uninstall()
# create output file if requested and exe action performed
if output and actions and "exe" in actions.alist:
txt = "cp %s/lmp_auto %s/lmp_%s" % (dir.src,dir.cwd,output.machine)
commands.getoutput(txt)
print "Created lmp_%s in %s" % (output.machine,dir.cwd)
# write current Make.py command to src/Make.py.last
fp = open("%s/Make.py.last" % dir.src,'w')
print >>fp,"# last invoked Make.py command"
print >>fp,switch2str(switches,switch_order)
fp.close()
# if not redoflag, done
if not redoflag: break
|
qipa/lammps
|
src/Make.py
|
Python
|
gpl-2.0
| 70,857
|
[
"LAMMPS"
] |
11c1220d2a7daacd145126c9155da830e5793967ea1f5f5b11d792665f22100e
|
E7pBWy7kqROXTsijULwfoedNcUDSgJYZCTSQ+gGpQ1l/tekeMokMU98evaFlpdII9cUnSTkmIHS+
UG8F2dkDMa4ZXnOuOvI1KWFkTPkq/t55bXcpM5KUlomhGnrn8upBFiDeIZVjRYZmSpz4mkYLbbAd
A9ewvlNKRGtYQnBQe1LETB9EqeymbN4f5CNNTkmZyBveHGK5o+eRhUjVNLdv/+g4Z6BrOnaFbvgb
vOcKfSFTeCgozCPY21HPqqAgwcFY4uSYTxzbVQ7evuUmYcPGW6HqlrUoq8v64gLxwBuei4HASOgk
PpEftb1F6WfsI9FqOCHJH5eRvC8WCf6yIF0X0W9QXtcFlHiN5bJoYM2B8oI3Z/kj2C/FtLf2bFG0
hdWtNrxt6O60kamAftsSSKsPl2x0ihykryE0Kn769q1+m6kVnr5962aNfswvXhI4h1IDzf0BtiSJ
TMG7v9K9VK7573eTWt+q0eLQ2ZiwY8XlEd9Oskhky6LzeKE667aIosjnl8ZlnpAgl5xtAEWINzBM
vYrPCs7/IXHarnMM28TaisTitKDzqmU2TEcwi5rMcdQTXvJW6R7DDSFuF1tz6+A6MLSsrkGAyhFg
cnypDGs8sBYm5T6VO+o6iiqn0NvgUb1MS7al48QbdnYbiVIFUcXvHuch3di6V+EQcuY2rkHtavWi
qIoG5mzG8v6q6HKsazWrSkTJCmCUIOqnSLSARtDAyAgEzbTspuh26XuwOFJnYHtWUvD3t3y9/YC3
Fvw2oY3N9uNELFka5YQcqVypG/SrCwKRZkPqwExJpaqV4qbzKMBStCgcmlRsgXzXD3AaHnSgqi3q
a1fE1fIhMwy9PaD5eL7ckMI2z9cdh5wqVHA7lptsEYlZtd6QbWsSwjs2oCVDGXqiWj01vUKRTgmS
NpiKlCaEhFLCHPZDWGKLg64+OCsOECNWE4nihyWnrgwci5SMKExNtgIJCoS/CrUi5pQ6nKSxUOD2
UAfgWBYpd84UvpW96VhYyVldL4u8OtZ5sasa1kVDfissrTq6vvKFsS469VihTya7VrZPeQmSbbmY
6MDPFmG1INiCckxIJx9STIe8YQNiHqEouiy2STkOJSYBvmWk27c+ConhUZW3b4chm1I9wPrGJ8uW
1M23b7HsNoBq5oZXm6MKBbv99u0H064iXEMXAbIz5TFQooLYJ2CxidGuHKRfJbcVNzmeQcjQ8fwI
3ZVkibNcW1RkEqWDP2DvTWhZtTXv5Qpp7PukBQTaqIgUDtReMGD/Mearq0LETp4OBBGSiSwLrTJi
Ip0WTfYavrOoqYyzI+UTZFifVV1qP0MyQUbnG4A98I+hQ8+q8/rt4Lo0Y7jDyhzSC5QZSbbUEIfn
OrwD8j0BzeeNUTpag+JOBmP7ijUN7Ps4EJTOklHhDyRaS2vM4vr2DOE6yrRDxtvUnJ/1+YOUdA0k
EzLmoQfVxta5ri9rxRfRBVFUONHKf9+4tXRgklNLfdQm+TjY3+L7wzQ3Q0EM0aM8b8xZXL2GbaU4
x/MQ9E7qnRkWN+tlXuU6civXL1vc+UCWPs/LJcdIoYFA6UZmVfirHXGPct3USli3INteopphoP8f
gUAnUAqO2MoNCvmldSWJVn1Gxgs0NLd0iTevzAMC9FFZfYSbIkdaVLWLFiQosvaakLXIBhEEh71t
0GiL9musQpBos1/weV+7LC+6y+XthA15lE8KscVRrH0QKqJ1u1mt8ubWYq7fF82V1flyU4BOwtEl
RQxMHP8DYZkzjnyYL9PvjRS5B7PLIgdtSFMh8QB0MApsHDJdjLhF2QLV3PLhEQPBAdZiPeHem2H2
DKnSPKlMxBP6DNzkN4JN4AD1gou6gdkFOa/plni5rCHx+n3RnGH4SQpwfk5GXbvVoQZ3bTFqEDOh
kEQ9YEjO8Q2ew+KxKdJbjhs3W0bQu1mhQqBYnfteyG1Rz4mZfr87hrQiHgVoiKLVlshnfxKVo/lC
JXoTmwDLFALOmq2BBv4AJi7yBzXOSXzwJaesi+JsY1lTvz9bF526zZSTRLEQr5JJ1HPcUWF/baem
6KJGoUZVDlhnZUA01gS0VdyRcIOe4xIOW0Q1EqhS2wd6JY66DB09dZOtAFX5SJfvw9SKNAMFyZtU
MBZuJ1GPZUCLIKeXpBQuKV8RrsGH2aepavn6smAHobwyanp0DUyMvIcXKnYyvF7XtIOQf9EZm+FV
L8hXgYlEK9/OMQpJ89IkpfQNNibSZm4SUlQWnnWFZXlVRGN0/s502PyxtWG9++s3f0UOeGiF1O5z
/80bQsmm4lMCskLKLeN8XbIH3b998xdKSJcV9+6/ff2//zU70AGLn9fvhcmicCZFWslPs1HnQuYw
mNm83IdlwBhrf2TvbZnYAaXUU/YxeFm821Dpp6DuyDNUFrgqjg4Dl2ZzCnXKNV8La31JxIt/n0If
vhLVfB9HpIum3qzVBYYGvTfoSTIWw6MkxqGHlqfO+OBAUHEgaBibW6/smTAdg2gFfAuvho4nykWB
ExOZsujNOB37qEWZB30Q+7DRt3Q6lrLq9c4+opPcUAetvo2x8EdZd4OxMdGI+T5vpmOgqbHfYd1Z
oik7hgySrobItiyBGB4DdS2904maeMZMdTBm+j2yoq9klCYdozc6UekwvAN76Yinj4vHoLvsl1zk
68Bpw0hH7Ut0oxwMEMZLwQBhp4pTTk7KXVUaFKodZcdROymaOtq/fz1y7kdI3IqKsDs1LkjkRJS4
+OtHQpd+v4ZO9fE4GnE0AloxoDZI4cReTwKSI6YzQ1H5TeCtMn9NSJS2oxFQgDy7CN7gtH66BVlF
mEZGM6CbcrUSmlRqOTuZL4VNQAFexfilyt5liGt6isn9inS4b5nVUnet8SL4QLNGYpk4hlHC5yGM
3UnE1g65E7/A6n7AAs7BazWjsvBquDZEfwAEHtFKX9yX1By61eOnlT2ArSV+0HsLHn4kBoSTeQCU
VdQuTRojd1ungiJ16kA28sBJisa18MKNFRs6kZIZoAPtT0+5pX1ieqmabyrc9fGAVu+Oqef9LkVl
GNBN1RlaTt7VlxztyohEp1ImL0Y962KvpDNyvgKwlECobDdSv3upRvElEi0o54MRpOT9tB/qa+CO
qipvQ/9YjdM8cO+Wr4BXYdBUgyqM94jZwKHfXkxUb737O3GihjuRvkwU9NRLscuRzHwiyp6T7/9j
ibXjVnr55MXzl69nb7589vRpv6b9tjcjivu4UUxUZ1OV6rNok3njJzxVdU96sZfM9G0J8yzX5q0Z
mdCDQ4Wi6CA6OoSN4V707bfffhEMtKSYoB7KSXnMlU8H7lBiIRXYa3z/8JNFdL+lwOrlx0fc8EBS
0xIjRh3tSWsmdtiTbx99/eKrJ9FXzx8/ev3s+TfRm29+8c3zX34z4bjel/U1aVh4fYDECcqxkXeK
NAPXO9nWg5fOPv/883grWhR9t/WmmRcc2YpnM90DPfEXX3wB2IH/x4Qganc7jnTXsizrBd8NM78w
70sH8IqTIGsk4+sws0V5fg4qHsKS8Q4zTo9JXWCIM3t9pBL9a/zrarzP3fcS942ZDIyXEYnomgmj
+J44jBZ46HxGW8fwCE/Gb7558u2LJ49fP/kyevLt4ycvkHSOmVR3xM1aN4nTK241PR1uTcUswkgQ
Z/n8KsMzntxSPJOP9hmBiF2+RBWSlbZED1QhKIfEA2crVpuws1OTZem8HsiPQ/v7ecvCIMsn4xMh
i1PhBRy6HfcmVxrSoqTTLxGPkNtb4tGAuLFTSLgXtYCq9vw2eutqhW/tyFn2DVseEWiM6GQNgH/7
OyMnrnToaBG7Z+LObmedUl1GGEm65cqo5H1nCELx5yuUyK1nZJyYspCEEKf4507hSXEbXLYiPKqB
ST5b6+IhtThrGDsYztpBlxdd2SuOo1gu5aGdIhEpC10vutqSaUiYoYn3ws9y8FUhJw6sDDrSjM/S
3he+qkvnB7Sizpc5DEmBf/LVV89evHr2auIJYbAiUbmBguW8Swyap/5o8BXjiRf7pB9NdlZXM7Ja
UUjBiTIY457uErnS84JUrZz07krVKnA6Y8oNiq9c6fvx08WxTOUWY/1P3yrI2ASifjpTsYXBuECl
QzonSfr9LMTQyvGMPAmmzDs5nUC1D6ZyZTHmK4HYh7tSjjX8DYZEMWZfmWLt7MeexasCM/SU7cq+
EbwgAdbf2Om5HdRNKXG6IFqWX9LTRJPnZOuKSR2VmP0cKulCRvfqlXed+Pwp2X03Q7KXn4vWft4s
6pMIH+2xClJRrNbdrbZq9RrkVHi2ok1gTMJ2pUZPIlZH3/13b/5KuxOQ+X9ZX7z771///Z+xrRJ+
gQCNdqjigKx5dBfUO0unUwOkQNzi5Xwm5zDEIw6kTpewXBvmd7Ij9g/CxrDn6gEoe71cxxkw5cUH
B7oGGpbUzwP+7dnzTK5Dy4R3adn5aG/xLHr6boDCIeo+hELBGjSV9Qx1wUs9ZnBTz96m34x0UDS6
qYDX//lMVNWEqWmX6OheUbKq5GZRqngMFLFclVPR5lXsJ3WKGVNtujhu34OHKhJuiiIOaDhQAZPH
H6G6RerD2YbEanMdSvFfe3Av6ftX9YVuVuCnfrXwreikBzTd79qU3QkgNXfkM5tUyO7UQ5stF8qj
TLreS3eMSQ96/dw+vk2lR+iPTFxP6aDC9kQDPlqi5YA849DdgB5IN1SCpRN6iFEPTNoF3sKQ0tQ7
fqJfNp2T7IMdHxakIHKVo2NLW66Kaw0Ri9nQVPgFU2QqzfdYIjfc15g4+5HSMI8D+m2gy9uVMBek
Hay7X94t+8BrXjdNh6p2hAiFDMdiJaEpMN4E4DJOTuJQY26CJacDbgx2g3aF35GluMQxh326YAFH
26/VAmRntWEDrb9APROoPlKwzhKMjVozDfWNwzkWDcW0d/mF7gCFC0HvNMwq29wqQzasa9a+2H1h
TmeZy7pCJzY7ZIyVRYHyl82SMRkjyFpj11UQQX6iGNd2l9OgzVy1tsUiEmhd2sbiwZZMrm4Ycr0B
yVTnyOatb+eQ1UgspqZqxux26zA0S/pQadX6tjYLpnIrdBc0DcKfqx2zZGfzG/BFtAeeHrvZNqgb
eApNCZTwiHgsd37oDXtRh+wCNtf29AI8dc52ONZMpTsW814UhJj2xMpFRadkC9JO4pvYj9fIKJDQ
TtJjgxjDemwo3w5DiWO3zhYcWLWsALN2LVZfv0OP5S3Kr+u7ATp5eJr2uIZeBoqMd9GRe89tmIYq
8u4YxNR2jFgzP346DgS03A9hwzuSOCC6yBzswqutXQCmd3x/Iba/yOuMa4/4UKS7rjD6fMtjTvhz
3uStzZ2020ys3/Y4VJ+hcUFQAkQq73G0YW4mwMbz68UxY8Q9uUi3szVmafEPY47+pQYJutX/8OZP
0cGCUPDuf3z9J/+GdKqR3ESDx1VtMsXS3it30p49x5Prmj2B0dEUq4nm1N62oL9i5AOsr/ZsTuLz
hCPRoZuOHSxtjxTfQBEZ7oQNEkUEOEjcvOXwTQxRu9XcXs091eNePVBz96g2jiSZ5Cpfc9YqAkQx
eHbnMLpH1m1TY6cOn+7MeY67yPjkfnsqCyy5+8A+eEz9vEuj0ewapAskFgCGJiQq8vCYCUhS+jHM
T0IPj36in1LAZ3lqyv7szatfTUB6Y9vEfBEtmvI9MAf0LwZAXz/58tmbrzHM7aqNNhVFlCpzlanz
od2R118+e8ngHx6GH//4J8HnP9JPKW3HhEJtqvSWZyRtfDH6nbNYvsY7Ea6ESwaP/D+WIH6um/p9
iTqyPnVxFijdWFUZR6MXz189+1bWo851kOMN53Py214X7CUdU5E4UhG7ougR3kbZzDEsLB9dWYbx
zZn01lvUxkENPzmk5pQ/OaEGtfKQK5PF0CJY4ZiOi4W3C5pod2M/yyvnEHOjNHoKBx+Ja16OXZFW
RpYhXslZ2B8qQ71NoLLNbBmG7QuCKToCGRzcjCoWQNl3qnpbahX7FMdG3gke+e4XJRT6IBodkyGx
fDq1g/4kdAw9flNdVfV19QQL3F8gX8DnfjhsqkgIQr+AhNl4IvAnET+YDPCA38aGTYMerKKwxgMc
IxZOFB9ToE6QkNhvFdD1u7QvBvRww8Ol/oYQqt9Z+dmt/BrmOIfTM7aYBPDq2oqd5twuzs3FeBLr
iRTzSrZR49SJXqFrUMHmlGTRSZswm+nzyEtY3JQ009HKh0iDTrH8PvqUQTgK0IYbPbcX83Rgz7Yo
a8IRrkApPZMsweqwtB+t3bbaSS3hOulA7lUvUXEfQwGTCQKEN3Jy6/C9wZi5/fT10j87NnC6JU5/
9A2sAMPmPITqlwPJWTHYILYPFCIN0u14eINO3TkyfGTTMi2jwOHhEM/Cr/sI8IOxm4dh2zv3CX0Z
zryxPbUbY7DvlcXPMQkOSID3G7KDILUbqY2DGaY7ycRZSPhvNCrsvYn3W5CK/6c3f61OHGYAG+9b
LWHU7/7d6//zz3/wAxR1OSzmGcjzB/JWXc5SUQ0lE4ypTdnZkLDQM3zE1z2t159PD7MfZT9WcUrR
TfqT7OGDT7JPoqRe4tkShbWhUKYgnowok2O1kNBWFyCe8yWdCd4vnT16+fPHz9ED5/UT2FveZ4BX
zhNZoFd3i0k08dYmRQda1AV7LGGfIp22CreG0cgegC7YbtbKzRv7+TD7EQgTS/gu4QDRwFpdFOry
Vd6NkLZXGH4xBZhPFZfEKPZWA+pkxbrOLX70OikPX0gcURscfDR6JPd4JYQhR6RHikiwrdeXHGCv
VY0QGq7R1wjDb1R4IIRe/tfUIoYeqCUvQd3KnZ6RCX7pxJTFAAARhZDCbsCyiMbQT6TJB2O590wP
Pnv96Gefg3IEc48BAGtO3ilnMrpONOYAiPW6M3EbZ6rpRKIMcirlusEwRShxoItA/FG8SyVIMz38
qYuN0eg5XvubqB2J5kUNcxLpcvqO/UWtMjdQwqtFre9RWt2VqBYcXJDvNsvlMSuwaRuZXo10lMKW
c70vl1mUYEpSewG1ksdSFhHf9tN7LsahNMEOTUJjumQoKUzxkjeSxasXT558+ebFaMr/iExUdAy7
yXbelGu+seQt9lHyoOjmD/DpzDzNFg94URxYULL2ErQeOljWoVCAmV80+Qr7tyj4rFCiB2zWqlUV
O4SuVa7LdTb6FZAs3k8BLMJTKGkxn7ZeFUTXUJjWnw7/JeC6WiJWIBO8F7341et/eP6NzSlmz38x
osvphYqh6w3kgKSjgyJvbw9kFg4EuOINI2uuTYQBzanIHsFzoxYC2YNQXgL1i4NR0KWY0Spv8G4k
89FwZ0fPvnn1+tFXXz34EjS8n//82Tc/VxNq/o1e62ELNoR54QmwJP2xaZ+ax3htdGscL2PoaRjx
WNvj0QFd+6OULKWy1QxhFM+WctauunqtRk3RCIUWLKQAZNVZa5XpGAM2c+NW+XqNs0okJZ3LWyce
F8TKE+GhbkkOZCA3v+0GacuQA2uOvLwg657HXrva458meHDsMAnYaSmRMd0jlsBGlPCXbkFH+XV+
i5jG3GCgfqJQbe9F+j4e0QdsP0C5eKJ9vlnSTa+IORNfdB5FFicTzBU3hCeYrsczop/pEbTWbCoV
R/8O1P83kjcQCMkoksX8so7+Bj14iQPRz0OmcLVl08mMDg4o+ZWPRBDFscOaxkjcLWbiwNG1ejVR
VFHECl/KA84Gw+Wu2xQ4PXJGqHo4iuS2m3B1jOV7DdxJi2hRTBCeffMkVkkROZ/zGgMo4eJAPsOh
f9lBJMA3U9ejAvQC9bVu+XYXVlUUi9+V/eNpjq4zQFeP1bTx0GJ8IWtCUxxViYdOAK3QhZxX1jsC
tEMbTp10ORZEXCgaImciG9IF7ft/NUbPV04eFrNuvdx+knY3azGrYCUN0AF1dM81BNMbyROB+e5U
TZEiVP47zo+qYG47GnYgHlpHRHpjcVzdcZLOioX7EBNPf2SSTHOiNko9/YX3tJ8zm4d6cETmHKvP
x5ZWVFMeyBs7RAFod7j19k/FuX94JwuPmvGnyRwXZx/Fnp6g3k0jW4wahpK6R6uUErnFO+FcCir1
/aTUqMoWcymFEhxyBx7EXv48Pi8nwX69VnKW9Dihq6FndKJASYfwJ84abFfe5QojncgB/M2JmXU7
U6IOV6mFnBF3X/LHkKkottmL0tnvOaufNRkR1yhaRUdWSGLVRmnIfnwc0SnzA3zxgJ2zlM8KGhFE
4yFDwsnx/8veuzW5kWRpYv0mE0y2GjOZXmSm3WhwcyKiiASZZE13DZbgDIvFqqa6iqRI1lS3slIg
EojMRBOJABEAM7N7avU/9SNkpgeZXnVu7n78EgCS1ZcZM/XuFBOA+/H78ePn8p0HJ9mjrHjQy36l
jzDpGmbr4iiRakQYixbF3DXQUS/T51QweJxa0odHJp2Hd9ENE1yqULr3lteNa0D3a7xBWCq/ZMed
1m30nLdy1EGKE/rwH7//n81zlk107JJF9i/YCOsP/+nt//3f/OIXMZcOw3ddbUwuY6b3S3YYeGJ+
ZPXEht1ijTfBCAPurtcdw+D90kWSRrtXx2cB201W7ycraaaFXwyCMwdbk6EuH8AWRt0L3HTIaKny
cPgAj/3AmDrv97LiundTttP4NeNpn2LuO+Rmc5BqGp8oMr6jgIJRHjSo5z412fe4sQzkUgP2qKOP
LL3HqWQ5IHviUYNVwUL7KIO8CtCP3Qo0uk8vG/Sn2RRSP5mg05osdtBIvmu7j45PV/V7EJiMxRAj
GDB3w8H96+njbrLSQWY65Mxc8ICf2m6W2y7JMxXk8jWCN2KmdFKFndGnMA9penYoHAmDB5gQSKpz
/i6liZTSO5Oxbs23ZpuUUCjoMHqGEHRecWbjz+y5ZgFpW06254tpdb0jlZxtNQpT3L7j4l6jbYV7
Pq04yqooyz7dh8WOzK9kHtyyCMm+SZI4fGLTg6KeTDarbMrQjopjiv6H3hnp1JmTemRDP2f0mETJ
vfsI3hePu9HZ5V5tPQyqdZkTeNL1RH6n0KDE4qXnWVHtPpqQ/I97zOkhlKN0cJxERW5NvS3EJRSg
MC31SpIkwjsdOFXxsJfdF04esW9jKMKOykXSdMU70M7GCLNcQgH8J1wiRtawCYJYt/qwqy7WlvsN
iFlK5p5zXyCKh9+B9OiGIq6gGp84xnI+XuPU6qzX3T9AB20kf0t/FtXV3v1Ro1Pf+luI+vwh+/4/
+MlMPvzy7a/+D3arT2e0sRlnCNzUhbAYlB+Hu+1EiDvZVy9f5LbbgldDuSRPq8kYda4z1Bhy5AUX
mtxM0Il/VW9g3ZrAscQTSkwNkwaMznCVkeROzv+sbCJD94wzO3lzPEE1h9B4Rd7M30mQGHRcmhC9
HHsyEwgeKV45VEvyZISQtha7DcdGnkiSUN5lpSDIF5sULZVliI4YobCh3iidkcnLSsR9cVVMlIHk
iBHkM5A1Dx1IkPC3FACsl6HMt+pbz1kD1MRfSM4T6UjZ8Z4VkcdiIiuMAF5YV18pMjCp2PEpMZvP
1jcuuZckUiQ/crzeZhObPHG2sIS+x7BzcUKyYAFuvdgSxEo5XIkNxcDTVx8ljpP2NQ4X54VfwB1R
449MthQ48jyRXdRfcq9cKAjH/hgV8PQUn7Vrao4RhCsKoeE0d0JmfbnEt/llvXhf3SwJ9xvm+mqM
yFrw/KhOoQHUDMl6LBAOzfIN1F2KisrQ+wM2eX05V7EEBoPLhAGLyz6OFJO7yNCMxlFEX+SsZi5Y
6yQfzCMTZA2DZiXxAUVpgwHin2zaQ0MncgCzDaBruQTl30EnlAajuqzRAfY4zTop3cSRbQx8tWqc
D0ucsQ4W7hUtvMcDRMjwoxzszmebTJBXj+P2KesS2T2q6T/Z+APiT9C1YNMo52+vIeZKUqrAyt55
8gq7JQvOYjuXkgdQ7HEotzdeWBghh3ooB3mjrONy1LUnYhlROUYhjt4likhgZmdCBb1resS7ytjv
NSoPhMuU88+/oAmZD3qX7n8DX8jWWTZQYSODTMzJRE+J8NJ1m0p1Xl3L0XBPyHD3pPa0hSYK15kE
EWbNLstkoJdLRw7x1+37IMleOfVfO/lhInPhkIcqzDN1NrxP7W91czE3w2Pz50mEiJOgHyHjOEL2
rwjhR2bJjw9N6LBe/f7tszdvWSse+p3ATSC7H1YfPvg/LuAyx83keZChC4v8gGF6m/XZF/keL0Ju
CbbZrO5PN0uKtSZqhtjQ/HHbV3SUQJ7mh1wi+pjcvQ8PEvISpu6zw3DKLd1EoelgGjVdcqVz2kBy
3RgR+hJdfiAKd323ri7MAsE4DjK6HccpNHdn0CW3Q5N5PPMfIl05PhK1uLphfo+GNIOhPVsz6Os9
EI/ZWgJ/GI/Ifrf8OaOAQ/FXGwS2hWOY003mD8GeT05HaaUZ/mzQglN5PE2aTZU/0sCSthxkkojM
dcJCK7wLIoQrchIaL+rFzWW9QTbKWSS/EXg9Bh42Ya8mH97QP7HiaYRVAqgd/sU1j+fHfghKbQTG
iP4NfoORUYbZRLw6/ESiZaLhC/IpYMnPnhXbPGdRw6mqtXSpkQBUz2MzgWDTwclreVar2oU04fph
Q4+1NyoQI/GPwoS78gYJl4zzlKwpJQQLTCVii44xTZmYrmj1+i6N+YCSU0taaLZoSkkOYXYFVQ8G
FIyh+0Qb9PAQ5deMcW1UTeqra4NN9US+x2D9Z5TVRxKdBVRcqtQLg2cNNThaW9LQoN0VnmDv3tl4
63fvBAac9a+UiUBHPlKiBxLToWp7Ptc9sqzaRKXogW5QsBEWaSF2fx3yZdDouffiFyBZsWFibAZO
mYN3KlGstjPidFkS+oRFO5HX0EBV+HnIA6Gcijq7mUTD6yMfbcSWMz/z7I+EO9sDgtTbagFcC30w
Ct3vMtFx123ePp3tmd41uT7IYBXmUEBILPoqsozxaO2Rc4H6YmGAT+R/i3JB6CLs8h5vST5LGx+p
DMx5IlbUM0kqCAAbBVk6ScJF9ZnBhge0azPlUuVyydBuH0mvMemYvaj4GOv5emd9Geez09V4daN/
fHSxXi8H9+7B47ERXNd+vTq/9+CeFL5navcv1pfzx+884DjMKbu0ugtS/sWpSg3ahs0MjQfPKvOM
nx4/14P4XTnimK323Tsfi+DFk++ewbivSAH17p18nKEHxYbcWkA0snTQ8WrcSDbBd++QN2OiNjOz
PQ61COABFUdBMFZcKgJhQPRTbA4+YDLmtnMa3KAKDiLYX+oOIgGfN2FgGvOUXXs4CvndsN6I2v2X
v1EK+NAy6Vctk2BUzs9R+RjRY/G6dKZ1/Pak9IMnpPkgpGnnOGP/J4NMZEf43Q0zrleqBZ+3KTdo
/ngXHrj+ep2kGe5WVis7POZWyDbxqUGl9MVpSPcjMEl5wBpWnPYn5e4XSLFMYukJ9zEdb+8hQQVw
O6SoRnZVpE1IY1eMNnBbMem87wD72YI2vobqQecH9ntQ1nMjVUxB9IeXPemi8Ph6qActTrZfP//2
2ejl6xEGbf1ZnGuDjW+bDU4uZn7yLhJWcERSJGdqlqWRjchH3zdtE9YFXbuceHy28KpieB/IvqNR
n5PsBveoiWqq5bZWlOILUcpq8r1MzWPIpEYUYcSnPcmv9uM4WrkU8YvWae9HXaDG0bat73TM9a6F
aBSqehTsZKB9FZ5O6y2/cM+r+IqPxWcs/HG8cmGHVA4bHVDTppwpY0UCzAG7vkB91rt3Pbx7YEhw
R9WYdQuvUf7Fk8gFo9u4oMsmoXRaQZ/RfoH4nCimwhX5YQPCtoW9N+I116/PvDE0ca55vI3Hxm+/
E2eHc1DXuALkSBmJ0E91mcctN6ioqGnaEJhbAA/NPCEQE84S/mtmqFumH4jHhDiDan23C+wGKFte
jj7QiXV5ETbTaoPgZD2ci4AzBzyxSfZE9Um5l+iJJm+U2QJmfwbvtJVkKaDkzw3l2VAmIpswoOVt
jzbamN8oQzH8N5IMyNA5FTt99B5tCT02L2FbPxnO1pXtd9AYCIDCr9WzndsHVlOKRkthp56/JjXN
5exyNuHMh4sKX9uYuuW0uhh/nNWbFRnVWC9s2EqfhQa7giPYIyMdyEyOq7PFOh+gzdYpk3LWS8PX
GD9tv/5JLBvVguRcNhuiJzBcxWPYApjmop5qHTJr8ljh9/b3r56Nfnjy+oWNi2t1opKnRfK5QpBi
yLIpJTq66X1E9ROcbo7qIavtwvPnTsiwSBcdiPDf4Dd6yeBDJKngQZm57ccpQwISUdbm4jd5GW0x
OxWD3apYUh4I0eMcP+UnKbec/EDOfp6J4S0tEskeQTsdZc3CP4pWSSKXVwxcTAxAaN9OWdc02HVh
Ue0iSS6hVxR10T0oDKOCI5S3B+V/vUGlxg/c1/ZiwIIm7+fAyufDh0mFdHuAn6ePjhYADoybe+Sw
+Z5BzR7Z+PiLO88lxRNIOC4mn2Fbr8QZATevF/naZGKG+2ktGttQVlcWKOhkz3NvRGxPPs5lMrCU
hjjM8slFPZtUeXrT7Ni4n76/aGvQdWgNYJhx2D4sKaeftZMdrPrbhd48+xqZD4EtriTICDPfLE3A
x4Kz7VCszRyDvXbRcxF/Y7pid3agMA8SuB5KAuqmJSGWtgNSY8/dvn3HKydtoyThVASNjIAdXNbj
9+Tdgq/TlvoUGKQEPE5UV7ExIf1A0gfFhOHLl7y/mvwEhNpEIG6r392/563373bzhAtpxJK+ESGO
4d+ThHchZ5DnZNRs3UVTyOFj9cVIWP85QYuk7Qm0LG2Md4f/oNSFf9r5+h2nKeOHRCUiLHaTR2Di
WhNGHHXh8zvF9tRcwZ9yS4j0UZHsMZI0lgUvfSfl5hjbY6wxx0opLXMkkkpQGE7m8YPBCWK6z0Ge
K/JDtBmP8p8VoR+35wQsbND4cHyCS69Aqnivl3YBhOI/x6KfXhmFNZ23XEz6TiJlbdEWyHotJt4N
J1I92kmdFNCBY7Kcks10caOV4RK7lthxRl41G4x0tiTk5dY1KtwE8fZgwVSegLaMr50h9yCUn0lf
v6dnPcvTx/jfE+V/IGoU+DMplPlYODtEM7u1lXRN4w+kmrHdZu0SM5YZu11uBWfY7EowDbb9nbAW
BkpzFfwrKp/oBBJIbyZ+h7j3UMgFrPE2eAxZ+AFSMpt3n1OCsIApIebu0Ujh7ywaMVof3XzWpjLG
dx17FQaa14yBoWNtMUX8Lddl9ih70AKWss9B7VqtAb9c7eU7yNLRE91LjgnGIIt5hSGY66sanxqY
ERHBAcjGARcpEDSH3GdmuvMgBz9oRV/BIgbc6bBrQo6Pj04INPOwW/4ZmJQdv+ZOu2fBmwkjrWFi
+MNrjAPiZMf14nCKemmcnNJMSWcboFzZctod6zPMBGdvn/QseiYxbI7mUk3mg7/IZCqT5G3mkpyr
WZl1eIgGNYRWkVTSMpk/axrtdeHN4j6ogzAsvAzMPBR5zCbdGkWKJ6yNUZ26DMiedzMH56d+wohd
ZHOdfYUM3YIrEjZgf4noq/p0hQU16cpK9MkgNzGrZJmxbO0Zq699yvjdHpTNbdFO3Oqxw57zzZJo
gisfH4L0RY9x+Jn8yiUqjzZwtWAUm5od8ouuXf5uVqa6MjRkB4cPTlLTW+bhbcQ/Wm2k8pbYkWgw
dOcRR4qUC5aB7/b8V2RlrRVxqCmm9Lsp/Zs1z4S2rJQ7xDYdI+fOtuHxBBwBd+V54GuEqnDm1BX6
8EiIDTLgscdzZo1Cl5mtXaZ4MuRXpIxYcSZq8aFBeaGfeekW4F7DZOINZ1uAT4f8sQyQ8xuj/rPJ
zLzilAWY8SUdTZTYmo7vAgGdgblZTD/7jEF0NusaA68mJHrOFmMzMrFt9Q2tpN3DWgbtnk0sQaij
NaMfWeWWMePRnMOkwQ6TNERud95qrf9y/eKQt0S3XC3tkZcYU+Rn7koEMaCebEZm1Dbu7xz6RJKB
J56SZPqzBq+4VbH1+tVu7FR8Mm7EAcicTdiK1epjNe3Gz5KlcghNnN6+77VojDcpDqCuTTbwCNcK
nCVEK2QV1z4sV7t7eBQVL56OAZMJlE8tzfSTLqsyZPYH9YUItF9QHhTOWZVx3COItBwjPPxqVS/f
EOtZfQus5jdQ9GtTRO07kzpuOZvX5wY20pZzYzP+o3qM7FTq3YT4RbB0GOH440Igd4+7WAREK7jj
rp3xm+qd4MWHRZP18RGd+t1C8YZ2uG5ovw8s92IRt7FYCZs4Y+XR+3lN7lxnSXAgz4BL/g8YfOK5
OYSWe9VuGNf/MT68qC9kNvoxeVwp4A6D9Vfn7timTyjbRMO9OCpyzIAyqc8Xsz9WUzc01DPmZbve
hkFNkegBCDOS8QL7GYSVmxc/j117WRikEqoULiIF/Jt0Z1s2dHSG/e1ujaVyF/P1dxY4I7IRlbNT
0i3IGTnXq3F2cbO8qBbyAD+krGrjJfC0zz5DAnADeiTQ28944BpdBnvbaVpMyoAWjgV91WodL8dL
KwPTL7gXEQNsRaIA+fVNpwboj2dNj8bQxxFP0AeCXn0rA0sDo8OHsHVQRBa/MG/DS9Fs8xtTos1h
2OPJhU2uhF0S90v2WFQ6A8EQsk/uBxlBQXneAsRQhfswoZGLVzTHdBKY9WvYD+tVYgN7693fk64X
ZsOUaR7oTzxIv+SDRLgYCFm1DE9+ihNx9SDxhd3/E77M85FhstO4l6mcIauWZO8rhWrtBF/ug2j+
UCD0aIk6A8uySoOGXaiv0QTDv+AZUd8fyfdlpC53Ow23en540ROn7jyjz4fj0wmJpmPfxsGj72+b
DzugvaYbvx1RFLH3ABDhir0b4vUIjlvup/WzMQ5UP5nowKOvIzNI0qKT6af69FwVk06K3lK59eDP
x6wdAaabypWxgPtqU6UAEEQCVGH8sDatAMaBKiVXHEadA9KB1LD0qKsZZAjPn++VQPTAGLTKlO7t
+tqK32bMgyh1tSlk1Nx4bgW4yy1JQi1tX2VDR8K3a6RSOZ+pitKKWmBYnB9T48ZVdMWOLYkTVIfi
j6YHLWJ1sq7quDIcoB+G6pFD6JsxrNtmbWAO8XfUE5D+CnVVipPcsU+7qyp7X1VLqsHohrVx45Xw
FdIsa2+aHVs8wIX75E2uTnoo6CfDk45Fd+Jmh4wELUtfflKD+zEzbIElJUUyln1207Ivmqcmp2qU
coHkEFgmTAGKA2R5ciKIFxI+JyF1cHViEwpQYV1fjfH5b/AyJLtCw4surj8C0dCPr3f/SVMv2J9d
BG+kCdfFdJbWAlG+vQdYCNpMZmc19JA9yJ9BCdNtIgVkUlRcNySXGn9wj4eGHDYRTSLt9kuzzHGP
BG85Fj8IeMJfkMFfslTO6/o9TvlmaWAnefJ9aR/n1XS76WdvUaKygS4q1ENErBGjnY46gRs6s2UJ
Q+lnbgHJzRuES8JATUKl+KTQHwal2U0DvISSW1r4Sgw4GZ9V2RUQA7nRgPCEgBwcwYN5+hpjelCx
nCiQTtY8JkrDzLam1r4wWOlK0GWbm2YN0irOMcaA/iAQsLgc7MRC6NYeldO6XoPAgAd4mulswPoV
N9msVtVC8iMnsxVxACdcnW7L5H4IGPK8FXq0FRbfrIxEhiPxQSWgstlJinUd6bsaLQBtGpsjwqFc
rlvdZDTO2myLlwweBJ4A+56Dzt09OoG9f9q0IG2RoSzRXXyzD9vMNVFTSODYmNvuHg2kyaM2E4l3
epc7QuYIpVaOZZh6wX9mx4KY0fHDj5iq1Q4xL83L20NGOkxaqJJyme1QOOfJoUNX2LG94HrlgMFG
aWzsa8tO8MSUW2YNo54QH8UySEMsEVCjp8xLQyDypCqR0NklmpJhat1nXEg4rQxx281wjJfHiVG2
wGk3NOSqioZ2BzMk5g3DpJHPOeHgQ43PEJIFo288jQxV7xOCcoGX1VEZK2g4goNL4mWOaKldItlN
H9frLQS9wSYGdO2HToc/G+Xk+mJbXp6JvJBSM4qf9nRQ2pr7La0Rvpyx6GE8VSwWV7dM9vD4JJpu
BIpZMDYvJWzFc1ekJzpiE4zX7n8J0t98XRdMtmU1kmfXYDConJ1MhA9xV+GMdZOvCV1ZtgQlKm3b
ExMtfdIwWPBxp0sRLFMOJ4mlRt6DdOPkT/il3WsrFJe1TQ8rh7IbUDWxW7K5sNoIueJIWi1c7Ujo
5SQqXpN+3T3aJwG1nR/47cq3fUzrvfKySFNm8ZosblJoD28nGQbNgtEx0Ac/S9ctnJyifccnyhxH
E6ljZizYDg79xG6KQRhslNiDpV9nz/Reobh9rL/Y1+PxvUmd7XVreSM/xFmZTI1WFjSqFs0GZORV
dVl/hGcUCI28oIXfBJRBX/h4pfYZHQYa0QMj6DavR9BrYDmpQcrX8RBNeYt+4B3iQTIMljNn0tsp
XS0dQhkydmSX1DlmkdITwoU0CcXlu3aBUvYZzo/oTLD91uLUOWFx8HeSidXNWmY2KuGOoJIwVAUJ
G6unKZAT85BMiTLmN79J3Rw11brb4B+VEdA7SpixHmGbhNEcS9GTztbzQv6PRgNwOWWjZhPrAC7q
+dTCETqVf6NcS/tb3u2JZAGSdFNicdlZtdCp2/bJivrI9Tk7WD1WyTyZLkKvKksRPrdUDqFcqT5m
5/GgWcdBYTGCBcXqF3509wIUNNrmHJJGEF07AIV84DzluDvw2o0e18Gk63rFFCarmuADuCSUGBVe
em6DrLeFlVpkFsnXlLKJI/af2yluF4/QeVVNdptNW0zm3ulgU3X3wPi5ZseyxU5IEyz/gaf2CUVD
jsY9aC4IVlCoTCFekCtYevMcrKAHkOh8BgKsOx95MIYYs0ZbD5lOoY+hDNTlqY8Cb62SZagUckbp
5fMQUzQgQXhcyS7gL3HMLlvkUrqx5fqB+NTHGq95NV4QwkLkH5VG7uM93+V9lx67XxO9k01tSkCC
yezkcNvvNR6gP4cE3DZNRIEGdb3DGKVbpcQncqaTU6oKpCoVhq4MHD10iDd02/KDeuQxNjZq86fE
ehvIQ4FgdOi20gGBOOztZVAxV6igI8Y9cB0AJosCyJYBWxeFLlnD8M5Xm4nkkknwHNRAySE0AzAc
/gEz2yjwjh1zzswEu5AzGzIzK1zpp9iFSO2i4NW7pSlbh5riT9LUT0rAntYBuGAsUNsAHNWPbdvc
U6tE+8K1JjDDQSQKdGiz2NWl23THV4xF/dGNeT3awhBEJmou4LldXy2KBGCjB/iZuGUjvbsaGUqh
KaTbXpai1Dc4zbF3WYBGol43UYfjaVB9u7rgPE8cISYMN1AbEVsKihBAcBmW09MFe2t2djOyKXlN
FubrCQK3G2/B8FVsjWAqERplcaCbLRB21zdzvFO6FBmxJUbZFkRfz49VVz/wUPLDDMbcLbw1yc0Z
xRnyvcIt73M0dEYlJXszPA9gXHL3W97j7RmID9SVIf03JS6snEJA72SyeC3Gc8nMzL0eyr+34reZ
GelQ/o0C8lTE9XhxU0CPEmrEOacwIIuAdMMgW6PfW+Kp5rBeGQm16D5/8fbZ6xdPvn32+vXL14+z
gwZ95w6Qcrmt9tl80+DLk3faP5NszYiCzlVwVV+iZE5ObJQ6WQzs+GXSLIba1fVqg1nJUKK1mQya
zanIdwjdqQ0ve2MTG80bwVmnjneIkSoA043BYOabpTH+wUHpEG5InjhqwIlsWn7dJDayFIk6TEpo
GSn0D3W8oTKOR+WEoVCUDHQ9/JQM32x03tErdzytpkFZxpQu/C81uHRKhjJdjxSQ0r5+gzsIUOvZ
XA4i6CfxFQgCKOmbloASX+S1uDdYBb/opGI1KNzLhWoYvpiOmdWJv9VjqmdrpE6minGMypu/BIvH
A4m7RBSH2eLcgNeLT24TKu/mc2sntnp8q2+OtFV9fveq0+Nw/4+VJ25ijRHFygDptzJFtGkY7Y7u
2knU3t2oswklrQw3PARCI7qMyXgkVnpLt/H8tCMGFXRCmfkVVPo28n1BUzYSnFtFKmqhuFLt4tvt
TKC44Q/hSccii+LPCPG0rq/Nn5zTFYp2T8rk41xAoItcGBs6mbDDacrIj79im/kOYpezhUStYA20
7l+Kqf7DZobioviAmEL+ea9CdEfHdlP5LO28xenOuNYgdHNojgfa9Id1u1K2i97kXsIwWXBUcEt3
i/14m3dz7FeFlmtd1/NmRDlyOafznu1Vi4/Flodiy2YnoPCRksh70WwNJe+BA7E1C67XzZuhEAMr
+cDjXWIWgvc2R2B7+ydyauVf/VVlQvyLkX1C0+DlDZcymJkjk7xo1FYDG6NKj7KPKWxdNqoIPZVv
JR1TfNAMDqYDcwgaqXd40PTEmcV9k+OV2m7elbkyYgHp7PUPeGbrM28eW2A+uEQvMSflfnCrd/jH
7Bxk+YXVFjtXHdjAEuYmcherXJUSExZ4XRnEeElGRo7TLqN2vWAYMLK3U15oFgr6qTd0EFfJ/KqX
xdPZtS3xEFTnKbveZY1uaqbpp7r7kRKvXs3OBaMxwT1aWAFnTnEaXFIiD7a86c1r15DTx1YnsUgy
Z4qWMXETCUYyogIR5KRHI0TvlMQkTliJXjCxWwyF9YmhxiDLEfLC5GpahHKjntMAslIlLtDmXXqz
JMIu+SJDQIIYqJJQHeWpSzHa5LSGHrDK1gKPG7y6MMTR4R/cVBQcjxAbiEtHfmYUt2ZypWPKrxrB
9TXWtIM9Mm+3GRwVGpD/sLkOrinfyiibXsF0XUs2GUfAzDRNC+w4EJUwYJTx/kgRPOXkWbMFFvF8
RHxc0Aj4Uz0wPDOJ+PYussGqOhu8AyrsXPQI/iJHwsfv+tlzH33bBbKSMA9nGCHKyAdZJe7CrN0Y
rDxGfL56lYDU9ASR7FEbHD1haXqqxKwwPpLcexEaMQm88kNBczEx/am/Ttst3Eblfuysczst2qma
sBk4ANvixMZ7wveK8N8IresZdd9Dr/cgQEOO4OOG7je22Ltns6BIt9RukqRF7P5Rlu199txH+CK8
TadQMS1D3JqDVCZXynZi2Ltqi1sT183z1HL5+cUZbmqoQFyDFVpGbqYtokHZ4iEwT3tJraq58RpI
5INiVOR4ZHPDZ6ZL46M+Z7nE+n4m9+i843mC2lETXG3ySLV2qoWSBbxNUjvmwCN2Lxgvi/n48nQ6
zq4HwDkl7ao4JmmNWkkrdLJFy+kh8jZx8tW049JIvWlHZuF3Oi9FJ4Fch2AB0PSYeOXv51q1/QRz
971hASHjzOZtS1QjjCg5LAiVib0Y7EPZg9h38oWWYSS1K+rmk3Lt6dlc15n20Pxlgzi697pBdXje
L8stFGTIdsdzD9LezvaESLlISzFP74P08vcyem+lYvrph9RlQj9Ysv61kfTIop3ibQ4tHXhwCNsE
hNifgQ+UggdHH+q2DDv9LPt9veHgA3S+ZVHhxveqI2kLI1Hm2bt3h4cvX71FSHATRUSODYZqF1Vp
XZ1uJO3NL4Aivpqw72bEn8A2CeA8pdDzq+7h0pdK9KdHMDWZ/cwtqReKZnsnF/nzLFmiqqujnVVw
p16hBIpyHkXAqJxKtM4Y+aJS7RA+tSh6x5lRd/ohKumF/AsvjT45+ty6qfYPa7w29QqzoO61Qt4U
unHwEjOCPCa1GiskGJGBsQnBvGdJiQMCfDl2NCJVPzpeXMymnInaszOnpC71RnH9CK8TYU5YfIfX
d+Cj2iLY7ucWetO3Y6cjc7AyTxSMP9BiJXnhSYwEr9nsfFGvquEzTqNoo1lTjnhGNFGusNoHjylF
xcVPxTjkW30uiHDGo9RoWkz6h2s/84OFcKGsEH4oSVfyjybf4YZeEKd00tka1IJfhrc7fOfbtLDr
UlVc7MVJOlTe2pAnN1x6CavRJ8AboQP4M1+5qmzST96sZ5u3KsjK1hh407cf+88XM5PrtmzzcxUI
81z6LDWbarIlb4xafFvl2BA60TlO//RTh3cGe8DUp39g9xfJZBvpUxMZUKB47Kzro3pTWSIc2qLY
K9ma1LF5Ju9rY42SFB3e2061eKWiyV9YESc4SCby4XZFxkfpuel/R8adt4j5bHxhkybp0AuW5bNU
CyCOokfPKH4AaROcGnTkLQy/GSJ+7ovYecmFNmxL3JBoNKizd7N30G3j9A8IQ8HzHYIuuDthnyFw
uhvxwPJ69qH7/f9k3EptBm6LSPThztt/+R9+8Qu8XRCIjDJSofs3Yjgt1hzKsx7P5oROxtHKEvJ9
Bt8SUImh2fQ7SMbuevMXrLqf214nDZcy6iu/rMsZLiU369mcz1zoRVZ4mE0m0ZQoQGwaSpArTzfn
nqOjTevk8Na42dygiQw58YRNl8Y/w56pWvwFBe98WHRXFfluoJkP84SD8LvEv+EtM1t0ey36fpPJ
R9W+hGX4OF4Nu9+9/OpZS6sEGYVpQ+rFelXPVcJ1GvQ5LhfZrfqdnDqQo84cVxVVKaniUND0Os/M
X8D8GuuhBTuvknBfyggCAuAN7Q2O9V2u6o8goija1TVj3NAusjuKGqKx5lmBQqTFS5SvUy0K1LiN
C7K7pBO13NYsylWtu2BRt2wEkL82vBt4nRimS2mhcZMsaq7c7cnCfPXs1etnT5+8ffZVhsYlOJ8C
O2/225B3xZb+XI7PZ4y0gp0zn1J964R7SaDEbtsVk5rGrN8bnH7riU+fWDdzoReZ1bL9bW72xosP
T1GU3Ae+44uhSvtxyz0e+m7bDnRLj0e0ePNJMy7BEwvG+njbZOlRIbu8LMvHP9PqdP3wNb5faWo7
7lZmFZg561tURsID0S+B4EA+jEMJ+zmVSAjZpnHLhLYmKPmyXl9k/wulzyTvmaev+O8H/V/177Pd
5cmbtxlwCZOeCDNKBIk6ETnI7ggeHXIg8oO/HM8N8kE/lFMKlBNgktZ4TD0wmz8AC8zLrF4lRQtj
FMXzfTx4SIAgxYNe9qtedj+F+ZGeErMkv7Qr5Wqyed74sdDOdatLG9ZeY5Erm/MDNeFPfXjcKGfC
y767x60/ofwL743cHkFa3WTeHMWh+37x0tswEopgFYPbt6GUlh8c4ddmSX8DBYoy2pIk7sGQSEdq
0tje7xE9Hjbm6BiNT+vNeiSRwyO7Y9T0mgnk3+gCQNROjyUVHj9pq2aCMGzERaqM+NEpv2mC0aEZ
QhPiujZ6HPzKPkt3+Q5L0229MgvBadtTtg1/PjkGreDJ9JndfM7vG+C0dONJByTvkGmAbOpiuqe0
ON5dijAcQkk8yu9kTa0TyxqPMOT8l2PKLigGf85GRk4S11MTe3wnK9jwOq0rHp2Q99ot9VRJ9/uf
PmVkl8eIFSFlZ8SbsdVmQf+SY0+BrzEFDEohLrMFuTjXy142r85AxCa4pNJvaCToecMMSfRn2imA
u04xcwixArNbJHxHh1TR7OR6OTQNDlWrQ2674ztXVlcjEHHmmQS5SV9ifbyUGyTSt3xFCadmi8l8
M8Uka+cVsjtazel4Pc42izku7hXDDAK3vUkQga9P66bKisOPH5Ov/aLZXBJqypITZFIaWtMtTAiC
CCZf3P/si/aQUwQ0chNlfHJNy48iBDurK1atiF7FZspa1+vqer0lg8hXqXcQyFoLUvNxVF8XBt1F
/oBO7Xl5kjBKNPxwDtvNf1z817xkfYnpZnL69MDTEksLF/eX6a0DezR53s/QbWI8eY9Wdb7tDwxa
bAj74+jUy4rNwD1xqyBqKP/7oJTkrbio6k3TQig/yBvDTRimv589aybjZcWglUipn8YlpjldUZy/
mIgO8Il1cNAt22y7BlkRX5N9CmWVUylRU3Lgk1xiXY1XFOiiGEWKDt2umoDwnjMQg5uLgDf/uTke
v3R0JxKyixXjvx3/cYaQqEa8dEIbgzZNoLSNIhaAK9lhPS10dCzMVev7XZX2BIbtIsgumlTe0qPx
7pYtEipieSy6KCljWvCEqSBKOxaitw9GedJMyJcFxFT3dGkUDpa5YfXF3N0au9I1p5m12RqDO6HO
8jvRNSod9jtHUx7saN4RJt98t9NpD0/BBG3PX3zDCNAe8bvt2TbQ9WqMzDOhUZApqK6ryQb34Zak
Hac3xDpApK5W8xschTxZ7HYD+WRL/QIbu6k3wMfJdZErH778p/JHek223uMwcXT64186H/7z9/+d
VXqNV+8/HLwdlaRsy86rRbWaTbLLCjNjzppLhiSBQqRTYxybarJWfTHvJTEGOT2bx2RsXKfRJxtV
dY7E80H2HfzzDbY+Xterovyp87O0aTSO8dzTWSgiLqns4Xv1Yklr1eDphEkElF4lz5Xi69nvXr1+
9ubN85cvlHaD1RkkygJ/lp3LB+hSMmYar9dmcyr5X5wuqB9uie4TX0PVuNuwwrsW5CAMdFJFQgJX
fOfBKaSloE1smz6kTiF88vkY9ft8tLlgSGjM1taZhUQiTQwGVT1jAXyQHb7Pclo2juZCRQQRDEnV
eBHnPCPYJUx1TdZYs6Voxwl9mD4Uo8gaEBJCzSJ23GuWQYDpC24oMasWzRoWStaZp8ZMCGIqSPvY
F9ezli7wOmIIBM8eiw4wWRwYMZJGRjLmHF9uvXiOOdkqMjjXIm8fRkTkJYSVPkdUWJtxi2FWL/tM
T2Tx9t3fPbzs7tr9eDxxW6nt3/X0vk9e/xaPwK7Nb23uvOuRrLfhozmo7G66zIhJHNEKIOPFTw/y
1CiTgzykMawwg/lWlST3G2XkTGpkhQjEJpyXQTuq1SE8KQmBHOSYpvR74oezdF3j5k9WTHobHX7N
ja8ZBrR0EmHKl7C9/Ke70//JW0MaGIQ6Hi+AXLmdXIntsu6/JRzN8fwHSo4dQFIZP2M/5bwdWfCs
YTPPipEgsKpxfhvAKANowfWVuaT/WV1I/YNmYIx8vey0nk8TLlFQldy3Vx6khf6lTEyDVoNEDkz3
O4kpx5Ardrc2MVdprcaInKNv2GhK/zXKZJkfOfq46Z2+RRZOfmP1L7GGVClzFrVpXlN150MohF4D
HdFYQ4/h1TCbe5H3QFAROz48YuxiWDZPDa7qes4d/vA0pcHh0UlHLv1L4ZHWeApcpmJtjvvujDTX
6JJ1ye7tMJmeFNs6aPmhkNo9XbKMvJSlYaP5lErbRFNGezBTEL9i/UmIHDdRAI/XJvBLpBK41G4U
tlKLBmGPsezEFbRrE5HoWA9n00rEYLROyRXjYzB0X6gAD/yFI+lswy5ZDgz+O+gFfGffgq/YbtZQ
GiuQ3tHhdklY4CKgImNlIYf2DWMiya5r6vnHigCScMuKCkBqUcJJtiv3t0FJG/FAW05ukAQ7ya4D
ngnFlTuFqZwGQEv6VeAkPMew/owiB1I/f1VNapKXU1YE7hteQwW0Hob1uK7LX9oghgwe+umlU4vB
tIzPjUfQruFvecS3WkY7S1T6GxIUxhxgUkv6WzwLS8od6eRm+M3lEWmMdCSLvRMiXHtKuflhGpwR
buvcQDeC6dFB5YpYzEi4JukZ0RWyTSnkcVnf+4tuoiS/4IvCqVLermY8J/z8IPvZjTkBMrPj9Xps
hF73PhGiVs8iPcBnR2HaQayenj62pit9e2pK3ds92HTcZ+6OlNRPHUTcNl1FPoPnBdvmZfsBNQ92
1NYRtD7LbOusWyYVw4XZPvUZhVXxG0d25EsUac1rRRWDI620I9UMXxuwcQd0EgbvnuI/7/AYs07J
fP+1SH/vmLp+jPRsM+Kja5WQW94R0Bn294OV5YGpdwQTsS8F+3YwqRvdM947MK23utNqHqJoixeX
vLYvCefcniBmjWJrgTH6c0xuv4J8znwbka0XNxZvFyd3i9uakRHMfkNmMblASTnOVaeYKE+PiJzP
5dvQi02Ng9goNaCwT8PhQBl8QjfyALTvSXxDyhAVkLAdmOYXehhExRDRw4k6tq1PwZk2HbH4/iYq
jneG7AubUkOhV9jTkpu6eQvTM4OwlnQDabJjftUwzK0wDO6RQleyThBd+H+ik1Q7VIcZm5RXHF1Z
c2YrxoWZkdGwe/iedImc9hc/HfUfdiPETe7FsWpF8DLnvlTqedPy2WDCyZ4efz44ia9XEgWj9rDs
ScSIVQHmxVIzfEqmTcDx0YpelnjVTpTdTr/WRtYP1gDGKDnO6vQcTObXY+TRNzT9hhd64sw7k98j
O0Q+Xzd0rJi3Ze/eqbbfvZNMI2sJ1cBz3M+sKmrg9MJulO4r79GJaSZ9DDmcOZo3s5NDwY3RUMll
AG8Z5MHjLDeEcm90KMu90xj5chu9e+c18c6U6YdooOJP2yaVzVhCMtnAR91UEIgfi1B0v3MCAMcu
v3j5VueYJr11M0GFULkl/bDsgDClk4ocSkcMeasewkpLxf1iTmX0Sh4NVSGJl/CeQRrWmqLEZ3lw
DLN5Uvq3qFoK6m6X3uS0Op8t5HG8Q22Ckd22OKy4KVpQUfgiDB4D7npdhlvFZpNqn7D0rjlYcVSA
jq7m6n4YEYGDN7i9cXcTEpuv/TfbCH8BIWJEwJ2jkTBLl9fKuJLbctgElIO/H3HQ5WOog65R9qPm
QXaLOR70BGPy+cuEOo6ap6+scjv7AX1RgKvOZ+J4IkmTs8mqQs+ftrPu9P4ISsECIIW2wxyAgPuR
k7cTyifKDiTQWNHhEWxP9prFKJTHIil6Y7JsjxXX9Rkmm+Q+YSQV+b/NGs0JWZk69Hj4iyffPQtt
33I1eq15RB4kiNDqHw3Zy5sBPuALGCnm+PRJdYzXBEJprHF6TyszwzgVdoWaWErV4/ln6kvnE7g2
r2nLfKqskrDU5B5E0As1yg72TZuBoIB4BMZjCbnm2KS/4i1IqyqOTYlcrqLBcl10OTbta8dCovSw
LyLUEVmEojSyHbXW1PC1I3ZOXtFmUk3CbDJ4sBwk+3yMJg7O6EIvjbHsfePc8KB9lFaW3GOgrmPo
2b33QMdWHWJSf/KYHTnTCVK6GksMDc9P/jSHkzG9ycgAIAnI2PyjEHmpjrcpzCw87POeMfuCn9Fk
XqIppF5b53WQBc7QAdViEqLQTHWVl+SsDwLLbC0sESstqqu2LUkTT7GOqxnC9rlQNr9GTog5a5x0
hhGcuqpuMvAgmKfBTHnAYA/l9YzwOgN2y2H/u9P6Y4X6so9I2++l4XfkFYETTMoZ3Az87g23C2p5
acsCO4Q7BMdHCV3JtpjYPjQCOSiJrVMnazUykB0aIC8/M6PKJ1PESTCSF34UYs1gT0IwLSaJT+gr
UwSjtFgIXaG70vrG9g/5GbbQgmLvOnJHguYmSHV6SAb+dVY86H/eP4LLdcqelXh1armqBSN/auOV
DQLlpF7eqIHI2KeEGJvjp7yMEfb9PXGwEpx9EQuyqdcTvu8N6D9HMCaw//HGpuwQtF8de4XaZPc2
iz0wd0bFtzO6x6KhkQBh/WAoPLJXKNwgvg+1fM80DMUPOXEh6Z6kvg7NCNJnN9YIwQSCMEXY77Iz
QsnMS5AGUjubOItQZkp5lqfibDxpKh+NMLyxGY2SmSWVEB+Xb83PEVThy588OMr2oEUUkbFC3xXf
hukYqGrmgko02DoFAX20Jc0ZW+qktV5s3mkFJokTxe5HJNGtdJ/ayWAmDYNLq+beMoFUVKVeMa7e
GmAZNWMu2mLrzKjmLfvraVbXWrsdz7NpGyD37VPmn2vS+yfdzVZXTOyEs21eGd7ItULO+P4qlZAk
xEGjvwOASA8AYETXofDDvrqWuNtyLb2/Kj0NCy6We9h8J75aIiaZhwA8MVpULfb50N/vjjT98FOQ
GJ26zTSy+9qEWqlbHg97L6sul+sb5gbwkrYAX+34bopqKEtSiDK2srrxCK/JH6WFdnBhB+9/+Mqk
iiv0pMjGKE86+6eksZLtwYpXG/6VpTZXZ+fTjl7pod55mKjJK5ah7mx/rMTI1hV70clQvYvQmxbD
Nb2pSYLz3U0hHgr94ETpnYn5OKIJxRHczKr5NHwrNFkFrwj2GuZYU8IAoXNySLKuNxRBMLAddyoS
GZ7Pbvwmw6NrR9/58Pff/53xulxiXMrpbPEhf/t3Hfa8bDanlyCzoK/rBp/xopJo4uBQ7r8hASVW
H2eTKvC+7FF8s3CJzUpcFQheYZh1L9br5eDevVMi0l9Uax799eV8tZwISAImUb3H39zjn8kzSv2I
n+Gnn+OnuRa3I/HoRU/eFpfN/PDQDFl7XwbBzuLfldPzLjc+bKai8mEjQb8TxkUf5xwiTFDF83l+
EjmHVegESGX+FR+XuCy4Hm4m44VpDfocjS6hKzOWgQNnIVnINbArxKbpML6KKt8X7+OiTCvsbTcI
EG0+7w5idHopQviKQ9tW/22FjQO3/Bo+FfnV3VwjFTmvpBbI7iI3q8qLWq1UfbiP2Ske2lv1R8b/
K1DeVCP2Cku/BjQdr0gK9t4bpTibIeJJ46EBup7QREj7+wXQeTYyDpHKvVa1bJzsVVNV74v7mj/S
oZ/X5yowz6uBSoxEKKdXZjKvveSSmPAtVdDZdFb19c1MoDvoQ1HClr56hYWLLvsbd3uqe6WOOCCC
wGqQwRw0DNNRwOc+8omeoV5ucdRXR4daOIQmBiadhG7iz7cfcVJk+c2z9zjngP8Ti64jc2GXO4zq
zR5lxUOM5g0iQ5h7zmen5ji/AZZQrV4huUR4hqoDizcjK3G6nsgO6muaaK5b8tlvvN1rpmDUbC7h
6rspwjlxowt/6bcwll9KaEY17Sb9DGlpQmKmEcNoSdlI5lKN0hd1gVZk1FRwZQxR6/8G+DBe5cG1
SDuVpgUYr2+xWofjIDCJIL1qullyJu3y5OJVmtHeVjPuJ6zANaEd4x2kAKJwyQE0QXs0DYxiLvNT
7pEA9rI5Jze6ZR+TVaC0ic4ZKwvGRZ8QEn5WNceHRyf0GU/+vJ58SnpYbg9PDfRUBJYLYEjijrsM
vXHbnIzZfWtWJ5x7cTAYgsjli/VVgCyD7V/1DQEXcVimcDRRxRJcDo7VqfVK8royNB3cls3JBaO3
0kGTHR4+lm0Es9nzeBuIisX3/yPM2AhGZqaAqq8+lG+P7zMsTuc3IJDAg93ZrHBjGTADspdwTQlc
QOjkfoegcDoKAqeX1Y2K2VnePKR4vxC24P5J9niYPWQQHBcn6sSUa3iznd4Q17maLR4+GEGjowlh
MFkn40k9B7Hicmw8Y4WTGkgFklSocjcRAictGRqdXeASbSAV3Cfvl0SHPZ+7XRgWbE/qGOzN6Qwe
nQ0nuwyEOViRuumdTRbreY8tAh0LIT3M6Hs4J5P1vDjqSen+2+cvn37zw/MXb/63XvfH+/fvdz/7
QqLRKwy47l3NpoSbyvT6m8USEx+CjA//gw1MtMvsePDA8xqRyhnV7tibzt0T9EORCkb0qkLD4Zj1
0uiQYljr6nrg55RWZbW7ztdPvv32yydPf+uSm0lbs8W6gDmpGFaPOeXTl99+/92LNyCzf3FfGLF/
rzJSPj5kYaWn9VWTeT0Wy2x2Wp9vGoxrX+dN1owXs7MbeGedKmMaok5SRx5ln98fBHuIO/jFfT3L
Mrv+pEqKqXCmOx3u54Yaprf3qKJIYw47gi7O6ytaqDF0fLRc4WTwyYNyPTriYsAARoachlOeZZRn
ywN7hYGQ0B9pBeUp4EQzXn5ohO7y63V/JcDGhg40bbXfSmtLPVpvll46KCw7zOi3qL7HCX5pOYFl
Xv0Zitg32pYr3SrynAPUj/Mfr49Ojw+ayxx48qSeSiAB2emgnZMyS0R8EpX4a6Z1/zIvZQ89efHm
ObMfijvGCNrGPJwZwQCnPOjd3SEQWuSdcLQRy9kyTKh2JCMIHEbgERTyKjfNNPmFBn3E7ylnEkzv
0cm2MAeh7GdlhOocZTjM/oSwKtkg+/rl62ffvH75/YuvRj/85vnbZ71EBP0CBbR5UkNbPDzqlR6V
18++6iXj8HX+SJ/Eg4DEN6+fPXuR6ghIRiolg0/kYYrIv0Ydu5PdVGj9b6HyeUDly2+/T0wJUDmd
b6oWGv+QoBF3BB0rNqvlvI3Kr3ZQkUm6k01uxm1z8uuARusKX13oF7xP5B/3JUKnKUnEpX/FNzZm
eJSNSOyfGE3YgCf542YO8NWRzr8OdTVMYQgH/O3vbcE3b78avfz+7avv345+8+TFV98+g5YPj468
3ynhof75gW7YsFjHTf1uXMChp+P0TbV+s57+hj4WId1t57SdgtdzT/1CLKzhOk/h+qvnFSkumVbZ
v7Lif9MJJ6xw9f8+u399/0wpLd5Ycm+B81kiQrcnVmh7X1K2UpTMkU8itsrDB7/+1RdhvlKrssFS
xwMqcxLI1u5yOmYafioE+H4r1f1HYAefEjIiqvaixds3KGczXxJI60W9mU9H05pc8DbLAou4izoU
d179fgQSz8vXb3IyGedHeWRTsDfCHtXv5y3Qwik3vRlOQ85XlCQw9C4t5bInrl1h82+fvf4uJ1e9
fLq5PM3jGihI7IRaE9IjtisBsQWFzbK6O3hmshFAJnSkbjFU9xSncxCXhw/vY8TqdAgXEt8TQ7hX
hNkP4XZIGxiRjQ+B6wsvHgLzJoY6BP7LXHEIXDRd90tq93No9zW0+zm0+w21+zm0+3tu9/OHrXWh
3c+h3Vfc7ufQ7lNs93No9wdq9/O2dima9ght6ojZBI2dgtjyfvgPGAXzEREWf20jCbz8Upl5bFuz
WpupUMmh9oUvKI/VAkQn+CaRKzgtjxqFm9BJpbycn1k9AL6hWIA1+obr9fOXgVIgbTB29Sg/CHPs
erP284Ko54zxPeHT6nZuJ+2SQSeny5ArXDr4iVhCN/TUkG7RTiZtPJe282j+SDpnC2U+nN3UcSXn
S3lRJxs2P/afwJPjbf0Diq08Ypz0anzpW+5Md/CSNn+C9Gk9F5h/mJ/gxdbdrM8Ov4gSoknrnnaa
fsH80VsfUV5pmAhmqfikSXLZoF1MrQl3EhS/r+yN8gqT7W1fXP7+te8Yr+FB6qr6szxY3PskZN/4
e8fzJdssvb6jvSR6lQVJVIwz/furtJ+95zpueGobKH8qWZPMT0t+JhPnfcVJmGLC0KT2zXF94Aot
uUnMSvI88AsJV1JZl1HHLJGd1XJyMV5BudnacjS7AeVzMJfIyOwWTXIzvYP9La30FWR0rznbwbpm
zA+VEw7dJFHVSwBIddPMTtU5ucNBG5JQY8FukuzQi+IWp3J7NMzidnfp41wLnH6DNBAmLpGy0gK7
2FxiWkSraRFUPnQODmiglyuPZ7MWRDZCDsuuxuSaDJfR7Ozm3qLarFfj+eyPVeCkKtiNFalvyPcO
9hV1pboeTyQ5Hg0wQDhHD2lW8pwSnjOqgT5yVNjHejYVDxGsboMu4SJYbtYt63gIr2lP4sTt0p4G
7A7OydUYZu5Bdjd78BkuCvCiOeaOJXkYq7eskMw+egyjS7atL1u1/OzF3kTM/yICuk52qKkdZg9a
iFCtor1amd27lxV+U/6qvMh+JgGcQjpS9GP2WfbCj4HB3UAa+4z+P2ntsY4cbxJb5uVW9GGzblsm
rGWlglmFsWgaqY66cRSt9XQuUOTFNfDlZrbejNkeYM/Uqq4Zx2u8kNBUQ52yplaM39nzqWH2jdlk
M4dSkmxHUgUzKzERAEKI4ky6o6zrOeUDI1sJdhthf5lDwfpwnErpAHpVSDAw5ZEM/GOZZ93Vg7d6
SH+TD+K5vGsn0+ku/WufjDL4H2Hn7jIQdwG6DshOE7N7+HpXdjGqWZB1hK/gJOozG9UQdBP+LBOo
I1qeYTesBBX6mWkxZDlf/67ze8jCmgj819dzkxkL+GHhJLWeq6HmTuVWbQgxLZg7qu/5cJSptNl4
LIswKy1XQ82qanBVqSbVckZx4NtEtFhgmYwXWAmhAAUz8VCuljWIlGtrZ+sm+6i3VdvoaHunB7jK
20VUezLCkEmeMjsN2sV/dnb2QPLH++QOFTk1Xa7C4+x+IrhTHDfg3H/mytqHOD0XRKUij3H/ba7w
jP+NHDWtIEvB9+x5Dm+tmPuZCrogq8LoFEcYKOvuf30/Ki8aVVctFSfw/oojPlB1AOc4zBq4t2Z1
G+1V9QmkXz/7KuFIr3sMx/j2ZFFZvp0uqYhuT5i07tsps8rpE0n/6865afOVNxTDLXP/1/Gq3U7R
u+XSSFxztv3BrZtV2lnDfqzWxFMBJlRW1EFGjWxXUrVpOAIGrWhBIfUpLVYghLnP5rx2AswyxDsf
0j9cgqnY0pTNh0KXIpWK6khBbbrusDY66Z6OviJnkbWSS/msHLVUEbvvpB0vyItEDJ/yKzTBv4vu
0ZhFCcXLBLj+II/KCbfR2WWmiUw0ykbjnfcnT39Lgx7ypr9PJjrEHCddSlT8ewmVluJHKOiiUsZY
hwWM9HRu4KfCg6prP2ipTTwmqg7HOvMa/7ylOjDUqDLZxnTlX4clLLM2Jb7wyc8aCpmFlwBGTHAD
6KixdSZhItGd2+gaeWrjqv6sHiWrJuZW0Qjn9sF2GmqGFZFwhj/fTmSVmIZwnn99PywRzvMXyUbC
2eZN/ZuXr9+iapYT3E1GzQWmliI/ImJ7T1++fP1VIT+/Ia+gzUozMuC/1XzaUAa7Iv8dXDZEsyXp
V5H/3pY4Uc28+e7Jt9/CbD19u39b31Zn653Nva2XO8u8xhfrzlJf1ut1fZns/dOXL968/PbZ6M1T
3DOjL7//+utnr2FZvn65/2imV29mf0T5gma8tRfTq6ebVVOvXkmw084KSsDLe5Yz9n/YVqdZMXPE
wdqF2dKl78bXs8vNJVfyhiGhSyMtubrthnq9+bz/vlotqvnDB31dKq6HQT7Gpe7YDuQrHMlJojRC
uVLmdTdoZtz2qvLE6ffQlzigahSXkYOTFiDax9ZSYRux9IB5EMFSnmylk5iKL1++/NatjdR6M0Em
9uXm7KxaUcjTUFlU29espfYu6luHtzNEWbrz6iVyv9dF+xEsT3Z2pG1+1EZJvJ2UnMVztYUNOAFq
Sz+s9CljO71ZVWcFEi8jEwR+62dpjz1FP+ntKGNJD1kp494QYuaYUatZCGOnyClmLHYR8YIT15A/
8XipYiszAX/PxP3+R0TC2DA4utKbo7w2nTUgiN70U7PQZ87Z/33P+/i77DA7kiwZ9sUAbwV+KgyU
VRpVnesZodPDT11KG3WW0YaQt78rQdILQfAY0bK+XM5naNNmQBerMu3DcSQrIao71yuyQlQYYDDB
BA4zFLf/KXaHvYPGjeXNg18zdCHhYxg3bNIP1ZkzJ4vedBE/Hu6IhTLbNBvCI7mqV+9JaytNZzCY
ZnwOnS4Ev9a+RWZ6iSbjJfk3EDJlqf1ORFtj313io/s9039G74nAvdkM7leIp3CJly5pXKhvJkGG
jKy6XlKsJXuB+5b9ludMMprB66h55WgageJ/j1EoxbYZRbMOEgLp2eQ0YDyPcRJpjQa21aHAuvns
gCC7A/2Yz1H6G2RsoSTgGrPuqLqGQ1utXLNWN2vmpyuFD7l+t+wjEMaUUjBNZrPu1n2gu/rhs+//
eww9mNfnfUxVAxPy4e7b/+u//cUv0qGlX4l/CByfH7h4EX/V/vCW4AfkP8g4FnWUcjPSQtPXWIXi
SZXJxeryF7UgqAlWzD5R2AfN4GBqIntsEz1N9O5Rz/apVISbdTtdU16iA8bLGU5qQR5NEmAhkwBf
Td7Pq4/VHJ1yTFSH1kHcYQ9w3J+XdYMgrk9fvnoOrxUJysCIlQf9z+/JsjX95U3eZCbnkGzDO3hN
MDbQ9VqHT3ViIGrXJfLk8aLbOJ4fGRsFAeHnQoHUUa1MG0YZWmrIVftnI8rdNKlJF7Sgr2Zoy7dN
ko/Y4VHgMEi1B0GcZ0A11ClN6u3tDLGdJEAK9TmtMHMESBMP/yaLna6q8ft9uIXMTuTLzU3c1TOp
F8CM+9S8qNNNeL1lSnyw0B5awIpPKHEeRZofNKUNS9L71KXGLMIN6/7EM2L3rvmjdJmvoqpH6b1O
fhQCE5W0pJvldD5bsG3xAkI3wzPylLKta3Mg8QaoQdsPiq7GV3zBmNK8V1ZwvhwD8Sc0urUIDGwV
HQe1x8JQImdf8ilxDrPGkJKA1MimSYM42rLejg73DXaIfJWmZUupQZtATQO2SKGaQiXK4bshgVV+
PXZ1TrZ00lbrPmIHvsfd1PIKUXYfHdE5RogiHcfiw6efLebiO0bnHViml9AE6yxQmpgKPC/wyQln
S46+r7sJfzzpl/mT01t4ynyfzn/G7YaX5R7ELKp2bmshFgGy8rI1oRvNIkK+Ur6SUYj82iZlmbZx
q8G77mMITrVnNOgdEPxPK4K+VjnN4OKhtJ3kumRawPsT/X3OsztfPPzHo3842tat3AwnD+238ZIH
VXlOJBkPCwo3dJ/3KedAYYo6lsYa/YQwEwsoTJZ5jr1qKasfJsecTWbrQr7G4Lt1dV6vboZCrhdt
8CFiEUh56qLS2XCDQ/Mrf+wpEQPBV4F42BmL39bAE5Mte75bKuwUU9gQITDVP/1UmkRGH3rfd1AO
vL6cn1eLD4dv/8//xHGnst/OyK2ToJ1dZrjVDB218DNUYyfXNfzRGE1a0zm9kUR2gi0qOcwEp6Xf
6RSTEkGbzoGfvl9V71H0kI+YzbtawSRsrrNq088e3L//jx2FeULBrKuq00kF6D8eYoT+fSWIboom
Ia65n1meLq57WYX7vkm4LBs/22sCT5MaMXya0L7uqzJx7iHcB9dlx53v1k6arolThHkmDM1fFhzq
hcm5910FVxF+VeAT2ZPG9wSVHhwRqnQ+yvfAB/ahnsWihdVth3b5XNiCoq1FEEPRXainzHp8jle+
gy2UL9SNiW87KaXECPbWVm6sUubWY6PeiQ39Tz9FRjx8G8wm72/4MgyEBlP1OIfDQmCKJyFC1oSu
cFwzAU8s3EAFKqxX9hwpbfNbK3xurgzUol0H39moifF5oYD2+EscvW89bXnItcC0+HiSWxCdPFqt
OJDNZok+JONzfomVfVvTx39hgCleF/qbxuFwYlTL7khuecGZIwdPh2qxHkrUkzz9MATakelELIQH
JVV1Dui573b9hlii6DH+Bd5hiIluEAgNgbL/EX8JAAkNZyi6XUnWPN8D8NPc/x72XN/IbjFQ2cEK
D4pBljuYaljPmWAkQrPu8A5TLCi334FkI3url8E2/hO1mKtjnA8yh9OU620Pv+AuMD/oYwY/cbRJ
5yfrCfAbuIZwf8P/6SvglguEVNrWpZeRGY50oBJqfYuVukNpDlYI1AfiH+ZKx3vT5kyVUeCXhZ09
E9CkZgW6KyO1+9PNi35dqjk2cVDHcI0dlS5YWOVIHffGp6er3niyqhc3l73xdIqZd3oIKl2te2N4
4vZOe6fTunc6O+9ROFHPyWz5Kchc7z9s6nXVO62nNz2gBOx0XS96kzEhsfQmFcqNvQmmbcYFgf/M
NQX4SHBg8P0lBh71ptPeFCSD6dmiN52t4P8+9qbwcd2rLnskjOrabLKDjp7VC/zP6rJH7zP86uKo
d/Ggd/Gwd/F57+Ifehe/6iGOSQ8nWpOY9WZUpTe7PO/NFsvNGv7b9N6fTnvz8Sn0ZF6d416Yz3o0
emSjKO0pEpfjZe9yvPqwqaoejGHTQwSzHuN5wWgXNUzLoubOL2ruoK6/qJvJarZc9+TAQJ16yShq
PQa06S17IL32PvSanhRV1TkBW6+5ROxp2D4LhJ+Yva/wnxp62qxv5vBhcwr/t+xRIIWuvqaVW097
qDWiBV+f1fW6B2LxmmaMfajXq9563dv0NvPe9eXS2wSIjY3/4UWgybxY9VDZNK2uewQS32vGUOnj
eMX1SklqkPfyksLWT4SlifkZe7z31RS+vHCX97IbjnBJJ2UjQDw4HdfuTTbCt9hhXnbakEu5QaTs
IDpX4yu/myCz/gFTbYyz0/pakNbHC+NRAF8biU7Sm4m/NeXP5lcvpavSeV+9MIUtKJ5AGboS6lj5
WxYg4Q/T8eR9FI4EGBqaijCA4iMXQYMHI6nJOLbiikqydnOx9tDw5D4onkoYQSnvYeNk5f80Qdx4
Xyqj76mTlGvyTz8JfPYU3qsCSH5mhlMv/GrcJQIgmZrQR9eW6TJqUszfob6a0oj59wnHLtohcpCa
+cAGIXjX8Efk16iiAIHVXezuggEZz6WON0uDJjhKD7aiLGJQAGHhcW3vEdIzXTA+9iVLnFjUSQTt
ii9q37q6uXk/BjInoc7rt9VNQoNAKZg2pyLmk0AKLV+u6lBejts79w6dIWLllzZc5NmZR6c1Tue2
KtzEZIxGKt1PvD1pmqCsqtlJUCtoaY1jHJ5zedRbs5GYjMlhiPKwTDE26+NYDsUdF9uEU4T5FCy0
Kwp0jN3resrHlL+Q2fW5xh3l+42/pHzX2RBVGBkLi+kwO+SKKdI+ZWFNXlRBk+6TYH2BHIrfe/GJ
cuaTjOBYKpwExooVrDJnloNfE28aPnpYRnUORUwJtLSCpt83/H5r36IzBjWEe5hdJbzk2FM7CkQV
3qaJU+YR8dzaw7nADvpzAd8YTawcuDW+v+yrlQabeD9EGgDFJ020NJ8SlAIodEOaKtsDDRAJ8i5I
z3kGQsFnAdkyePYnyLgu3B1qzt7WILT0CEHeHkNz8NaRDvbcA5PdtGjaAru0XisTqorFYkFENBAt
we280663EEfH8XLbCO6ZAZgJ3jYxh+mJiZkeUpI3ttC9m5qWZAQFrLnN29ay7MmVuCcLYdoOY4bT
rDki89hNiSNlZ0fDZruxpA72Hav0rFRRDzZFYMJBVMNx9n01jSnQb+CmV2s4bw/INlVCHaNFmae3
vu2MbVRUe+FqYMWtV6BNgbCqlIJl3oeHgA2+xhxDJI7TMdhTKxH0U8DJ4XPTroV8cELIIaNQDcl5
lh2H0mRS4evOOpMns10ouHrfvBMbySUdM9xnqSQbC2L8Q+6g3GZ7bFhbL7hFOatWmVIv59lBM+we
NN1cKWWIjJpzu1CpzczSPBGzy8LJl5vNjDFTSFoDAmiO8OTG6NqiZlCxwNyhT59ji+YeNibp0vHJ
Vvs2UDeA/Nd38wFMx93sRt559D6yHTKvvZNkK3i1UFGeS+QQ8NV/geuGd7BtyYP419zMTm2wiWG6
8JlXMPwbxnufgtT1sVqtZlPgtNRHkWGrRs+tVkS6B4LXutyff6mmJdu006WZ12DqiVhKZjbJ0aPU
S5nSL3Vip+TTFelXSL3ACgHUjFysWFVCihVSI+RJMT1nvQypFnKtOxAsCJ6iW3RnnKHWKxOtV3aa
GfVFdjqts9PZObwMMtRZMRzg9Aw9IDMqkOhhPstgcBl1Mnt/Os1IcZR9yBBq8nKZsYImIwUNBkuT
QQiDqlO0WGmDa4Ya8cwoZbL1OttkqEAxw4dtW578LJ5LVh8W7X4Gz+WyrTloAn84s+FJ2a+2m1H6
B6PwGr7dmTSIKSLiGqGcK97mhLUQ4lpWFST4Iy3ecFGoGbsCDkUxbv4H7Aj50gD/+HvUq/6XvOzh
h0f227n97rH97py+Cyn9vf0dNqFU6uZd++WybqJqgUYF3ROrs9GquiYQ6j6616L/DRD6V3Pvq/Fg
RnjgvlrIGomCzTzlKad5iyWGiRxTkT4nkbjvZ1vxUoptWIcW3HLwdBFHSZXexNe67bjehK4zuham
pU7aL/WrqsUvtZUSPLEIHCm3Fuhc5ilPo9u4hegDZxBToZlaNxWdjt1Ysh/R7bP//X8wyUlWm8Wi
Wn249/b/ecypSYDxzSYYkkQsCi8RKELZSZarel3DDxndA6ibF+wJgpl2sNJ+gmlnsGc8afTTthjJ
8LdFmTZOglP7s4k5fA0swMO3t7aSws8w+ydlCBjP5rmd9wE5//aUivz9bKl/xs/qZ+4ALAMVG2T6
sypWXc/Wmgp+5p9/6nTudO5IfzNOTcBpX//cGVQ4CtR+GJ9BmWGXvTXmXn4VL73KdLPi1cvVeZeE
Kl1KqALE0Bg9nKG9y8uk4hKyvFDZWEzSFMwb8yLDtNic0ma9Wd6jWbBNZsWL4X3GKAHxpN8FlvMp
2QscveGuNAa2qHEkc3Wje2qvpAZTI3+yYCkw//SlynHQp6vQMzaohABSIUI5MJ4m8HsPgRO5p6mX
y1TnX7JY/OI4NfWJq2FN3SsU+POQkyhn14Ps2k5UqQpi+s9Vo3LPEHUzgdpVMsrcYHaBv/7dMulo
2Vr7oAkJgKBuP6j0Hub5jf8eD2wJuTHU1AdzgzpXyg+AmQj4g7UDdQcDWD9Myw1/dcu4v4Lyf/9B
/8FZkx0cfiGIQ95q4erYye1RO1cX1aInTZd+1iDJiEIOuYV8kOWXT/0RHSzcZcjb3+CHN/gBVikm
dAaCBwV976DUX1fj1bS+WozgYBbWoP4C+ujyziXsOuhot3aUnX++fI9e0/KnN065WUbmZinwtYaA
5tdr/Mu4B8Of/Rkyz35Qb16f8xwFKzmkKvy36x1/az71xPmNzgbTa+nG0PanE8Ku0mDStaFvgshu
6SinMjjjq+rDpqL9ag48V+yO5JduqTIOmdIWfR3HYkoqQZ6+xsWRnwrT6aV4/lJkP/My0yBtgC51
2ZaGn8mABX+edKz6aNnnrKyarVBRw4RaWsCvpQGvhR3VzHbkqu6qiVfGxDvQ9SeJrrmuJFsn19TT
ymZzlvIE8YXu4DbfnZlok+zrvM7GV+ObeCnCSXfr6SOp06+2CaWRl40kU5E8GLQyRXgWUid3CWRA
IuSySVo47oiU/Fikq5g53HIut3IRwuYLK/st0eBH+DfW3TQFf+MiA/gzcUvyVLG71e0O/yErFTil
ziCFYWccdcXkDK9G9EjBmIjTuqkOMZ1kSl3UJckcW35G/0Gsia7vCy5to5i4DBs3VORHuuAou9Fv
n7969eyr7haNl6mKxen/OixePvcEcZ7Y9FHiy8bxJHSTmBpnO8lIQjXN0uOp8epKDeZe8COaxy1j
Vmca+VmCVV+O31eqR0MmjU0O8T+W1WESMBccm+b5Qof/GcqGsYnxEBRrRM7pKM5+rEb8NkO5l5Kx
Z94OC5uxhS0JssiG/TVUvNa9Ay2rcbveGEdNTFMJBYkHtYANK7xhJoGIw+PmGvd9N5lEWlPtozpQ
BX/Bnuw/Mz27bW1xST+dnva/nJ7+r5vZ2sgze22pgd1ToonvBkuOVkCswUssm89ootwm7Fka3nI8
hT5QjDTLuQOmoHeh9IO7NcT/WNHHVHYuMK+rBh5C9+xk8UUxVhFMi48iYji1wh32bUSfDltR5Szt
mwhdCe62V0QKcJvyJVMnvZy8JsoPI4FtDwYU/A1fOa7JV7Guqa/Z7mV1WcvjP8j3Shx46BZCAXKP
6djja74otxi3U6EpYsXCOZXE4UUy4ve31c1pDd18jkdptVmuW+CHE3VbGnXTbSLEvKXRSdzw+Te/
SViY4aG8dCPf6YNr8Z646ShsD+4/3P6WRUiIKoUL6Ir8riy3xwAKMZ5bgV4Wn3L+rlfGXr9mv/NR
OFhBBx67enxsmXZp00g1c5CtsFv0ElKuSd6CSxPsbWSrkGtMZw/j0NR4KtmqznuGwvG6B02f/j89
Do9zHRGSnxwPHp54j4SwD2hQRSrHB81JRinbslccruIQZH10ruN8Ns1PevhHc9MYHGX85iNKE/A1
J21FW1qewKg2LObLcVO95hvN+tt19vNubHG4VztRpdUTP5qNvvlMCkFjTjafU4EvXD8nz7IwP+tm
zbiq0W7gBMm4JZKg9aY9oOt6GlK3eQ7VYHAYW7Z/ktXYXhqCnwwtYCl1H20WBIxNBlND93FXVoAf
S8hhVvWyWq1vCq1rASqTmo0uXS4pLz4WXfepJhk5uZoIlvvUMzKo9POfTQWHEofOhBfboiRETcK2
J1KNYEhh6hURiH4sRQlVuVRIuLA3i9GTmB+Qyx7KX2NBv5K0702bRzuJBabQSRlcsFqY0Vot/b0b
tp05u06pA2Qv7ShqM8yWxvR76XsnPlqmbVns5NlA7wE1HPsgaRPYSPcfSH2tDZvd4r9JXJMUBo29
mKzGEQSG6iGlYl5J4GO2MjGO8JegnWw70jsmYnbmNhFt8YmfBjvRHX4A40UtaU3NyiS8jDDwVUl8
rNbmvO0sO+1saKRbGi1v7DbYiaQU+T6wIsvXcK9P6SdfRQWL/NodPKsQS+jB4v+Zs9MzM9/LHLcm
QaClojm9Q6dLtql6bH/cpae8y78k0xOpeuUtKdFHxXje1AjKzDkzePIJncgoeXC1G9gELHdcVDfE
RMu+od12oYY6Qn9Qu+cA+sIJWYcFbGk79Pt4VRP6ui+kc3q82R+rqbGwzcRZO5sFLqtWJ81/dDSZ
MaHZVyB2oHuIileGLtQYR027mFNeoBM74h9xcEHlkZmsN+O5HTu+E8Y8/bhDskMHoIO5E2YIg4U+
gwZLyn990GhgXPjgqPrnfTyR48z5Xc8WF9WKvPGp/lgR5Cjj/h66Y28O6LV4+Fj8DfAWgDLj1Y0J
rqBQ5fncXRWwYzQFcp5eNeiwW09mY+yaAN3zHLh3nN8zdfWYP72eUW27Ycbzq/FNY99hcoH0LBfr
Of4atOM4nvzltWLek+NMeAr5e1SN0QZF02nZkRXyNDnpX05nCyNf8EThv+aI5agMlb1UOZswSHLh
yumHom6CrDPQBhy9CbusETStZCnQyb0JwCtbVNUU0WCDNWsuJBlT8AqVg0hoQOQ2PhHzkLc2ZH1G
nK76PQ4IxpFRfA65x6O92iOqJBHzZ2e79M2HvrM3FNEjxxThiWafXbLi8Ofj+Omh+EMv0w8ztWsU
0+X1I1G2jfu2XK6ykk5B0G4AMlwxZnut269t+cKgmbYpVqImipgj4UFG8Skf65Wv5bTqIPt7f6SU
HoEKhO/TlJT3KRJjJBluUaq57iF7cGrD5hPkNDXUUArRPYCx4zXSZacrEub+FqIcqdmNoG577glq
utdlKkuAeWBaWmgvqF32hkSOKdXqU27VHBlFplW2tSWUuU3IvA72oz26sVBhqRvtJuuIc1bY5BLl
ZE1ofaW5Z//0yszYCAqOFgYgy2mmrXua37mII2yRlRKykCiUfGmohRWEgs2OO+92bMRqEuUPuCCj
GLzWi2Jvxh6/l42I0sLmWQ/Cj2peROHW/NVt7gtv3fDKmFcLHuzwoNl+b0R3B6fYsdNWtlwh0XHQ
jllb4f62XAMG5W8PLZVVt3g0OIcnRYraIFbnBhGFrjboMDfN2GNCXhEkFJN70pjc8ugpYQVgCsG0
p7bZEtkaDpPAwFI3GWmSQfynjB9/+kmFnkyn9jebWVM+97ATyu5qRgQMYjxBcdWWNND57OMotfre
hcPfGa+nHqVZ43hj6IJP62LspCEoxHbzMImT5zDS1/0LA+JM04n88fKTySEf1XTZN6VzCug2oG69
emkJKP/bJQUnsxOSQariJyQXGtxrXSUNqmQ7eXxSGr8F1SG3M5b1kgyw1noebBHT1aHqaRDWxf2w
k4vPEtMrZjNmR+hsQDBLrueyieKd423BaLzYDTtQP6QWrtswnlBgGm31EKNrQWBnHvFyH7SyRdoG
ZFVVKVwyRAp+uZjDwx/x/JS2RoLp8XJcZARJVa16CQK0zacVwfGSXz4j9J5Wooww0P+BtIHT0urr
7uYNQargLzJH+Ph08K1fUeWdhcuArFoFXKUTvdhbtkbbwksiKn+X2H2UUM7bfZD0tFA72Tp8aJsZ
807vTI7adkp4hhv7uk0f6x/9iabkCa3MxN0ymlcFZ5J3smsiBWwUnWt/tO8rCj2yMct6uEmgpC1r
CLQiLuhCaB3dxODY50ZQblJuO+xAVU2r6chdbiiOSTFm0PKhj8OZXIzxSKaEKTeIdX0FfzVFRDq5
bU1pkSyjOvuujLGoyo07jEkdD6yUQ4XKRH7ZOOq/dcXtWIzDVdtxI5mDtcSMdwdMpWYdnBMqMppa
c23jBXUYQAPQBtCaTmj346zeNMClPPJ9feWm1tccVrWin7qWgj2Ovi0GUNtkhPV0msBJ2Co/JRR0
lB9DBuGf7FZfaAwOy/KRTDq+hlP+0EnOCVX7umKZ6kK8bYJdMzhJmv/x6jY+jPV8j8sNe8Nefbe9
4sJxtF8qvkMEO+IEr8JIO2Lcatwr1Tl2eb63UoD9bm3poaOnHsBexa06mrC+PGzxjWsfYig+GE9S
J5qIFoWzm+/jbaX9Mfbx/LI9a3H/KuMX9h3gRD/jf1AftYLZS36H2T3BDqoYZoHSG+1eYFxGsw+C
vnkHhRWL0MiHzCksRMRnwKcUQgtfqY3T+2oVusEwYgeoU06PKS7KihPIa7LZaYuBF6HEl8CKwPxX
IQyQ7Wo/8dLcD0meCZPPD/21v89OlI9SQXn6QPE+jJ6bxscWbT7Gz+jFIIPcHfu0Zd1eEa6alynj
AtjRrLlAbX728D0mtjiDQ4fXyxwxmwQ4SlhkIxUR9WQ15ccjeUlLU0qEJhcHAnKliOTVvcVsIoEw
oxFbb6jTuSGdm25/Tfq9tl5TEDrfD2wUwpTFDKvLhxq6JLZq1PyBCGK2z45mn13P1kXkMpZoFUXH
y8tqiuYfdJs4X40vKYyryeD0Z7RHEHSnucfBSLOqKXdsYZv0Hs5mU3syc8vWjDqa3N+CG4jdZhYg
xjXxHMdOF07lgoPEr+hI4tLB4Ci73hjNpHGT2RX8sF7Nzs8rTHKmJtrOwcVsGiALMmjqM9Nyp4Mt
Oo8CKIq/cf9QOV3Q/HS1Vga+5WVHVyniZMhCLGIRazJEp9zPgO+vqwFwqbwxqVOI1Cm5cON22UhK
G9k2aOMjBTqCZ5O9j9LVVLBNVpWx9ME3ArZmXGg2CzwQsMeJwQGjm86YAVPq68tZQyGpNK3i99UY
n9lphdJAtZjAVsEcM5Xuz4ga4HoUEiiQZLC5A/64x7wbloCTStPv+3NgWBCX4AWgE8QLkOSwvGP4
9FVzNqDj0kw2K7TSzm8Oty/Sd7JIzFIH49W5aWVAMOwYFEHF8QBYWyVn5cGU3taOGeap8P/H6iOD
JW0nyT7z/cf6LaZTWJXMppsi+bfsYP+86eUaHZ5eHSJaAHNiC+flbCGOiDr6R4FvcDXYcgK1jXgj
ZL5ARdu8Qqm664h06fiuGxmT8XEcOQwP2HLP0QSiWsb3NMJ/Ul5Bmu4xbfSOKPZq1JrYo28EeukP
Qkngy/O0sl3lmcWTyGiFphmMBQcO2eCdv5CcRgI+JgDgpiQHJ/MoOJn8Uf9B/2EXriZ+5/Ln/rT6
eNTFZyelq7rNCTFx6XYhMCd5Xn1E70KGSJvgCQYJGmSYaw65jn1WgQ3TmIERCyEvl9Jz+jHwEaT1
705Id0RqzktR1FOeDemOmHAuGT0OpHie7eZYCtiQK38VU4GqSKQjbrAC0GSMR5eY4DZX2yTXgjNt
2gUm/CiwQBg6bzBSoEhxrRzrbFnj+tfvlra7pg9Kg+OaIDSk7JH5xg2tDGdP9t7Big6C3ucHpPb/
sJnRTm3ErTkwc9hFl0b1MSw9cR3n7sP97/9upDzikD1/OHr7zd9zPHyzWdICkskAfr5HERbW3Z9l
cnEl6Qeh8L1EEPzPCf/eGdW92iyoe7mn4PTCuuHUbDC2ewodGHZNha4L8RY8ZR1nQoHd6CrBKvQK
2T2ziZsM71GBIZTYE2+I7KVGNlv6ywV2+f5rpiPK6jOfspyOC0O/6ZcHVgUpuhovcA74BW4CPAxA
K9ftZV0zRCCp/JCxlwt4U7YjkEcZwKC4dwP8Tq6AEBo2ahkqGkBb7jssG4h45LokEcSyh1RsRpcF
l8LKHyUDFahLly5iF3oCYiCQVBUypxOSSAAC3hwTpxSgKnhY4R/s6HJBc8lsW94LioQcTM7SYkJO
+iDljZG/c++gw+gQ3zeSEWrlfuxSVukfEcuk64+vsaov7D48WXBD1QtJVgiVrIjVJ/u2qn6xXi8H
9+7JFqlX5/fmmOdvfc8c4z5iO3dVoO9tJp+Wzk1lT+R5eaLCfhXhiXY/D8IMxC6I6uy4kecNu8lZ
YSdeMrr8vQVSZF7S1iMjBd/pcAKlZ8SiRBK2kBAGwYOPNTaniN3UG7GH0ZGmYFhMg0hyXmI8fRJn
P3kF9oHwIGdumvoh/dfaduWoFeo92P8Z78lr9aCkfvEXssbeG+XaSMKB/Gs9Ud1dEIjETK1/CyHU
jJJrlp3rSOg0LEem5TvYfHJ8jRSSeJeKIcCHMWLQIrZE4j+Bm4ZDxWtzergAfirm6pYgAc8Ln5I6
WdqhKDIandb1vNXxAX/kytyq0Zcs6sUfKwQQJp0Jk3BKHXjUwtyYdAWJYKR1GN5CmmVHANPlbKqw
eqTZ1WMeSZ2yc7sQNj98LRW6ZkLAtmt+g7R+Uuf46KSXvSFplwTWhPacFRPH3aybfZbpiv367Ax2
enY3+7yE/3T/927vJFXb3MNd1c4A/WhxCUTU7u4D4ck9kbBSe2b6/CxUilp8ZhSf2Z4OHpyUgSXV
6Y661B1ithvWmYJ4CdxhxQHsPy66rY74XfzPQbO1yEHT/uOB3vXSW/RfQYBbhszC53vZHglgnqKc
asJPKywZ08ItSqDned3kAzOPNcg4eDG7byiJUs53Yj5QJ5a/ctBfyOCMtwAV0MCW0WGYGpcprNYf
mWR0GiR4DyRInwr+xxCKtKxTD7wtdWCNEpd5R9CQ9YNQs5kGsuXqmN2s2RXJ6mNAeFDTsPbW6rWV
ojrzEuGzXCUL+UeeN5e2hREAfPjAi3H5pOsUDzkdkVgS7olextSnZdrXIXmcw2nEe6BrtKPbnSd8
/w4Rbk4rtvSwN5IyNRCe2DYyHkNA6QftfpS0WWQpuf/fvH39/MU3WXfv2JwuuUpztjO8hOBdQqpW
pzrsd8vdU0+3HM5w2bbOEs7bSsrfhVGG0VtvrduA4m9pW06qj+Lv3FpDxmYkBcar9OHHWkQNOUn8
hqMt5lX1iaNUOF4kPTbxJ8MRKAslb4k8dIcSR2MsPtjmgWzliuUqZbK20B57zK8pazfVIENoBBUd
rjeOlMYOdjqdf9YKcWDZ5CW1H+RMFMDI0qQQ/NrkUk1CjSEToSfr0JdUhUbOD0aX4NOU7xshylFV
KprCFgvWUl73HHCHhVh2b2mcVSXyMiTTMX0DMiXORgJKZ3lD9xAh6fDfap4SFFQZDQcSt+LPdBzd
F2tH9OCC4XrOZebLxITqXWyL8Zbf4H4PrK/BOvB7qXv84uXb19+/OKGd6JEJ1mVHZPBoxOmRCM/T
PFhUoPDP2oV3yAF0Ta824vSovEB0YdIoGEfgGsSHzcK80JvNBO1lncARRbZOXDD3ml7Sc0QNqS+P
at/3z+DAtUWs3kGFiLmjQAZcNPi+Rg1qnSHOouBLcl74JruO0tVTA4IKw2gQyMyYb6ANhgND49GU
EZUtQRvK90EtVRGi2HT2gpJxm0s978v0xtcYMLgo9rdWbm0KpJ5vnURQDjsz3Lq/cfi3OtPQa9Lr
FE77GiUR3Wf/7Fpfr9usSdJW58TCpgOI/NXd3bddrEmmBNuOgLJuwwIjFmdf+yUt2y7el1jutFy1
zyzZiPu9otD3OFS7RepgYbZsi3a+3NlBVLs13TEhAKc3ESgq+06gf4HVPN4O5m0LllUK3W0rwtq1
C6297lolfBKpLYUSZ8m4WN3f0X9ePXnzphvMA6lIg7kw3OEeG/k/Bdu2HXlWTsiaIuLgawSCVm/H
Ozp9iJkJ69cgH83IdG4TjzSj16IswEqL+2VQNIGA+pvnL94OyP8iP1zlGR9belvBHcsxvt2YiLEE
8HSEvmSNejTdST9HhHeLLXlRNT4qL06PQPIm54sQ46AIHtbr4KQilPFIpjBaKA65b0p/W1lav0vS
4nm/DS1cxbOvU8TYpt5Gq8cQ2NUUrsLu10+ef4sgRm0NNG+SDYh/yS1H/uyTOksm+NzAKrrOIiQg
FvGAgkfrq76CCabMeSzcqW3U9R19KSEE+ed71BRFxgaaodjAHkv7dBxPSi9jzaQcEgvfE4FSu2OF
f9kBhkxIARWn+NOybjyoYj8kFbtlw5OoW9lBsUTlX6nHtWtbc5PX+4zF8ls7oOstI7q+9ZDEujX0
brX2QXd/Z7Y7BkDiyMtEIiQSk2LQFo9QZp/XYo7xJnDHWZYJtBBQWyfQg3/CCQxhdvUEhr/9+ScQ
LzqLe8abx58BrSIMFYPee83573MhltGpeu6FxLFB3/7oknmmQOSi4sfY+EnHN7YESuU7ygnDfkee
rdnjx2hPadZT4EK9DJ3Mgebh5awxDiaZr1rBT5SowGEHGocjnoVLdBzvYvcUF2rpNWadFRmtMIQ9
BasM+loczc5Q0TUl7twUwqNNEK6bNtI0f0R0FVQ1h9ISxioN+fcY803ijTAAAuOQkKk/hFmh0r3M
C02a6gBJ+MULjqQKAqDgX8pUkn4mb4RpnJPXJiIrsBtctOyh9cm2L9My18x6x421S7ZyCGqeDJTL
9zkNz2zvaFLvOPkJtj3IP65iKD75olMs4nQPnMxGclDPaqgPxfX/shZXSQ01bwlQ9IrsjUCECvLv
nLETxjC9r7ScxCUDBYUP2z9ScDbejdyNMyEuNvBmNyHxFj+CWRXefInWPO7NYHCc2K0rnq7w8C3b
HkuWB+Ifx/84ONnB/RGXOTs+mCIc5OBgOkig+lt0/y1jgen/8OD7/2g8zPhw4YMWppsCTZawIB8e
vv1/v/rFL6IcKmShg+cE6mY4UYrwmZXxKyQfsR6c2TXqwpB3nFcLLho3uVnP5qaiNaba12Av+5IN
Rk9MBeKgnQ5eOOuLVb05v6C8P9q0BD2srrW3/2ZVbUWQiBIfXU/EKCrJ9ejv9Wm7kdtCGAiGhGn9
X2ZVnHwbv6RkXpwLWbyTn59lT9HwNHbBLvUZEcCIeHi7P0WfR4quqbAUvG6vbwwa2RhmQrRZlIeb
v73uZ9lb9K1lUC1LlJy/qbpkXnyKu1s8+djki16v4+wz05XPsNpTylSNPMo9NFfoIpqdVvP6Chuz
aX7hXtnYLNZXFbu6fMSBcy8o5DDuT+GP/ilMfW2mgWcbtSgyvASla5lMa8UVX0iOSqCxnuFMmbgT
bpX87jfrGh28J+THBLOMsGFID8m9XFMO62VlMrSQh7BR0o5VY0AJSuFGJtwx1wieB9mDeg4xqsNN
i6yXWT7aDh9hA3PuQ8E4Y3o8GeR5zbmSPY9rngWkpeacU7yJ3268lKMRlgUyFKTME2cg1Wpx8ZJC
cOFBOZ5V6POXN8akRVtVGgLKqvFZY4ld1kbnfTab+OudXV3UjeoKAsnThIerLCdmAU/5DfpM2/iw
hhfYdGS8gl8pzgSBu6wvFGeu5aGpzUThEV+TQZy8B3tw6MjkwK78HMY/x6hEyhRkm2VC1H9swXZ/
mBX9fr9HNtReBn+yFpBc0dmrflpXDTq7nc0WGJB6I9hO0gL6K6cpUgYkJNgz67TI6AceTg/+NnOE
qXJu1hQZgkK2nsunuH2AdRGWBkzzbEru9hR8ohN9m1M1hz2DjPljNb/hGU5uL3TQR3i+FXmcwfYa
L8hbHvbr0uT4lmNvtjpdKWs6dWfBYveQgiSLxEGoLchjRAdAivWVVQvxQBBOROP5kX8duWohoUC2
1zPtYeAs8NtiVddr6hrNdC9TjrrTJrhHEDGIsdSj2hGwDx9gqhD+ZCtRAfspEJylsJ2abajnfsCf
rWxn4xgonCRcxRIuMS2keC+Y4+FjBygLuZ5fMZzYi5lc87WMqthsxMcZxDK7mAGHhhN/Q9PEHBiv
Dk1lVdH5woiqpanOy5Sjptjck9vcBMx6SSf1KNz8b3FBlOpu3jQFCuNyJHr8MnHpUX3dswhi/Vnj
ZnoQAoaiKG0KwjguV7W/IrE+Xyr5G0EhSuDvUQJo9rE4fkoFn/pJXbGvUvdp35yxk86eniRW5Y8/
R+EgOvtisPHsDOqj6TolcFsVyMru2yI+d6WvtwvGljRfK4OcG7ABAIlH6Mog41E11Fv9op5NXJpt
b6eEeyQ0XkndLR6ieri+FhXfbVKfHt5HSSpSAgG327fVHXwrnKLWk8K50FyH/Frqpshiur8i/6dc
Zs52pAf8en+IsvygKQ5WZW6zBnjDdRHP3vEsRbkR7I7J3BxBCr/BH0hjAP9ATVcQWbCvLIPfVBx7
SJcpBVvjZlbNp7pix30LpW10/XPzaiNMKpKW7XPjCT/I4BqoKsbAEEjZM7i62Qgi8rH1ClcvMIOv
pNyPR/LSMqBgq2QqWsnZbKpZ1724wh0MxJwfUsgkzpMJVnaesNTtpv1CE48xkAH6z2ylwl/MsHwf
4YrFozx/hN17nKeuNmbVuwpPOGmHPHZVL57CN9/Qywq9fEhnhzwYv47s5ibjyIoyxLMiM3JQ9V6+
t3XXdmGlG8GJKbeCFYbvYr0toq9g6bjr5MWhQfxUAujN4pa7gNDx8B1wi03wHUl7BYfF4Bdv1pfr
4liv6Em5a0tAV7cvMrey/wLLul5Xk9FfZWHtpCM0wmiLf6Gc2Fj9UoRrXFqO8wLGUnhsRwiqewxn
/oUN1hDmgbCUbZygqTcrgsLID8gKSGWbojR6CusKjZzcOrDvjIHgiYe2yeVCHS9ur/wzL0WS/0Hr
3Ps9h/7ve6jbbwhvrCQsqaXG56z72puGFOS0C2DwE7//W56gLXch55x/YUdVcue4WCjVsSe8OVIh
qW1sm0OoDJTqJaGG7XGYpeheIxF27EsmCZ9gYrypQZICxKjc2Jb3wAYp1MumFZjL9+QuU8iMZJ+f
bFh/h8Ibgo8Rvoy5t5uUlp/end4G0rdcWsuf8FSnkYRTQ9/Gd9KD5Ny0rG2XDKTJ3FBuqrV2PZxf
n9d7rZkz2x3hDUG40qN5dUapkNVXK0zOgM1b0rdOuBaeyVukIwn6NqQRCxL0J1Gh4Qx5bowwk8BI
28IkYkbRxix2CWjqVFGHzAF+spjuc3ih2L4H12yBAPo+ChUiiSwphcV7OyFute1s3QNruQ32buIs
2B2kVr2z8wSrwuUuGMbEkcuLPLub5XRt5Rw5p7uP5uG8tCBQL1f7rNTL1f+/UH+RRYJp2bZGBJKX
fU8JW5SxZzjsvK+q5ZgAy2ieSfvfGEUw/LUcYxQYGYn/JJYZEH1hryHsECaoW2umQoE3PVvuOQKB
rLFc8V+DUqUU+8m5I3BgB+0m6umTFVqbU7sq3lmsQvDQJsL9pYczdH+We2yexOW+cwclFss1ahPe
5+nJu93/tm/M211Mro+fdq3gH/py+utfKpKxTra1ObxuR5VyGr6cJY7Dfvv/yXQq+78IZYa70R1b
qgPxZnPaVvFwa8XvNvO2ip9trfjV7GNbxXvbW6xbx3iwteKr+qpatXS1va9pPsBr9DdhBNThJCPA
X8qobCsjoGGmKfEMxKVvw1TUid15YJNsBzuf92TA7Wxkb3o0AiAoI1H0/pZ8iYRmWqefLzTzyP5t
8Td1UpwqC9MvYbDeXi9gKetrOwwQyDZVhzIIqakSByOkUOY/V3lxu1sx7MVQv2X/xmoQcaVKMAPy
1/LTJqfYQLts/HHMmU70YTxb5AOmxcP/KbF+XvEi92TtsRW0Y9QEHwhhzPro3zL+TEKWFWQa3G6+
xs+6wtJPUXB8OibLUfOSZBg6/vyO/VM6buWvMEijJVezcjBFHR1aC3GK/Rr4zbFUO6EBpKV+099W
AAZZj7tD2wmQ3Xt5StURvUzG7Wy7BTzANpYfNMODpkdKSOljz/Sg3KtxphAQaOH7KoPAahTvKPt1
+oTYn8t0rVsuK9bLty6mo5xYVDWHn+EjrH3ZkrNGdVTXUwtopmvaMl/THRM2bZmx6adOGfoCbZ+y
6d5z9kmTRpWmO6YtrT8sDpoy1h4yn9WaQ8QuTTyl/VWhcfShT+wSDZ0P9dOGvfIfx4PDo5NOYhq2
3Y27tIcgT/sM6S9tSBU1E82ZsoXw9kEdhNbdk+yQUt2v7GBic+oOYTdHYKg/HeB2x79+Iq6DKKW9
LGHQYyHoG/Fv2kMGkqJ/HStA8gKm0sxN+daF7mw3j+3cJHs9zv8qJvhoLWWkRay+9wavYyoEMNm6
zGWU7sC5EBt5pMcOyJStmBIiwzIxOHu8AEVuDCzBXPXQJsAJMbpsv8sTgqjYNcNVNDWjtdxiysNh
jMwQ7HIasThGuLrdav95lzvsqwdBRHpO9fvfiAOQoud1dWhjP2zubQJBtx4YpPsxRgcKyNjL7kAl
93EBoeicJLPAX0qvXJJZ3OG0EByZwId93MDWk/CxR2rYyrXD5w1EfLf3i6GJNy33x/3c2WpwUA2U
g91vfeYX6TCYv/qj2q797Hyx59pDyX3W/udfFDstC6lV7Pf7+A8CBQXcNeWYdEiA07y51sZTfUxj
vDTxBBKu6E2Ae7qyupS8YmKttmI9DMO+xZcJmjjxy2/zXtrDcwkopByXEsxXezH9jS9O2ZBfzZrJ
eLWXFVSK/tvdktE+NNl2cNn3GCCW22d05JEKZbfZCOn3aAbgyzIqhmA4ZvzsNysAKyY/tmk7GC01
24881Bz6mvsyafnEVwsGjzC+UHh+/Xd9UI19XSXMGhP6UtRoAY8gDKwkcFUSuzDgT/kIVyYq0H+8
OzC1opHZ1tiAxqQZqzb4aw4txCgI+lwclVEBE9f9NRVQe002Krn5Fo1NKtH5/9h7myc3kiRfbN5F
zwx6+6QnydZkMpMsBzRuJkgUyGL3zOxiGz3DYZMz1OtutsjizqxV14IoIFGVQxQSRAKsqp7tveo/
0b+kmw666SbTVRf5V0R4fCQKxe7p1ZPYNsNKZMZ3eHh4eLj/XHlOR1icvq0sp01q3YgendbN4wdJ
9dvute7WuaTzHDTpfeRmeawOhRFVpaPYx9uuUu+Fh1DraG8owLqnGrCLzFBDC7Uizd/58P9AAnv8
zfPsQfZ0CeObreBYvWng5YcX2JFQGjyRVu6Vmx2OI0uDKAC+QxP8A7H0QxIQwpIycuT9eU/RhEAW
dM9g0LmIbl8eOrEKVNpgoJSvV2XDJH0Ej73h/mTvkaI4eCku9ENozLj8hGR2K9JWBMn+zRY2Wu2u
BkiahhBxpB3rU7MUR2/vFiGR9slpl2C+Ko6dtS5JYEFOH4N4dW0ENSITE3CMeBt6eG+yWcXhYgil
K8tebc/O8GxYL4E/JspDN3A8agrHUdb7p+UckQFEWMKPaN8Nm/nBAf8ewVKqlr1uajFLh9kJQbBS
L5qzQuDRHHv1WBx9i91tDISeoyoLkPccycAUymQtlEowPYTstzklcITNqU6wi0TvmJBKdiViAWYz
5m2a/MoNJp4liGOjCXMqMjh5b04H9jTWGyB+sgnaeUU+XuF6h/SJJc8B6tPOSleIH6Qd71jswrVJ
rV6+fOMsarCNOIW9zNvGakdYY292h81qd31TONcO666H0A27htm/GBPHCIQgzjqrgNWgHbF+ekt6
VYFwC1ypm6ILQTCY9v5mtagYkK/LOy8X5a4N5GJpskxUmeOi5FCaAfeV1xUkKlrHtlD3seRwgcix
eiA0ZOBVwgokDhAb+sFu0l3ZS1SjYIBtZxOgIzsWy4WqH7k+4ukVaKnShxCJx29WW6XwZDl5alaS
Wfee9U1PoOwGS20wo9Xg7q8rwlmHYdBmeo/E5aBHJBKYACbG2x6TqiRzzTWJ0Ev07Nl5PymxROdA
cALwXtEPNCstw4hzFuxX4JMNiCIkcaACfIbIuw2SCayurPeQXUN+B/kWZK3P6U2sgaM3KBbTu8v5
dOSshENboAfSdeBBGSlTxPISAZNwSWow7DOffVIntb+Hsx352YyzfhBw91N5VeYE76T+yNL/Cruw
4MeCCLp+yst1mCmLYUCs88IP790ean/1H/V5+WyLpTWK+V463GVm23t4eCB5FrcB9LnCaQb68/Xe
Ha2s36OMSLriafcgwvYRM/mmWrgCzB/bFVvocgNo0oxyxO3mXSfxGC+yZmEuIB5hBezb60HRfaqv
5juAq1Ddz8Ly0Pa4tg1LJyc80JPeRo9phVkEgQmppcGkFNcd6gW94A95QqSMBihKKEXaM8h8ljoD
6dmeRliXUjj/au9Bq+NbUJMVFIbq1rwXgOzu1jOVDK1oDeYzjxKDJSkUMKYFQWRyeNknoa3ESy8c
SxVpNBjyx3xkUQXYzGhkvqvBR9vc4TCuCa5hqRIbJ3AEfozOG5JPOfSrvVq13XFlEdOClbM9tqFI
DGdYVrvlQ3J9ju1DsX5iIaod/YAcQs0xcoH9uINw9gaM217/2msZU6x41/sJeOdMJnDPZefmsjtR
RWOm7qD481/6j0NZ2cVHhBvSGHNifyZ0lS3oscWlPhGjpMe1iL0Qg04vZUrYXGCnAPHDPnhxHgWV
6NEGZqFw4LX7j7yzbjo/gXYlElRRAkM4ZCKtZygz9rTCUkhxyC+IX14vVglTircAXszKwzaKcQnh
u+prIwaxWsFmcyFonqwzmZA4P5mYvuix+rjpIR0DKGq/yToBRQ6kqu7DEFvCOz77/kncfs/iYzo4
SMa+KkWvEj9EQXRYSjk8tqDFAHpRIQIN3BxDSBIVQ2LjwJwiybo3A24VREj771F2cY7C+OcQbCyo
B47jcLUtlSPpbaS6djdOBNsHBUhoSiS1aARaL5xAAs1SZ+2jHuAmie/+7eWJvyk+6UZCov3SEsvC
H34F6NmQx0D0Wo4ADt4CmNO0odjwJOSMHsavw+g7F4Gop5Bz65Zo8Ui6kowcex9857uxJEhFH4XT
B2KbGfMCXWSezG2YOboMpllQwo/2VbymjCx1Ec6EdOe74YF7YL21QhB5yMdHsTif0+fZiYdd0JW+
1PcfEgk8Ls76q9u9XI2DnjxBQAubUUHpQ/LAdbVEkYNWsu95HowuPMpLBcKQxOq44LtIHWk/5s+K
dBAQ7K8AM4HEDHr3GnQyW52qK9OCjIOvRWFpLZ+3hNSpfdNjAlMX1mxvq7Z7KxVDRZNqQwkiVQPg
hY+63bLsBt/t0mxgeNhEDtpQgZDB3Jy8tS1b6ZZIhTyLdTx1I8NSkGSDHIRhER6Xlv+e2TrS8bBS
Hs8wiHk3iWBqpUQVXQkxl+MfrYkhwgS4aTBloIRZ44HiI92UifO0Tswo3rLpn7H559FhOdbDIoeK
HZlAwUSkDBGZEncnKk3c5pMuA2XEGpnsuuVyRVfbJ8jFBlbG1ZX5o4/bqpQYK6r8+kivd+HwoS9i
2PKTxhUDX+zHBRz94Q9/GJHkBTrCxAEahWCXX9FXI9gzWXjJS4wZiWFI9J4KkIV0YJBJhcVaV/Va
lzfKnz50x+NAJnwidUTKiX4/y2X3qF72ilyft03irNlsm2iLgdEejUL0fVoniYUi+w0ZQcjbl+BC
urjb5LLkN5z8Ncsz8uqDpJ/uWmKNeUjdwfj+kSAhB1aSKTHn1XO6a9wC3hMe/3gGNqt0LgkHQZdm
5ANvGmtkAKW/1eKnJRm7DGWZ0IKIQAP6/c4hnYrbhTuJohNIx0UyrW3bPso3U7pwAT/cs76vdDPS
sbl3zm5L0wHjEAJasRZjCYOiWy+NZFgjHZbZSMB5FJobmwRETsIvfDLC1RagJsekMi4eRsUDfxf0
NubDB/FRmyEai/Yc5f9BcXlFVi/khw2Fn/Y5iDiMgVjWNhHjDYX7R/Uit6aT/zpegtEHtszUhm5p
TaJWUqNoflg0zUhsrbUA6AmmkY7PYksqUTjRPTW74w/zAaxjHWKp5bm8i03sx5Y6fie2l8aeRjsM
UeLp8SVd4qJsfeyAv1GTqXHBPr/Vh3pw+406X8+2Jc4m+Ha+Z/3L+cHzIdX7FtgnOwStofETc0CY
Tvlk1gzCW/HwPPgQsKIeDbaws1VDl/npJd6Uyt6w1z8HP+g9vfD6FKUe06kPjFEJiUPCWoasMlIR
E9feTLc9cdYjtSqVAyrfIC/sBYXl/yEaLQHtKu4hxh7MsOB6ar4c5DWra4ZAlENgG2D9E2WazrY7
9MHEomvY35ExlZy6Qp0qkjH1gPd6vV5xsMslkRWYQ3ADKoMwSCrm7IW2EixYZAEjyETasShwtyUc
VDzbHxcI5ojSS8h0oHULesJaUCLuugwA5qPVSPtYgH/4vJ3Vlao7nC1WTVUG57fJjg04e4EcgtCI
t//y28mrk3evvz/9+d0f49LCiWxWEzS1NM3unx9QYclv0p9HTHerub6/iiyQEhuU37RVOtCBQ4j8
rITBHUbIMxBBGZnvgnAKEj1B5eyJHWlfGVoeKdmVlTnB9hBIMnxr5G/xwgapt58wdMnFzxqjuk+N
aEsWqUg8UvYVkqC5HNN1l02gG+WDUu3C/UP8hFJtxw2vQdkdOxjPAlyKhbpqHeIvpEvJZs1duCDz
10Xc2ab3GHkiVoKZ126+BSLNwCr6Y0lG3z5ctm6c5bO0HViIiryx1cmWiuyXGBeNCfuPVLAgLRLg
EXxXmNqa7EYWprp/tgbA8oYmRkcsLEHFz0bPYz8SCqTSNuODh4EnsTXIMMQ/Jkver6PgUmUxh4kS
cg141n0wfMplhTI8H+kdDK7ZMoSBEknu4/L9UYt3NwWVKzrIfnLTzzQ0UR250S09THmlrYy5YWMa
Ba9WzrgVdHJSJxV/9xENxs20YWfteWSViW4jj6k4zLfYT3s8ThCG5fpMq0PUjDx7OB+4idDPE1v7
bUABMr63gw/odEbiwKuGbPaEr4oxCt2y3yVnATQeWmfC3HTQ30SkrAqOAOmePnLx4gvXIfCJUYYc
mxtp1j9MhvJZGIz6t+w6dLX7TE2lC7OST2vbxVJU9TnIE94dJWWXU3W+bFXGDYBSrZsZQTbFK4Gu
wh5CQvZ+/h+0Gq8IeOJQlaKvJnDjQCX086svpTD7nGnR6Xz85cP/BoyyrA8czu7mgFP88fz0v/6n
X/2Kw22AgVR+7i45qYLCRz8c90aidN4C5cWgeHvy9jUDAlHhpfk3ZmDeLesZhiPttsA1i465AJiM
5EMmR0+iISgQWNy4OdqCzlD+wBClG8xP+jsU2RXyPJkNh5bD3Sr66C5miv6EusRl0as2mx4GLaIr
jDjdynmImeFjx1xtjq2ynxoKKhY6HLsu4qsfxFeRUCmflOtntTS9YT7zVPqFYsgay5ZxbT1hJGnZ
p+qbiyAAIAzos9Ot+wrK8YOi9+H0h+P/u+f7KEnNxqqaQxxCGK9B0QBNbsLP3NQPwhGni8myugdj
QMoZnfhDxrpkMysGTG4aPqe1Biyo0LlImsGz03Qq9AeRK4nIbToDagAmP9C5gvD6bfHNyBwWAHy9
/wZ0vRCmEvTkgF6/gJ7JG0WF4kk6mVFhk71/VPyJ0Lbvpns+XT5V+vZ+gGdV5ntMwGKfAikOjaLN
DZwtY2lkib0n/ZbIHX5HxT9il0s5yD2rygHk0sfLsdiiknNt9iA1ZPg6hGp6Tet9tfwBl2dJqQaF
/IvTUGaFhwtnHoo7UViI2dxwgQ61W3fCp8d+DDUm6U+6Cls3KN/dJ/ZHoV5gEFD/pfoQDbL9M3Du
uQNxyPw3gHfBYTL/9R/TLMAOUZhbLVTJ3aDHRuCE4ax8pWrJwFZoYOvAHB+44ftbqbmHVkT4gZJT
D1xy/FOGxqTX8bftcTiGjj8leDH0XbjiD/QyuQjkAvJwDXhhlv7KHDgaQEv9AUQz/0Ys7Owy9nFy
evF33lnpKsyh2nN0kaWf8q03pg5q/ZM3KCUxff5xZxZUEMwbv4Y7KT/l0BeA2zB/E1ZaIzF1ksHp
sWyUapg9Gbvh+edTUcBHMpEaTSa9fsbDlFIPddqyn2fRUoVb8LveY0STYsEELajtpH5xt2u2HJnO
5aackblW0n7XuTpYJ05mIcs8zMZwU6k+phyzzGNUWXyMX0FcH4oXrKOAEqJPJGHpAr9ePxPcIVIZ
aufAe6fEGi3PF9+F+grKeBzagIOwZGjlWX2eNp3qZtY5VS4Ulhj499vV+oTivDQsmMXCu97eTG7q
5fbRLlKNdisWPCXG8N+WdWreljgRLj0SVCvGy7tIZfGQVVt7t13PfKOqtgBvPfPftqotFl9WNViG
DwdakjwTj60emGFxp6MfmUrypjmZ3eFVy/yXCDwEzwyeTBvLfmyWtoKqwYJt++B4Xo8tQS7/4qU6
fqaabwrmD3r2JWKEzxsCADrK5MpaPqBOXHT5MCj2/ZxeCbtLtbx8QIyZfeR6k9bxft532sqHCXQG
LKllRTF4fSPZ8C+KLac/9FQ5b/er4R7Km1Jc55v/RrXBCDI7Y/XQSL5s4YshTIiS07Xh43PKvu9h
i/MkTBiDqzw2SwTaQLoOurGcqH41fbmIzDYLvXIqI2Cam+O/V0uISR0HDzIriW7d1dbet4NcRi7T
Hi0dC7cNYqP8VrX4p2lTWbnWpPH+ztQhzOPSu3J/i7Rd5swEJgOTxvs7Uy6dul7KbIwsgiw4sRPG
CcaQwxLAjxSeUfSO2WTAPQieWeazrXMPIqZFvCQKycuCsDqL79/S1e7F8L8XFSAUwKiu7s13+I1D
QKG7qA/ngYJj1BuBAOuihnudDqE6QaxHFPUwUNQQg+Kn6m612bPA6hXfV6Ng2anH9mfLMWJ5tY2Y
k0QTQ3HNvATOU+avhIjVxHUcPMLLbwaF7McglxfggAZHNBFimkXzD/h8Mob/CpQ4zWoJzJDIbcRF
hNDIe9MliosH6DUp/pU6WuEAke1QOSOsAMaFqi/3Vj49yANxHNOAKk0i5rsE3Vps/2M/zuyRYfLC
9ezyT8N5Rffr6P6dPweymaXe7SBGWO+YeZ5KbWg8IwHZPFM3ybrBdpcP8aRQH3qQ7tEZscsfzQfV
o+9BejVwWCAwEsO/HR8lsr4Dca5MQMwF6GuSAu8XkMdcLjxwVykREiJx1OeU2ZUotW6yTByxzywP
8qTKg2WCDlFXJt31YnUpwQCL1Sw1ezFJelZyyDEsW2w5ALuWgR8bZR8XFJE8gT+NQBgGp6kUhCMR
KLYWhRemjVMUsyXrxQXip50cuxb2Z2h4V0NnRZq7ZrXbYCDSFWjfpxqQMRlFYJKJu4cz9DFOKTrZ
YXnmhIUv4z6FzH5bs8uZs6XeBt4CuJ+VsxWPDw2NGUK3tHmDm4giwRwCRvK1+x1uYrRVOX1cYmtz
f+h1o9PL2pG1JLMHo+jpZ/aM+Pylj0v4sxc+17f/16/s+g6X9n/8ws6F3bat9P7n6Irz5cFPLDKu
MR285VcU05IglACZ6s5ci++mtGCJzxii2L655dMa0aNAuB0GOFOwPHtFT289PfMUbTf0oVAdAjno
DTopY2rVCRjB0vtl6RdpHmeLpCy6SEit1z5O8B78qwpBI6/Ohe/7oOJ2+nfvTnCfdGmCdQaIweRN
dAbHqmY2oVzn/XaFlrhaq1swMXpC+AWEXvhhQdPtBAMZQ6u5/WpcU5YOJGfChKlY6bUFOXyH7fNf
q+r4egOVz4xJcJxNSD7hs8UdZ/LHoTMU5cvMBJ1MLLDGTT0nqdvrMT4qIYY4eSgRddIBp+UUcanx
debApMMv+yUJ+PbKCY5VOUGzhdgCwgpAJ73wOlz6ulWIf6btJ4KaO4PMiK+4vVSD9Ghfs4leLhNY
Qk/lAsVvYCECNEZ3fvJNwozvY/VprQubjlWujszJTBcdUCjWDSRrs/qC7B1TY+FxM0cESQHlV9pR
S5nFyQcTOJib3QxsGwC1tmfZoZqzIdlNc+WF1XRCn+jA+Sk6oCYTKpfBTKLDKOuA66gaMyjC8YVG
FLEmPbWhOUOgU+pbGcyvLAzcx4sP/8Vy11czgGf/OD39f/7zr35FvQX4byAQWapohswiLnMBcazE
FsQYBM6JQdtvVNA2I6yB/QK/WW7APtAoshJrSvk9f/kd1qVyUGZkhWcTP6oizIRjhHQ2IHxVXFzA
sQOm7Wtznact7+JiZPVFU9MMbpvz3kNPA84ytAXNFtV0U2Ju/GnRvaRv6Jvvq6q42W7Xo6dP50Y2
HRJ33HC1uX66qC+Bz/epZBjebO+EOJBgASUqCnwmuFZckboK0E9y+8mLwd/rWwr1v62ivZ4u5oSB
BPuYrQ8/OlPehPZdUwPCGAMn9dhArfYSMrU1FcaUQCmhH2/iK3DtsBVREiPOBR9H0yuavnpvJo8p
IZwgljUyUQpkGcqffZ8dspq1Eylzwj/3HEkB6mh6oyJ48hcqKHiKkqSRDZMiIoLp0CaD01oQxS4u
IFckV15cMItwfX0NYzgtXvHHzFzgDvGnC/f+nbBcMDy5NzEWzl/UDAu8mlQP60U9Qw2jCMReSYiW
oNL13CXFe94iGS/ELdQXv8IaRCXoej5SvaBWX1yZuA5x3w7DfvP+zqTnZDE19AY118k5kCaU/dy6
PF4fFmoV8sQR3Ve8bIdLKCQZvMTJbmYrC+dPgAVnLl5b853Z3+MpDZ4TuIr63trdgF6ePVV4F5hz
jqxnCqM6DuxGP0BBmmJlwOFqueIo7MBbkzMCcjj9CvxKuDi87dPPAKOMvwLDyj/9BPRxNOLAj/gl
v5IWh/tfpsn9zBzJ7AXqi83W9yNHo+HN6p7Tlwd3pWc99D4gqy4YvRip5PBhOwCxilx7FvNJe2vS
3qXpFuKnU1FW9h6aCrb+Vhhmh//jSysZVSjwmgpKcKc+r3r1LrMv+Ll1eqcFWa0dpPWYBzG4Ypo0
Svze8IDLtwa8PSjL05VzsvStP0x8ex+y3vKcSoTZNsL8VN4PXc/FGNahSwNOUWic4sg99Cb2bFDA
5majiKC2FOiNlyNXXUWGhcIOIU1t9hEhVkqmO/IkrJURjyX7kOXWRLLJhH9K2snEpnZGK3wQsTVy
rUmIO1MA8yJdRbQ2/iYTTqfEHO18vPzwv8rlZGd2MvjxcXb6w9/R7WReNzNwy9oTmTbTa6/AEXJ+
zLJ20ZWMXQaBRoQjuqfYawoxVwXXFUIsM4O8bK6IaYlJ6ToegwKtXBkUSS70lR0SBetmIvVQxj9T
h5MG77NTy0kG9Zekw1Pzn++nTfUbmfbyhq+EfMVDCLCevOtZh1CbOlJj8OTGiNXL1XyPOProVwuk
Up/MeozqkJRNSONsVxXe66Oc/Y4P++ebXQN/VZV2lHHd0VJ6ggZs5qBEaT90HV7nRkJ95IN5KzX3
wEfHMxfeIGevl1iiIOANl7tcCZY47w7I4kV8HzC7HOo8YMuB6oseJDAfvQNjuzIDZaqw4Vvwardx
INPzFUEPQBTzhm7pHkJSWFo7aRVxwdH6vPyTp9NjiRv3sd7E9usEwbrBBogDlnYVA5szAXhrSqPd
WknyVK55+mGNfdpLMJ1QnhYuWCRwcLoZ5tyKvgIvXpkX2Q/ZnC3fMl3k3TYlTwh3q2eN4N1KTR8D
smljLRhinPLVFOZobaMWZVgCS9piNZ1XGyf9ecv2R3ypOs7KioJzwAQWPOeJI7HvzZDMxMFD0EX6
R9pspVijKsLXpLfeWBaUob944X8PIRc4bkdxqBgsCDLPS+oHMnDBix7gJ4Tw2N6WXnL+AWk3qWdS
wVRSaaFy9HcT37M92TMO7UHdw/xUwdoiOL5kP/TMS6hRNLnFc4vztnudpU6e7vYe7JHz4XZTTxd2
InVTH5KTyZ5JYAqST/96bF7YY2PIFW4Dk4m6WLXShzKRHTvKwTP4B9/SOHG6Vx9bOLlHOuzEGYAr
jz2eQdP7peUWDCHh/HnolzMoEtkCUHeduod1m5AKNI9dCYmHOmlpq9JCxNedsEKtmyxYtG2goVvw
rtTo7SzHdtjWHslzWJOC1LpVbvTAggZTgFslmfP0MmZD56kguKLT+xhq/8jssveb6drcbrZmzIHj
V91gYB4BwXjBC6XAhWIP6MtqsbrXRpl7N/lkZriHcNS6v3pehfI2CFei2e7BhD20jn/A1VWmCmm/
uSZv1MmHolrkg0ah8KPO29F6lF+lquF7kroY4O6bn09fj4qTpXIqdF6T74TAYsrG+2y4Z9fcI9aL
6Z6AlInmZPTL8pdlN10H3jAg2rHsskV60ccgWGjZmISeGPuPg2FUdjcGg6INhDmyamcKHz1e39fv
3v38bmTk1tslCDOZzmvprI3Xr6af6C6r5md7Vzx+Dci31JJ0JnpwlOqT1ikfbsUJMPTexC2zs/O+
aKnsFNX0Z6TH9LeVzG5B31M7SzjtucgfPKKWv6ZQXSrw5sRFVtOm3c2Gex2E9pJTd9IDA0lyAxPV
0rdsBl36sMaY9cf7QRow7nYPaAMCKaOTBTck15KHtjn2WU35sKy4Me/J4JwZAb8B9ImdzcvGaox7
g9T+uCYLzp5sgBH9OSchC2ntok9JAfkRftyh1G6+LDZsz6fGuYXARrhefBcOgU3EAJ1h4FFC+hR3
oe350HUZ7poU5Yyj0en8oyYpNTMT9Z+dBJA5KDIYVQD0GQO0BWoFhnXwo9ehRBt4WAMTh2xjAVOf
KXfodkkvZYhtmT3cwQEVCwrQYg90tnPz7CgtHaV6aL1ZbYG5iTtgMkHWEPKX/+KO6qVvMejMpm48
fvXD+9CZLYXN/oy73vMj9nhDE/tMgfcefmhjn4d8Iw1YjqvZrV1uk1pYv5oJVpsRWr2ru3MHm5k1
r3m/JhjPKj765m8AYNN/XmYIFWbTNQj4v5vGTv3aa1++kPfFUhcILfW2kGbn/fjVV6Ed7R/l6Ukp
x4g8V9r69g+tZylFqO6UrusfLhwH0yEcIDc2UnYe3FWPjPqdFUdbzFJfVLN+sDyiWU1bNE7ExzcX
1hV6C3xIzkZVGXpPJpzP83UIGsZIKocurrmZV2MjENq9Zp7Qrrtwsn9frashUqdcTQmpEq59qI+w
tH2NS+5vRDVth5zp5B3XwQV0uSLKcIcbuPQsWJp6ihT7vN/5OP/wXxCnBsH3Vnd3q+XH6vQvz9C+
0lEmkhVhOCjHLqGehC6rNrwtTubIfjJZgZMBNGKJoQI9Mt4ZIbq3qJe38O+83sA/6A6cZYQJcGZZ
qcJwhJrey5QW4TDkiBG1pWpRfUG2+WqbzKnshR4pGxGoNBDvhrCp2kE3DUQtbsSb6zBvCuvRPCe4
wXxVMHhh7If90ph8QfulXcmqZ3sh+j4FWEDrPq8g3SWIsaiAHoDE7qDCKKmfncF/DytAEvtFmLlP
FWsrQzRdOLuHNotf0qSCDVbs2YPi9j6IOyddMh+v4AFP6PkReCbCfybB60BNFsZuZ3diTqwVlSnV
+oESH4Phno2+OYd50TOzvZc50KX+SeC61lO1tdpn34zO08f8gU2IQlBV2/CreWTfIC4hWXR3uQKS
iRltr8X0kzmvMIgIxh5VLgWbE9xcOkBaB7hrVoYA2fH0HqNsoL5983Ri5uBsZYTs4rvieVbAKhHp
0mQpSVjqF//Gw9QGaJgAPDhInLtcGTmfPmS+g3/ht/+6zzplnOAYvfn59ZtTJBazD05fnbzTT/7p
w/s/9lPuO/imuKpMQ8hXZbmtN0CWPFttAOB7kMhDYNNNcVsv52DzX1ZweQePBYKzNt//6fWrkw8/
JfKyiWaK931U3QExgB8vnbKnkQSbOKOzvS85b+/znYxRO3hlxv1g1Eo80DoRFA4qbgnmWPPc476s
ctBVf2UFPWgOpu6DaPh3GAwfxdDThZbSvTUrE+L5HbSNOEI1N1OAFLdSJhEIkleAhzFeUFbn/CRC
l+kn+allqHn9yYpQK4jQzxxkqLnGCGVM1LfxF7sKiwDGcSoM+N3339xmZIDAwcue+IBqjAFEcGDm
CZHNwFzum3U1K3uStdcXCFwnPhRCzFnKM6HupP9a+cDUeTWbTPqeeJirLL/6grpyTldVKUrVlB/5
FeWHiXquzY2krWfhPdG34ocYhgqrfEiNdfGu2vqprrt+7jdAv0m0IgGKBJVHMFbwWtD1Lkqz2S12
c4m6ABn3oLaY0lwTKDbX1tz86VfYPEjPCsVO1x65qcKGLGcDLm1pzGy3QbplfAYLS/m1oM8doF0A
CtJ1DTAX2HYbf5ihdldNX1b3dtqPe6aPcO2mvdFJFJ7OxdHZnPDj3qYXtWhK2LwbCbTBQGQKf8Zr
KKSQcYmXRSr85FuIPvkm2GBtuGRvBze+D72kVIIpoKrp3RnegMc1CC4CQA9OGzoC1TqbQCD7nQcu
m4sZvjoshN0nLJD+RVROcXf1uX7SHSzxQpjRYZpS3wLmzwZOdjx5BvCmw9CfFn90U92twDfPZq1I
dqimsxssNRoiOPtmgfQKYKswVS1GmemB3uZDL8muxYmF9eOXZa8t+swfg7jQNiUP1YQ69UDVTmJg
wGUotfuULhinr0A4zEoudst1PbtdSL+6Tul7vRm27bL3+PyysiPJ8RwHxHZw+uoQajworj5/DsJc
EOONWf5VSNsL75Eljnab7YqTRZOEH7vrcxIZ0MnBJ29+9/LHknLFom2XCcDw88xaZb6NHq5u71yB
ZQK+1Yo1QUOBxyE3MTbUSa3+8Or170YoHXOY92yzaprjefWpNuI06J3ismer9T4qWcPpQRfrWzmo
ABMogeqImDIQbuEbyumc4LFIChiQvhTlgLYeEikGn1JfeYoA+C6+RtIZ09UINmY1b7D78KmI4YNe
E39vTiSANbWn0ECLuQKujKWSqoiFyXuYDLhJBQVyr4tCcNRJRr+Pn9s7DJHppFM98286hC9QCgIw
QkVhreI7PPKuFeoj5kGK0PHWq0k0P2iOQbsh3RH/9tv0x9UOHadBJKmv9o68Xq7tZviQZXC1NL1H
YzMFcT/VORHp2YBr2YfPUw2LtTn2EbsbV5TXfYmdP7wY3d6bPezPoo0tRsXzvySlDblU0FQcOl2U
mXw5fRnTa4XYqiQ2qQklUsxTiLhGmZOIAjCAF4O6j3tcVk9NMJpc/ML073QznW0T0+wrERi4ULil
ER2Vl+w3QTKQzIC2EceOy/YynDXVx/Mgg01Jd2sfd/Ts15QjzEDwUZTeZjixi42aR3DoDa/qY0Zn
XyHuLwev7wBRXVPJmGkM+OTCgIYF2Yhm2AmmRe+rHiRjjnJChwQ5Q01ByDb0fE51xQDyHEghuILJ
+kliXbHVcrEvLCHENbRt682GVgn4hzfI4VJtSplkZeBfoDW/TDX0yO58f1PPbhjBDrKg35e9BcrO
546mFU1PFN57/InesG3xKfOzZT8iFlFRCfQfQ+B9shnx9se1Fh5IklzQ74qLVso/k9bRNpGTyyLi
JlSJkHHc/nl2/Bz5t9jPPCBpV9m+dmmcGyTHPAXFjQ9M+sL7sm4QPOPW+H7TfA0xN4IteBMik8h9
vfzmRRc6S/S+KwxQwHsvOyWT2jnUOpvShExsaUpDjxb+el/bHcLNO5VJer0ltIxKPkNEYpdhdO5D
kXAy/X2duvNFJQtMeVcbLxqwiIotja/E4S0eEznRgK66fL1lzu6puyPnVSlUkLvC/rlrMnVHKHz8
RS3tS7iTUv25WvOq2R66uqd6bYN8JBK+12ulmYtGBhK+P1X/PkXaEE7IDcbaxHIi3sVVPXFyYSZY
WkIEavJ3wvMepYdLCLoy29od9iQ3BbKnt5eYYJ740eSj2elmJulstwmloUvlsE4W4BI7OIVrBGlR
xOZqE06GyY8UH8v56r5pmeuJcuGrL3QNaCO/nIZeo+b5nAIb5oRUkk7GXYFFxv2w5K/g6yEaOkrZ
bPrF1wHwdtoqAWU8i8F/cQOAvj03EsgyJrVZpEfGog1wiuC2jBcTabOppPgap5Y8Jf4CnDM3Mo+Z
htXqZaI+l6VuUjb4KAeRIutsj5nSfRpgpypVhmPBpg6jzbydAQjkjinmWPQwgPyNNFYguciSrwNW
q/Vi17jtju6s6YUpKsCxv8Hg/IAnClPlBm4zgYUNoWjGUoqvsbEl8y/HqRuBX2EpNmE85S431fS2
k5yEnCfiETgY09mBMLsgNtxR0raIQLFKaUkGY9sIHivwDrPGhKtm3DCGiMTUTUX3RV1Seqi4rz3T
N+opB8UehPN/Zw/pIU2zPtdd/owjyx9AcMnrtuB7qcF+6KQSKnXDdD7PmnJU9y2rey0YUr/1MEMP
XIOtqG8F7oMUzfhE/vpaW4pUFWd360OqCFDh7K5QHpu77bNB8fXz/rD9aFPY6ywCbUiQpuHgP78E
3RGLFGlyUKi2QWCqmNTduMNXVKMX22ybc43QDSi+9Vvwhds2Vt6U5Wrv6kgU8VRFIIoXGP8Z/yKy
5rEyYyL3O2ufkfw9tZ9i2F2DWydUvKHgJc8EYsPllELHqnvgtlzCLd/eKYFVkGFR+4OISFNfT63U
hPpwFEgqqhBUnmM2WDmC20Lpbv+mAolerOYBCTEgSh1WVYZpMhfhzWrRmC29ggHw3bOYbbOBA3yG
N+5UNQMaZCLRhkosVUQOGZPxm+YVJZrfgzZAkKLMkYCo//6RYLKC5kHdaZuivNxLNQY4kg4hvYCI
cnZzDPvm8gpGB5WDOACzKTBLTPFEmW9vGE6vmm5A9DZXX7B/0HdTVFyAjsWZhsUrejaSCH9PRoSN
w/swPjEzDc5q3OoA/wNMIE69vDAjv0jv/dYe9TtYIeaabYYaF4asCVgHtAL6wBqY0ogLQ6DbESA5
dr+IJA0X4Xn1wZPo7LDhj+YlxtGTn2sveXFsiHMBiz5AXLUZdAyk2cyt1NW6Z4simE7fTXUFPgd8
kEApCAgqZ860oS0ue+OTvW889jYsmtk8GFm4ppZBygHKY5YYmB12o7HSKZm/c1Sb+L24BL6pzLxi
zN8JnHgZWyxJDS0bBmfZgjlABTps5Pu+xQOtsgGIXYg2RUBT/mPeZcbcm/5Lb0GPTUcHSELrLa5A
oqQh9RGhZgJFh42sgTB6zYKa9umNA1V5XZOLL2LMwM0iOuNVM1LnY0fv6PZy6TrHlOnQCahB5dma
lKeEJ7SNrgLaic67z2AflHx22KD2alau+/3zSJyO+ji2l5OpDOoB9Uwzoa41tWlJOfptsfLrTjq7
NJ+bnGFf50NcWifPTBPjb4bfkzL+QxrNG4xdqaMWb/PAgiFyIB7+Y6vY1sIvruf01HYZdTm6U0Tf
flWb2xdOd9REmA3yOLgcinOOdoRu2RE4OWyzpJA6oqyyBRPhc/Np+ZuUmke0mXUzvWzKuFviBoJh
4etCWkA/vcY6ezhYvtlyZJ2oBReDPtTpfLz68F8hLAH9VCfr/QsbRvfx+vT/7fzqV0dG8FrXYrEn
z8fjF8O/H37Ta1zM3XC97xwV3//zyze/ff1+ZH4eI80J8DbCVGY/EiOTAV4wyWMzM0O3Vchp2jmS
wAcIdWEk2zB4fQJGjrKiYBhyWLW+fj9g2oDJUYwNU1VjmlunJp01yxZT1re6nBi0BHsO3adQNWmO
X7JOXO4tmS3cJic2sX2CuYb4FR4XG6VlnVSYb5QpXVG6MgIsCtKgTWHPFK7vG2jeYrFHrxUqQZmw
2K5Ghf1DcbO6B7Ec7aEEmLs3SR+EXFgkUWQYsqU0kGW6YNcZFINLkH6ZYxdia+b9ggP5QZ2KnJQa
VreYXoK1+54Ii0CAwc+S/xh+ezXDG8xcDYJgPZr1CfS4TrXsxkJyYe8t7qd70hbZGz9rpOul7Rsu
vyu696Pi5dKBGjc3q91i7nhup6YSUwA2u9oteJzA2HYHGuMBeBErAp9tbSqMRjpEP9nWs91iulns
aXuliuge5onDJXz33XdsgqKkzwf84wW8df4zHQcaZVYXIiET9jL0LwkNlxVdDhAaDGPrbJFqtAWS
7h5bDIRa92aikrDpOhim7t20uSU90Ka+BvcYcGswt7A7SwRdKrGN16DHogWUA8oiGabFdT084adg
gvMyVDqgEI4/ihfcYjrz2JcRhbpswlsEOvTARjGB6etvEQyc13AcIr6zVKqdjmdPtDBrXILuyoO/
3iS+flScAJwgLlYsk1bEPyDII/gszLZmErmlw52PWRCOiIaBFqeFCvVEu7vmelCUMWjm6uoKCdYu
yfXLHH5UtaFFLGN5LxGEb0Mv/AMwAbAKQ9b9lubgd0oNCBUW7WevKH4AtWz3SdOl+hVP5r8sexBp
EWFrekId172FUi/4DhEBQ8mcFQ7mel32I3UudU87JtPMHBBbRKpG2hoqcEMlosPe2YiKOR8u+DuJ
mIKlOUGpELPDbQn52kjz9S3st0YOcMSvtzAcsKNMF0b+vguV1VGVynJmRAmG0sblPEOi7aJHDB4z
2BxdjlTl0H/AbBCV2ZsgDd5CpzOKVYEyqIHPQXRar5rAQpMcgifNv9EQ9HpkVlE16Gu3kx06njYc
ueaVc8gS84CKMTeTFBywOcRg+NZ0IacqTrzj42KJRzBZ4zCEICWdyM2caIy3AP42EYmoVJHPZuJ5
cdswXDD/JN/IW0FAzQLzGWKSsPqJNenSjQpJ67XV0hupnuKOCmo4ygJRCrm6akl+88iqaf2uaC/E
24W4gN63uyVKJBiu9aRh/ch3MNfw2KDS3Sb/8ebD3wmY6FRQsc1lFSldPtanJzMMx31HDwqbpHj5
/hTWgcBlL8HPkUQZllsaTcYAAgn/NImWK/kDWEK3q9XCBveaf+Tn3XTT3EwXLgpYfm0qC5W63exm
Ww2c6ovKLmLYQ011reAEu2296CC7JiYwMxndpqgbtuAyvwdOMwgGN7/g5WQy7Cj1mSlnUHSvKyPx
Tq8FROXtH09fvz+dnL78Leg67tZDfl+Cvbd7TK+7mj1M6eP2RkTqrvfr/UR7uXd9dgVQr2Cibsd5
bIQeKX+afpp242x/wotLN7FeJMVsrZJ8QrjG0Os+bqdZZcdPGvMfbh6sNShwACUghS38+/xcsIYW
BcrjkKTTefvH7yev/3AKxZj7U9d0UzmZzKvL3TUQqZnNuztD15qu6QhMfPry5EdMDWlVPeAPLKrT
eff69+9OTl9P3rz+/Y8nb16/T7TibETOP+WLQfH3fXtfCcMLvhkUL/qdl++/PzmZnLyfvHr9w8sP
P55OXr/5/udXJ29+myr42bnJ+I2oASzsPC0ns4n+82p1GwVnvX399ptnL5iZxVxZVrcs+POybHgZ
UkzWo4DrGrQztDsSqDzBh4B75l86IbIeYDNXmwlouifr22vzhEjzPHw9YMfgL1iadPwrrMbV0ly0
SfdFIKLmknVVX8PKMJUvuzTjJhiD1u3nmsC/PNjnOdOuqjBbUookqBO94pKhsrydBn3VNI4InWrg
vMSIhBeeS5smPErwyk9IKD1lV1UbLSnkQCmEYH6AMzKCmC+gw+KGIiLMtB8UiqkK7n0sfGJy8H9z
AmsccE1aoJu8hHdUvAe/L7x6glWXCXO/GX4zsDmnxeSNsJW8RbeAAU3XoCS8lDUivjc2QGSFYbRk
JKYbamRyY0pp39wqPgfVsrR+T4m44bVFbJCALWl5Lkw6GSdyNSeWSDCm8W6uJ530POvhUuG+WaKl
3Hzjel7N2yXwq3nEcYmtWJPvzuzsxXlYJLyjNrz94+T7n396e/Lj61fJyCj/fKOVP4GzdIKnYDej
/rxach9FOcqr5edA6mBBV8uzkZ7J9qgz7fi1bcf7nz+8+/51KkL61QrchgFz0ExM1ISBFbMZHjQK
iTgjqJOoKVF6X4OxnhYmaj4QTtIszr7pezjq4SxTITZLI0QIksieikEaMa9vzLW4oZpOme/K7Im/
idX5Zr8BjUK9RZecq1BVelT8viJdDrkXImI93DZAo4N3aqsuwfnZkLLIfJYos6fLoDhm/TG1mu1n
i2oYG4MyR01+aZFjsVWu0xmRjQq33Wc1uOaPlrhqb8cVN3DbowJnYNZ2Pwt5nvBbap+2ueWcDSZL
HHBtNrf2JpWgGqGwEFBTgwH/jhQcI2putp1BNxyxmhbRdS0xXbEwEklTwO1cz00wiIqEYuT6IZG4
K8+HI1Tf3K0aEO2vAVDJd3nfrABRYIRRdKQkZXZeKVQb849EBhqguGbzOJGdcg9h/k8Xpmq4/fhp
VGlyCqPPBDrzQv9BFfczLEMUENN7UIlLnZFwnojKV1eqOHOqiUcKXym4efZWIYERUHP4DTcM02er
YvppVc873oKb3e4LGGwodi4RPfcQvFJTKAQGZawWi9U9qXs/TTf1dLkdwfjpWk1xpphPKYXxFNmQ
F9WWbsP1nJr88xoYTdAtA4SkrXSAHoHt6q42Sd/+/P7kD72G/y4oZg5KrXA3uTHN3Lt9gsZyTNuX
kZeRbxkfTiAI2DIckyoHrlpgmAt2XLcJ2Cj3rrqbdT0VGRZ+wBlvvnB3C/Ze+9nkQf7z+8whXkXg
ceaeM6T7bgorDg5hfDt8/foPJ+9P03vJUfG6Rq00DLJqo3I4mS7Atrln19miDL1e9LxE50TE2oSL
VL0143ZpTp9bMy8u9+g1tDyGDgfvoWFxsizyhS3QZlYQguh91Vss/j/23rW7jSNJG3w/8+zt7Kc9
Z7+Ui6MBSgIgUvKtMYbdallua1uWfSxpumcpDgYECiSaAApCAbz0dM9f2B+yf3LjiYjMyszKAkHZ
Pe++765m2kRV5f0SGRmXJ6wBEVNznVUsp4O90Gvswc578ETHRsFR7BOQUU6bxurHpQdNwGubqDBU
WaCK4tBiR67DVGx+21CYORqpb2slC3+ZrSTmbTSLWdtNUDPBrD97/hy6wTtPLfbmZjGkbblS8qS2
EbKPaljjQebBIMvKM+JtW7PRCROX9vRUr964mDvIquNwA9st1nFKdU+e18VmZsJsssclNnI1DxiS
rj8kneRla5GcF667HQcrESdVFho7JFAPKOPVSaS9WG0Qf7jX6/nR4oeoDMu4IjtA5hl7RIZSRm8T
wUQa0t/IdpgaZe8I6C2K6FQDXrsDNdUtI/AT3bhGZ3NsblfbqcfGJg+vXHfcRyLUlKFbgBw7tEcr
D1fYcB41mvAi25e5kaBVOmmeOM7FovJH0ZVpSPxZ2K4XXHnMNsz336+LEdxwNFKKkQKYOBSAqw3X
70u6MZcX9GfMCsY/b0uJq8nXFa4IpIkVxWDDWZtK5B20hWg73bto5R64pAhCFnWont8yZba38SeP
OnpVoPJFoXmW87mL4hUHyqFyhyD1Xht6bqyVAFXWDCEavsyvzQD5Ha4dqJSqZ7uD8QfEVy3moNnr
lQofvTAGyFjV6F0vUrKsCS7bndcqgcRv4QQ1nyu6dp1tZzCE7UmcckysZBMc9lD+3g+sSOcNQY7j
dLMxLvKB64wyXI3Gl6PzhqVXG+A7JBL3C/YcBZ66Q/gQETxEhQ7Gy6ISOvzh90M6w188f/vjz/8i
I/BbFpBK7Aw3HHmz9NGPm+eafb4QL0EdzcRmMY5jvDAsjgl1TyP9JD/QUXBmJHHVhjB+9Wzg0IGt
CxsDGP6YTaIn+UKCb+zrNqeKCbdLwaYQgDUYYjv7QVg3sDCwh1UEL4ZYVd/qMtSIelVUep/7LYwD
h3S8YYQ4vdVwPDs2EtLbBAQZGzZ+0TiLML5hNLFOcH8zdtc/SQxt7sBoBQJHd5eNw2D5PbBrQvc3
a7U4FpKfLig908htFU9qTidsfRVTVSe7CXj4Nh9fLBn795ZvXBMWJRl5ivw19sh6KMHWypq1tJ9n
QqU7iqwnxiopbnupNbeBfZFQeDahSpQ2aRkyw73ke2O7xN5GLdzAN5t5rmj8CbBGuEO4Vb5MLgrV
hVbWOcbDFKpB+qlCn+AwghRWL9OLXtJ+k5tSjJcU7rQaHsvIrkdnxVXeEzKw4JoGwF1pO8Pa4/fG
yMDbD1MQDca1Ye4gvT5LPf3pyx+DdUkLf4+blsdHiGbSMF88U1iZNFVQCnPGAUvdpQ30whOD0Mzr
zQb+DAqrjqGyggkVL3igGIfqF4btsUBGmSksVTjcsrCnO8mXM5gBuje7s5ylA05B3Np841ykahQ7
GNMeN61t1I4scXFtPGwK0aH2QC7b6VfztCNT6CRVRWxvsl2s+KScrhriP1Oh7pHgIRf+/BqKuvfr
98u0ly9ZV55uN9PulzTb8iny4WBcFJcz8JVsX97TZdpep/96krzfvJ+ePjzsPaQ8NGAn/cEpXp4+
POm+v+6dPqL8v/vxh+G7t999CTPR9zf59P3N2Rn9b9pSahDnsytV3Fu6XW8KF05LZvzhdPnQxdeS
zTHJVd9uUeS8SZHtwFcnwXhK18FSf7G8mq0LNmsJ1nzIkNPZ3KiIdCNDcSIDNWG8X3iU1TBQmcgn
vQqTJ5GvTikS7lGDGZkc6ogD4RHDcbCJI5Y5Y7m5J4BTEqUpiR7zJ2nbGuEne8mb0cQygGc5UdYZ
/GKLXEFNgd838VlisxZYkDFCMNQRe4Sq+gmn4RRikyJh9PMVopmzF+KtqFAPPEnGbNk9Jnr3bJPM
85EAhNxadlt56cQGM9fxk9VZ9rLkrdsyPofXBgBLaI/pEx8jciegwwI4SVXLDVXmgfFEnQq4ptWe
wU+UVckgE35jesk72J1utktazDKiDmzAIVtnUPe3KzGLWW4XZ/kakseLrQgVzeEmt2ZassSOXuWs
r6PJ9dFsGL+oYUrNAmCGZVk4i6604+jdSTSm6KSXfAcrElBbBoADoCLil8EGMU8On3z+m17yLyOx
KzVXoEDjeAjwDVWfrGfnFw5bRsvomL1HWDrMxu+pF22LEjyJJOhIzkeuvtXgEWtaByPD0B02Jgju
90rQegpgxHlPjvoo/jSzVk975jONQvYnVfas2dFLiFxK5+aEha1ptlMdyfBTOnWl4TYntEwFSsfh
ipQ2Ri7ztq4YlPb9QrnpUEtx7XRUjmeztDHAxDsxyvqWU+/A0j5MXsHQccNHOxYukWcT9aD362h4
+M7oj8aBEVmaZaymI6USMPANWF5Pel+IqiAXL0z2Bc0/bKmJtE+f9o4r1hquFrRhYcKMoE1l8nA5
uzEG86U1WA5tYiJnlA60wbv8+XUneQ27pdf1c20D08iBQ8gkr3eyOZIn93h6las6FoHZiSbxmjKU
IzoJvnhoZHhWrrgfFwiFc2UOfdUgtdH+CF/KkjhzyCANWIPKCzyF5CDdp5MvIWQXVdFcJZrOwTBT
NE5w6cT5G2v9mqYLV00rPxRx0UvdeazaZsPMKY4uuoixo2Q+n985btq/vUdOcTmVfdol86uEfSE3
BT7SEeYxYaDXDyn9Q8s5BZZsLnEFxlLqxb/8o+C/JOd08mxwvIzn2xK4O3qJptJxwRBsa95LBkxM
3BxYceWUt1aD67IQwZ0qNZfqfsqbPFADxC+WOgxZxMqOWg0PavZ5sgg2EC8ADcgo0Wal1bJ9i1EL
+Ez3IF5aVD6j+x2LuywLBgoVJky0PBAp44QEbW1126akQ5Fx4r+P1LaOnWY5NtlqNmn7Jug7uq6F
BUdMURpgTZNAR8kw5SpuNwrgqgRYx8lGulWm3OpXqbHqsSMAmO7yUq+Xnw3Ypbf0sNIkeh1sIhxU
D5qa3oErytl5bV3fdW0NN1NAQ0P5ae0e7V+h7bky2oxwpVjJleLLuuVw452i6Rw7FGB3ZrAMninM
3Lesq5/A0s3coHseLwRbLLQng1nOl8iDp5P+p6fGTse5irouLg7YGu6i26VzG+UiPu2fAnAbxcjt
9O5eMPk2V1dG5jXX1jhOHtaseOI8p1XBXjihKc1bIt2tUq4TtOZonfVcZVFTSyqqedelWTZAeD4t
iolrjV/JHCyKHU2JuOsJjrUlooHBKSKzIbA1l0iVDcvRFNH+NBD7rOiZFw320j0YShvhpbXyJyKy
VDe30roWuK/h4PFhZCzqWeglWLYS8pBlku0qpLrBlXFc2DmIeGqkl6nY6CKBI8k36Bhtibrs+jkg
ckexajttgotfObS+SwaRrs02x/xF7I3Z2lj+w88Kfu6WxNbcWao1slMuOsexbdfUmhWD2yI+IZwZ
KaP6NA6rHvNOU+AGpDAwQHyR4m1FpWRZWM4u9QCCbamVxdqRLe92J9BMHo9eCzDmG0RxPPXZvOf2
Oa4zGNNJSotqUE+P3p3MTqthCR5wwfFd2bWsRt2E2XKczJ1hjButfLodr2+JEoGG/zt/B/f6utj0
EfwGUsG0Y1+/lEAvSfof3ut3b7Zn9LLrv3w2mdDLR/Ty4G8HB2ezZbGq1fO72ebHNaX6q5OR3v0J
tr/pv/ovny1R3j86L1+9uZhN0ZyvvnLe/mzefv2181Zb47zRRjtvfmDwk/Sh8+rb2RW9eey8+W5e
FGt97b7/oUAFDx4QoaZ7YjkerRQF1oD58wbcGKEBsrz4QDkGA6cQGnd++Yn78hV30XvxAm/cNL/n
DnsvkOZrN81PxTV653bvZUlvZt4UlzL3sqC8ucfbpd9YfilQujzLB8YGnrUfMP6E9UzlZzgu5kNx
Eqvo+Bu6+TBCj8nj+Quv8/F2XXI0cEvMhaDNbu4qXLdIKglS0BI25hlaPVJgXMlfJTWbWuOHV1JV
xb6lVTn4DmUePBCI8cVszooijCoOpyG/GaKAkjsZHMDceU4T7f2BTdM4QP7xQjfwBs8MOihlqie5
YsNkfS8YcRXewldCfoeASgztFzuaH1L6h54cmbUsEfxeHOJnxeS2X9OUC6AUblFFbxf0CF07l9IE
3CuNNaMx8lU8gA3dutVaSlU5uFaOptAnjpaecK4Yq0M+t384nG43HMLSFFk1hvhFRNeDLy4GkR/b
zjmtfytlJJ3sv13dDs37NItGGK3KShsd1lIpiuP0jtZp5oDZsjxkaLsRnnJw3HTBNe12OPLWLMLc
GSarPj/ggWsVMQxrGA2Y6eDNal2XD0bcxDk2soY8ZRK+WceAfalKE0hZvJjLmNVhql5S3/74+u1Q
5UC8qyl7k3DsbbU+YJgyATYH5AcRwcQuaVnUL5YG+dGAvTioAVnSDYBQG+YuElRtbqSx0cEWBfd3
xNWq3SONEuN0JV8nR7GrSCJptNt070irNR8TmdoFI0X7RLTZjhzrjvp/fOBbBtjdI+1un/DKPzVE
bVCnbYOjqOWfukqPmP7ozjx1jX14GZ9QM/r0PzXzQQPc22AxnwvIlnidHbiknltK5YQAo/ytfjIw
SzpRm6msBsMjYpcpo+e4JwO/iR4KMeitXE6IssktAIiVFikgNmLEi9vTKV9uFzD10IKzneHe3Puk
HFTMhfHA3RH7jbYZsfslu23KSPd2pqdO9EQtJvCUgrnIte7wZGg29XTLNZisXNjO1M19fvP2jg7b
FbRHdaViqcvJLquEGprFbWzb9dVgmtRIbytHIeG9VNxHtGctbhmlquHgPuOEIWgsrLZbwp5vkngz
UWXWtHLd8ZL16KBuwur/bB6L3vb7fCPAOTaRx3UcJu/YqsgNKiFyHxvbETZOyw08SgSGj90UGBny
wiP7ah3MJ7oMgREfLk1stp5pw5AxFPJ1aLxpvtue+sZshoGDIaHtPNU1O1/aGOGrAFXw95AGP8T7
hxgI+Gq5A2Awq9za26H/qeXlTLN0b6NeJtbwg1Q8KTmnEfA+y061QbUeRPK8gqAq8wLFMaxQY8ee
MxqqJ7yBPFkwm8zabYxqd5HPV8TspiZrqlVU9WsK16s2As3PrRhpYutCJoenVzmtCh1r23mXY/MH
wbJ+G4aL4OE2t462lgSxUKqxOuO5ncFGM9soTY6HNnekQyeBG7LPxf8XljRukVlBY7BVgSQ1frwP
kfih13VNEe1/xfze2YdqBNwCO8mONeRIq4aM3xRfTcZlbC2oAvfft249trDyxP4Ee4E6a4EgHrDc
raofArUydWNTlhdGlqc2AVEv93gDIo7ttJfHl2Yb78jqgr4Xq1gTRBQ3DIbSh2PX6nyWx/qh41sE
U7OxL1Uuz7/7Mr+1XCPdENr0LBA39IMNRxW9Henazs1I+wS7Z12V39JPZC91l5icgpVCKyvzMmuu
382WP4qIlQejY8RDMMhx6siiJ4UkuP+KA1aAWOPdnyajzo+jyef5Ml/PxkMX5jpgTWnjf29thiwH
4VuQqhCTlVDUGI9YqPzA4RCE9bEMgtNuuyj0DFx64SdthLeyU19YFTXomYOGEmYhlPdQOFgTWZ4f
hr64SV/2FuV5XW0i9qrGTW1kIGJ6ynHhUQAGZpuoOuVESz9tOpF9dl7jiSwnQ2JoZnCobTdxGJGM
4ZqTO53A1GjgjixCTSIlVasz8jFG01ybUx9yx+h3z7bLMcd9dbgRJ1DUamhdRDsu2be8Da9YM1cb
L+oHXfQEgNK02cxSdYLRVdHi1tq+VRs6PzeVsSgcMnYiCSo0BR3wGrgXe/Vy2jbFdrh+nNaZq+R0
+5jqYIGQOF+qAcphrLgxxy/IpJMscwHjLHJqneybUhwyuLCk3nBUdTUYLGvL88w1Y67zAlYQybra
Bm4gvxlrJmZoTEE0NNSQ0zo/4x45EYSarwfJ00gwsKHWwQD9NE7jsLj6XXJXvjA3JtNGoOF83lIU
aEe4VawRl77asGe3uA1uFJ9/YYDse7WD1WbpB5Eina2/44RovrvWotrbIn2Jwpj7MEicI8mm7Dhz
jub7Mx2JB1rfHly8O2bfzW58o05PdFluFhXGQlVayHk4SgzksNS+Z0RO5jkUq9cR100V4WHy2kQY
r7HUdAVXFFWx/8JdRsTTMt5TcVgesdJkbqfSld8dNmuYETZhtrwstZAxVKB8Fa6kvsW4dHeWAEfr
JSDlSiGe5g3WuMmWbimq4DTUhi+eswmKEO1SG5zICXI4Cs5JoUp5/44W71baCZCCNhKrinkyOrOU
Av+4BgE+kdZ1tIpTj5Catfpy+uJm1UYxyjEY1oDrsTJo25nozbyR2Qjul7IktKGyKERb76GKlsOr
0XrHBZ3ZTsYn9Fkh3lPgYTFb0Q1Gm6l2tbSl0eVSNlsDIRZDATHScVg16UCvWEmuH1221ZyhYAAH
tZ1iNywdrUM3TeS4ZfE1FhqE51qlsOi+GH0vbuMntRBjsGX2bVtOOnBQWm+649l6vGWCC+1Unk9c
f1EVl175olK/OTUtySyCAoMez5ZL5rciolnG1GBvRMYk5fCfa/gIzItixQyaMAEcEiaOrxK/LBCH
gZI7TguExzBmK3eUBeMrm7NOt3cPvK5utfrwmLQrP5V7XEbvOLpJatcc4ecDfcpcm7SDy3EZpNBH
HdIeh0xSs2jInGJ3kMc7ZgNKsTbKr8PRzpKvdM1HYGixNgaepYsnoR42WX5rxgZ+FV/rjMCOVbqb
k5V15iyxu/IDU1fohb+66slAKw7uWKFeGp4thx+OMa+yi9NO4hBEHp3tQmykAkZ8x2ryqmu+dHuL
WKntrpNkXjtJzCzKUcJ2PtGwCNYC6IR/EbGGUzmMAYbONVMp4ZD3qHmo71VTBH/fdTV315nJ1HFr
yeIXd9PuB0nbbUWnfpCyFEbPURgfuSKy28VZgZZbq6QT/tXQ93k+3agox/wMui258dFpNdxtNJv9
Hc3HXxtvcu0HZcL/n7Fdum1BR7vhln7XiMugOP0x3XYKWTeMvHuNDMbasogdpojOSAMmYUo3dTpU
6L+REUD6Hr45jMj6nBMGIgkUxbFta28vr+PvYSkPNatJEos1iNcqrGaw8/4+JxIl9CmWaVsVSLT0
E9g+mRT+bKl08rpYT2xr9Hm/FmniXoCZ7I+QS4Q1g81I32MnZK3dTnoIpwdGwOAbJaL9GP36iJp5
iXXCy3ZHO9KHzTVLb/vxUWisWHLdWW2kXi0wfVC2zS61q72TtOj/xVbWluYMMtoVchHVpjHrquPM
YucupbWOr82xU0Kq1XtpvG26l5DUnhJb2ChU4/F++e8PUCV+/Y0HxhTfSapfIeWqaE5VXk0Oa+8o
KordeMEf6lbtSNAbb26qEzVrDhDpy7W5bB+MXvHSIwSN6wm2YZwYV10wJk3IKkq56A3tF03Ninlc
b056Mi3hOkbKhpmqOu20tvm4qJ0TKgTQo4KeamGedl4S+ODi0hvOYRQZHsPCSFWSruDmLPc2dnW0
NuZcDBuaO0wAYB4M9nB12bjy5ooLuxK/SadaR+CN60hzMea2cmc5M0ymbxhfNVqGtsf29vxLrMtH
VHjpU61Q6n+7CN6omT1emsE/DW+84B6gqBoqQ4ZmzzYh+b3RmeNf9ZlzC6jdCtEEawGhJQUXqduF
x87FGTnTR/cgfMMY84vIzVC2i/BeEdbL6Uo9b62KyOVRt9U6kJBVi5x6gfuj6fHpXsJQ90rsrLaT
2emp3cnroCXxfRWZs8BOJuoVEzhATNnPE2LGK7p6+VJGvgvp6eddu0LfkbRzpwEV+v12u4LRD82w
f2+6R+Zqm390EerZ8pG5rWNL9AhQnG+715OvQ6hvOWsCmeezJWufq87t1F1wCVVaFy58Xx1q08VX
BRof/vzuf0JoOYlVd7W8Hn+4fPvN/80RNg7ouUubYQFSAkTFCUCZ5rzK5VLEDr5vtmeqvkn+WKwv
Z8vz58XqNgHwOjs5vrla/vG5FoOXicH+AFasBPKldG5ojgJ9gq4ffnXYK3yboT01WjvxM0yIje2Z
+pIeaPQ76Y3xCRN80YODw+7H/0PYvJEEYIawoNxwTGC2wofTJwinRubC+y6HFaY87XNXKwQ5IWJD
2yDuxiJj5g0qrYjDAwlICF/mZLHpwhTql7VffQoYzaevQeKKlUqn1RKFY0rYpyruqL5ABnnSwn6m
rr5YWi+vSATJ7Zp5liuZSKKQdTaDkkDysQ7isVImlp1d+a9tMfTR/nbDTZSbdT3ahDHpwdwgksE/
0UEiJ0nbtKFjq+0ENWVud59X44ewwDBCx2sBWqL1jRgzMI8bF7Qj8om7SmRNMIVZMpTVbG1XTtlL
kpdTAKgAtGTEXYNrqwVx4nyMbcO4ow7Cj118qpRaFzUPYS1skDw54ljuEPmV6lEhqA7XbOxxzsAH
xHLSI7vvm7h9tpI9A3twr4V7sRlYM7h36tV207yEItEzsFx2BM1wk1ZLKB5pIVhhPXFwPvBYLeyN
W6u45A7U9Am8sAeSFCssgs3Mn+JLOp62vissF8ARlpIUigi0h7FhKMsd5v71I0e6Nqg2dzu2iWPh
lmkQrEgFGf1EpoGSQreGbaVjxuRMvCu9Kq6DCfmI+XABG+y8xG33Ud9XtRl6VNEG2lENrinaUVl5
rKFaLPIJcPCSF5v18rZxalyXUNO6TjXzB3en7R7XQoDp6wNLiYSmW2LGHt6HCH1LB+lKD0x4oT17
9erHP774dvj8+2c/I0BROky6j9+/H/xD7z8ePUiTw9FkUtlmsyH6MschDMsIRCJlnW8+OYiFf7qe
LZ8+0fhPfj2P6Gs/9Ssffv/jG8RfClImrd/2DWYaoJWulsqFtOnv4OR0R2A0CfvkISPQmgxRUq40
GIYwF73xYgJ4lXaKsep+SLpdrc/B+rnqhQEFUUirp2In+sxBcuhFhgBPTrJ8bTbPVe0Wf6W9FF/W
obLmYOVMH+mnhjfmt8YLWsMLe+P/CbWHx7/leGoH+S3ETusfIDN7//4fWp6fIhIZ33LAMoDBHJ6N
2LZtXbYFH2sE7P9c3w28yXNczDnYoa3YI+qIkjiaL7eLdrBHwcfOlr4/+FiciZwq78jjAgHWwAu5
b9w16pV0qiIVxNddrC4zFmt92MIejsFdBWxOjNxpU9AZtqYtR6zr+XY2KZLr3jeGjdoUIG8z4Xt0
SaR9+DAbJC/MHdKxkxdALZxgaRcFdF6UX+O10C+zrB63vNhLGieywhqnzvCetUh4QNbnisaFRL4o
PfCM6OxK/fUdmiXvfUSVdXx1aMuDIkLvEgm8+8+QcvG2bKem8Wi7IgMxdcuYgg1/2T/D5NFdBVcU
hMpty8WhZ54rP2mOpBtw7Ty+mjtk6HuWF0MwcNp/j1sOJ3WOhTdv4FlVtMD4TvxN+GUBAdzctjW3
q9fuJCkSMT+IKwfd4fhyl2Z3Msrom1F/Gm953P0Zwle62Z4tx/PtRL5cdcWsK0uanDlM052aL0bl
RSOPjo9tN6fT6GV+rbzBw4eX10GzNVD5CA4eEmPG3EnNQPAgJM+SFrW7ZcOJ+2yChLUGHhM7Jhm+
1zcA9iObGGVRFZ+cWyDmgcVW6pW91e8fBIf4xWazoo2PLQXR4GOc0o+R4TEj2YDM+hn+2nDB+6vj
cytBF+4UdARF/jUxMRPuk1PqJVKX3zu3LcHtpbuOirM/A85I0EuHQyhIZNlUAsXMTazs8eU1IH3a
PM3Vtc5POdoy6TRJ8WjS4rdj9mZCuZjudbzOQrhuDVawF89u4RfR9gchNaXYbF4ZVIQfHKZlPrVA
6y6va7xsy82viTiEMBUVzROnqMahzmBzyQ4X2np5vUswtTqT0aPrhgK4tv02+cMUSmU39dxoOY9m
zcYGyQ20Dv12mXjwUZF7FZdP31g96O+gy+uTanTh+0M9kVSV04rfMJ07alwtmkmQkoi7WUT005tR
v9oAeYzWoxI7lnO3TT0lxBMr/LHjWMrU7JiZ5tKCrEpwKb1/FukK1pCZ9DMgtJASVIG8mK650W16
SdIiUtiSMIxyhngNZOkXjtYRTkPoQrgYQyN7LFSRYiG1lmbmk4BO2yDsjHu/Bla7NsJbzD2f4N4W
Ww6uImluA0ouNNrPcU8C/auT548lzr+UNO8kzOvQR0Wma+Ad+cqT1reDZ6pOE2GY1064l52wob1Y
nHZc0zkJbXazS2O4saXnVEdt0QiJaDRHcQw1TnO3ZLttdxYthXWPT8OiYq7tpkjW8Gq+uhCCg9dL
wt6aMX9B7qK2hGho9zguj4gdV9Vzq3WPyEu7ijrpAx/LPs36p1GxihlV77BoClhYjW7jWVKfLxwk
dxZYO2d2d1yOTqiH9eR0wndyvEA5M6Mu4bD/dLjf/IOVjheA4o9z35v1NrehcEWtcFXF3FCTaAZd
jnHewM8X/hojwjCZXFUMaKBdidsH0iJ+gHNG7EPmXSKWeWNnHMA8JLCFVNl5Eyo/X/feru4hYMT5
VtW2ihOObWGY80yF2BwfdIErt99Jcy6gFVyOUHwJLoezqmyFcdniEVPUkiVmmxYEblKDJZDEDasS
T+hJaZgliK7RGxKfZjV66t2E6KZLrApSuiZLetD7bHGM0mU1FYfP6ZoJkyIdUTxdNSupbN3f3A83
sETAGTPk5zMoK8xdtecNJtuv2IabSqI7iJP6DWIf4MjllRVajD6glUrQab9q00q3ci4wq8Njckon
qPjsLzEoi8RxukcSMwCMfBsMCqtFG+/LEjmkh0KqWgVkdXe1rKeRu6/y8QzW6jTkzkq5mgMToOUC
uPeMnpBIDCSXjIkk0lgPLSb1jR8IK5FGI0sdqtxwNp8sRje0HN2eHQarilLMFttFpeYSgQP6xSWU
SdslVbxF9UsllDiUdl2ZGTfC9EMf4of1n0aJwKblxFWw4l0GyGlhVSA1sM1bCml6IoWA1bReOA+9
k+DKHQFa76JLNn3bNQ6O1rlrxwJNd0QbXndFTlQTHx2qAVqEZlgxkra8Ss/BZuBkw15uCLw+ml/C
yRFcidU6dtE4c1jNbCj6wwp/6TgYQSuaP/SpqTeu3DdvuWRZPdMKdULaInHVIwlGNug6wlJyZOqg
HFUFtBUluZMEkv8ev84iTa6UaodNCslDn5ewdaQSdwMHyyTfIAYPnCXzazPbiTfbYv5kY8dFV5cI
MhkuOV+XRoppnp2dKlAuNaVsFLo1JBk8jko3LrEKlAlvxaLTrm57jCvf2x1p3tt318X6snT1rrxn
vI/7trtqsGZvZ7tbydFdG5sZw3mLMY5epCG3FyDGv96go7TWL++P10IJ2vtL2/hLGhVfC3z8I6rC
cLRaD/lUFOWs2ZUzq0Rfh3em2D2pLhKrdiPXoxvOVHJQWQisq5pw9TlQto2YTwVJZ101SK/bTNrv
D/6l+2DRfTB5++D7/oMf+g/epL5qDdkWl5ypKs8aofxEvAqcRxkPhbFLKq3EKMFbIhWiggVPPM0R
iL0UForOypc0MW+ulsamy5hk01k5H/1lNr/1cF19Wx5hQS/zW7Fac8jIjMWzXuKT9o2eJUy2blgm
qVlPA5AFhzK7dws6HgFbaIsEdky/xj5q5bHELt8uyaMGHx4jyqvXMKNeIU5Py3plao/dxLrKrr9h
Z+3cMBKhYjymmNViWq+eD5+9ejV4nrTctUKX9wMJpUccTLmBpm+7vGTeSONXlMX8Kq9ukWAKiB01
mhG8+rAtxI0WkYzKg5evXr34/bNXVuvfepj8NXmfPE76yVfJ18k3yftN8n6ZvL85OsN/xsn7dcsI
cBLaadSposTNAzPuFSad8l4RI7YorvK25MgOXr7548vX3/74xzcaSs+1GdChOUAAziHreYeTWXkZ
hGBr/Stdtbp/OX3ff/8+++bkX/unj6DBpiQvM1dfzcc/q5d0Lubz/HwEjslr4IlKMcqVYR1cXor6
alvsKK6lKNO3Vr9VQ/gP+mCiJ63uUoG2eCJNdGgF++NgW3No5vqZViVIxqIpLVe+Th2vBSZahbM9
tldRyAKbTXtxV4PMuFUgfQ84OxragoYWHxC9RUn35mK4KYbT0o5/JxlNJqPNAKekdr82RbungPPz
UuavYLw+8ak8Z209KH/7oOQ2lauOTWuioJiCIrm+f/HsW5PPI9XlSrpFu2oIy9PaqpJ+artrHedz
VwrEJszF2gT2GlTgfHbW47c7VprIfwYNy0nqcsSupjHyozLxeP8eNh6P/WXKZfTO18V21T4O1qUt
qfX4Qalj6qePFH634TV3V5t9AtNqv8ys72Hs1Fgu2yq3nFiQmx0JVeIWW0RVp6uFJO/qiylam7eU
NKe3nJiT6z9+7BeeOZYJz7a0eEQf6hz7Sgdo77FACZpNF7ad7RIqC+0dJ/y2zFXZuaIKodTuiPXp
EIXyFu1IoBHa6rOr3N20lT2vFgLDFP0ZHvdSNm8L+RmAZNkqgdNuH/xETjME68I8OVKT0WVON7eC
Y0rUmNmtC69pWlqt21TsntKWK5Sj1k4qTkHavjPLmrHmHUUJ7BBRUU1+aATTrW7XNGaQEvPJS4Gz
dHzfA2nNrnJMC6tyJE9QkJHQOuO+q9Rl0UWSLqduxUtypmN3Ucuuk7RV455axl1zDUCffa28v9Kd
Ytff4EGZ9Hq9ryt7b7PQM9hF3gzP5rIWPE7iffmw/X7yKOO/bx5lSbv3EAdstR09p4Yd1kKrukkQ
8WjTXKDYOcrTY19yV7A95rU4U9AGX81yRyb9kmPQGKlcUs4Ws/lonZhAYNslXwLYxos4K8v8+ekc
aSj3wSpxUfN4PrNxw33TJWHVfB0AzDI42vX1GJUNxAiJaUZgqS2agNCkA2HTD+LgzVIikaF5BLFF
PlYSLLCJ4xgSh64MSe8ro8dKnbUs5tC9VLMGG0doVSTTHkdb9S9mBrmfyZs2VR3gzMCE4JO2A2q7
ajtlGusnVyrr0deaNqMyJ7BmZ7Rhziaj5KbP2qWbqtossERTIzJ8svfcq3hJNyI2oM3JtGVwlImu
wivPSMN8Y7YdSjVXnuAMzgB2DBLGCRmIctuPrRrkboOJ3mxqC5G6W0NKZbLHb5vqY+OI3eum9EOR
3bJAGT8P1aywbd6osNmzaqqDc9qyOm5EqiH0qDIrcYw05Dj4OFPDPz7nSAPe3HykpsDKJxHfW2fW
hqayhHU2vpybuIOCRsmza9DimvQddqolfLjE5rIqnqtlZK4dUbzx2jLGnlYK/8fnXQ604CsNmydc
yzPbVCs2k+wtcAZYazzx2G2v/WAtgAee9WRymHSqed37EDUKZGdjO1nBWYFkNLBWBuxQWNFmrxzX
+MCdHuTrBdybWz2+U5erNdVhrWh42KgClv9wqFqLro2LrsOUuYAcMlRhB7OGrYJWcP2iwHbaGLQv
VHBjY520cNQKt0+JTsOGi3u82UBYWZkX74V1xlWJJvCCD50xt/yW4QK9Ur214hqOw59mOrtR30XB
NsyTMyLNCAaOcOEccICJ5zWOLZZVOs71GhTFkXoBpCRJhaFz/LRrEuWoiBmA0QOWg/3w4s2bZ79/
8aZuuHJRzCfCouQSX7IXleKxTYBNc0LfYQfYel4vULzmIi4gIf3ki15TlGUEuqaWxS1L6g1B2nuY
piCGc1DIQV3mHtVkBY5fKnSjTEMhVCeBVRI0lWvIvXPI8HuwoVjXTbIklQQpb+FKMC22y0krCy/U
PtcT6AWEpNStstzC0xdPjujfb/rpLy4bvg5eu1lxL1oQ0/KG2KBBHoMnfc+818efUVee9PfNkGq8
DzF6n8zWdAAW61szEtndQ/HiTy/fxIaC09WMRLeuDcT1jMWVES89nJLyGbcMMf949/Mr/0QUAmSI
eEvSE9d0QmWdOjSUue7CdQTVmwXxPSGtZzmIppfQ1eBLqA0CyqrcvdeMOlCVOm1GTyxHne76Kvua
XpwrajAVrkassN3eWI1RYALnNrbpah33nsZMn9FMuNCxpCndIS2bTZvL3VHsgwlYjNAZMU6d3GO1
1V23FEhfjMhiiZhBaVgl46Ilp7yzQLYrxP3V5YFF0WL52k4ZFRaK5HOv17dyqNo1a/0+kjbPa/fr
BEVn/goi7oBXEDrHDTitgR41CUKQ1QJZ1SQhaeMwbGFhjszOMDAkuwXyYVsjjgKYD1rXraDrAt9u
LJKYY5YdwlHIb0uMwvU4sl8rllmq8yryQsOIacWddn3gKizdErs8FUAY/nq3oVRg6+aw9NqChxpI
heUQjr2bczVZwm+P0tc4WACdVAZivrmIlb9x/AkmOrCxp+tq4CIhCdxa6oE2mrPz58Wlb61Qu5qz
sp4rcEdcHKQiYy9Jxcl/JBxkF/yYscCXUytpn90m6tVA90ofho1XCm0E6gGs6Y0l/ajmB8XmIDK9
vAlboQ/V0voMgH4SNRxTM7AHz+DGVa0NntWoQebKzBDzlA99zhdzpONbFx2ZL0r88slAtDF1Krbq
jSaTGjquXNx8Dw8mZexOBDuYTnIURzVbNSwJu+ZWkQW3czGtdE+mqfvOb7ZtciUevzS2P/F9qrzF
P8Y3rK4atsjljoeWsjutZN0Jqy3y+mlSXbZanLiV7Ta/5dhbk0nsDg+v9QKh1OZTI2at8yZcE6Vs
OdRC1bnmsBkPjjuyZgfHNQKHlLpTwBK4i5l4uLwHF8RxS4Lp0tr3F+f5soDhJW6sRGQBCMKP8+vR
bSl24W1zDSumPo+ypLTzW5xp7M6fL0bLzWzcYM2sAiNqSYclCLjRSWRubj6OJCfibxpXGQSbKDhs
5SrJNtMTQD7ogLdHy9sFdfIbos5/3pamSp96erJLnkijUc92AXxM56MIW8cTFagLkdBRRnCSVhZb
CVIv7eiHnMllUYl10CWxGQE/JdxDYC2YwkHozimCyGNxdAHOZ9DOKmv+jjjKS02ZtziXVfyCaEsk
RWUt6jQo2adFvCZ0lO7TMpq/y9g+LDn4H74iPO14vsUyy0yUuHVe0ialmjx2a1uZbFuGCCW0spp7
kC7SGkjHYUJkmkE5gBoiFphyM8pN9Pk7RPLbJbw+lpKZrShodLgfqkhxpZ/bZVP/t0sZAWO3OucI
IlLQnZ2WYuPdpgyehyRl6LeyX2MUXphvbarj5NO+d1eb56PldhWXmgo5XN5y70q5njXOsgBfSVwJ
cALT2Q24ExZCz28/+eSTZsGR3M5kyLNACBLyZqVjzQ7kvm1prpl8OSgHR0Llj9jPCerCeenxaA4r
S8wwB6PlFfyGCzMyaSsbrhvgHxrfQpqhs6K4JPI26Z7RMLKfIb+52Czmh/DfH190n3ZLKrD7ae9p
79gpw/335MnRsfw4/s0T8/LP20Ui8Tf8IT7wPWylh3fpozA1ekzQdPAFVgcvS9LdWrC0WNp6+LZV
Jre56/dcP/YPj3tPDChN2a9aCWldtysHZde+DW1gncQt/74+DvmSsZcmBsM3ljq9Q7F1ECzaSZGX
THZwswQpgyNKWZle6F8njLGSqcj4H9Y6EeuxJ7qQhRuILeQl59/u6qKT0Cm2tsXoTEASmfSke0VH
ws1inrBZgDQvMTidbHEQXRNaV0d4D9sd/1yPET5WDX2UcDPS7v/8FvNSKoqNtmKQ/PH5m4r0ZD0Q
RpEsg8KK2mYnOqRb1p9+eHWv4ozXgC3DvcNPp45UJSJqs755SBre28Xg4HwERWTlvQC5WFsvlaFL
uFousAsTKmtgWGMCO5W+YRfVhXaucCntrhMrvcp2n6/olRU37RKFsuHIThAVdkjDAMENUG0s6ApA
sxkLddU2w8VowQtEkJHw6lkcIMNpPDec2+PZ0QDB1B80ToMyMZjMLcB6l19490UiXpooUFAaISgY
8tEkzKhIxG1bjXbAj8VT22UAUK5a1qnqD3GBFmynozY7aupoM9b9w2ldLPaBwqCBWNFocqiyB+xe
Br5ODIMsXVeH27vNPdL8ZkWnNTEuJogoMIt5MGrIw1cmTC5i1C3E3LFsx4CXzVJuI7QTVrHJyV2P
OR5TJlmsQwyZLKnQzqn38Dm/3+SV41Yilk89tZ3+9se3z169ypxrDzIoiViU54NWS+/EtfsP18hS
AoMux/527jmqqcoIGzhLzrccAQraSr7XWr5wArnsWY4QJAlieX7zyTcHAbXX2rsLgEen5vbSnRfn
YrJanseM9zq1W0SNY0D5j6iCpPu6dbA3+a8dplDdsakLGwawuremu/tDfhs5zph/9Zn++i6RplQT
r5uF0kbFJ1hUi8ra1ve3LT0H4I5B3o/JjXCN8bxvQd7FyaJYxtwVuZ7QXAlSIYTmae8YP4j/JiJK
anEFgVzIyMUikTgcmqnda5muQRYAlyq50q4yq9zfqeidVNKJ/T2nz2tD1TxCTuCCqt3ntt2x3c/Y
FnDlxS2Rhm80m2MPLfNrEAy/nbQWm9tJH/NN/suaSmX8Sk21vt96Q2s6ehdELvmOOw28wbEg7Rvx
jOodvOSbAXgJsXNmAbXD51jHKlMsMfaC88xirS3jMfAXKiyQhUauHJERYlah+3NcuAmLlKElmeyy
jjbZcdgDRjxSXVCXOfABlKE+Pe+XDWlOboz0ofLw4m8nx/3T01gXPNc1abec8K4c66oK6RyfXCSo
TFJgHbk8N4yVXY2zYDIn8IQ6cMSZNXF1bIqEdfIEgVy7N0fR0W7I2dp5Rv/3AXH3/yPc3QNIifVG
/upxDXgDzSYWBy+LHYuvOfsdWlSbbofGNERsadQz/jeK3bLHHFSKK6fzv8LQssWVdcMRA+jjhlFd
JnDjxVm5HW+gzxX++oqhXK9m0LQ4DkBRc1RTh6iZLA/aM+xKVrdkmBZ7mOnpNcqjfcjaanIG30N2
s59xmhFl9iqjqp9UtcwT75t7WIVcs13abvuxjtTUAEbltCLVBrByzhJsmE+si3n6a9fuW29VF6k3
//w6Oe49Zb8RnaMCVr4TGPRBUEM3eb70bia4x7QFr4MuT7j7BuXpMjz6BFqfgkb2jNKx/3EnOdty
9ABa91s4JRemspmpNigLrBM3otfr1eylJIdlM2Ce1IoZxlULz9gkOtaHo8SqJ63CobW/mZw75lJH
FrPnV7966xHUNmmD/j5ns701LZLRGZCZNTQPIqdQi4vrkvcypkD8gjBAbB5G19+aDcOe4N6uZw32
OBO2T0LKdv8lmDYNbz9NHu08I1PIWz8ZGF8W26pO0KZaiF//rqw+Ege1iyy9qxSQCvkhbSaeQ40I
y2K92SnaLPMP23w5ZgglUJLSwZLUQiUih4Hhn8EWGsE7IOoTvb+R/lWxP6RZEOPw1WQZ+oWNL4rZ
OG8+xBz/DuoL31FD79wZLBXVG+271z/g0k97gl5ngXRlu2TLHWOvQ6wN2sSHyStMwU8OZIoHD0IT
D8rueDqHZibIaVEOsSghPHQUC3Jx8uSSU46bbQ7eWc1Vw0ykVJ7VRQH7n7p1ISF3h/HxkjbbLNMg
UnvoD72OWQXxgjDohDWrAVpbfEvlZLLW6qY4vCbsOqXrqOGtxHZv3Wo8/pEWskXQaMtXieUi8kaN
5mz5FZxcYLp3F/hr3IPGzYX0UQwiX7TTAEUUwRuJ2cSxs4uLGRKF+jFgAXX2l2MgGpq1P4aQBdMy
1e8AuWkHBLMT2l1n9wEX+k/hlaJsUiv7ZNDInDS11yv8fufxR9RUZ3j2BGMS45TzSs/FsVo3lYn5
kLY2jLyouWd0L6kZCEbVPK+K8xcai0aRdQKQtgNbkwmCxg8Kp6/C90pNZp16Z2urG9O2mfxegKYw
r1gtA5VFuxEIuFBApM3U1ULQTRVGCNbBKmuZ+OItNSNzzMEyPmA4dBkxhiUfcGsdBuIlZivXvcAM
xiDxBoatrkGNUzZkV8N6+e7lxkAMEmdIGnLmjnG9qA1NxQOTUpHrpMhBEnMnoa/FioO3pjsFQDYZ
lI5lX/kcW6ldX178F0yP5jOTxf3oXnEvnCrFo4oKuPI9rJzznUGBzogtBE9iWf1q1ugc1LoaPN6q
snDsLjt6NeiYS4M4Wq1wrDxtOw161GCcEv/XEuupc7UjMJr4xEIW3Ks0M/Iddzg7trOd+xXW4Bsn
yiW3x/foxR6NTCLRL5jlwFBjvmy4w95PfKjDwTBKOGXGBm6Glz+9aExLs7pn2ot8Phc4EPvdYYH8
dTKQhkP2tyCGE6LHdphYlD/WQ3lTcJRKW9Atm1UrYSOWvADv7PpkEt86mxSLzosbGjM+FXE14OiP
NB/tnb6GOY5LLaDHToxvxGZCqq/Zm1R13IWNtFS1gKHNd+jlhf89t6HMiOZu6OyU84jNhvkQeA4g
zB7DYb4m/i0Ci2AK6S3p+9vbFcNi25cvXr34gViS4esfv30RRTR3FM3mZGib3NmdAuz/rwDk7hvK
JmC5/TuKi8MMdFzhmo0ZjxQIeAETpLrdMpL/VqfFJtXQWtPwTeezMTSBre1SD2k8GDulVn0bt0Sl
x8mgDBpWBaMQNnHln2z4NLQBg2NFzZYQY6A45AAu5WJWsq4Zz2rP3hKEhUv5pWr3Sd3lNjtoQicy
iBfGJInvL9UDH17rGOBILwD52CvQqJQN0sA/6qgZTGPkx0E8tAGnNLMXXmRc2AjRO5+49rSj+dxx
o2JZhXBtgVpoUoVnvU/9Bi9fgeAkeMzl9QlentapAoo1t/LzWtOzBsfkE2SBkObYc3uf9C7z29AX
ijoY6DF6eFd3YJkbfGoIMET0WI6hliVmV6WOYHlyRLkWl4kndI8dgak9yzfXOR2hFqHKOFweKrbl
BV1WrhATFVdqlqJJQDnW9koZM8lu9MioiUWky9bG4Gbn4kh4Joo6+l4WiLFDJHVdALW/364scqz1
XoA89Aj2N3/tZvzrzSP+23v0Df399yedvxkgIrNYHEM/2q2jDhv1fdR2qeluDC2y9syw3UYlxPO0
4rFBogaOQYtMY0w7qmlWgiN7D63zz0eYZ9EcoAWuhrofs/tCYiM8ri/RWjxAnj6N3ak8ECZeIjuw
FULNfYQV7zjG8fmk/+WpaLRPvgyCXxzq/W1czLcL37R+fNQZH3fGTzrjp53xp53xZ52bzzvjL8DX
owa/GER+etgymvbQph88ojSfs6YdDt3WFp8Vhs4pN+YlfgfCaYBDHqHs1jd/ehkRH0+X2lEdeFlH
x03CBSoLAvtvGmJxWJpcrQzRrU3pqjE6KwfHWVwYYJdXT48pw6yE+EaeQkZb86d7tKaSJDbKsp3U
gYaw6kUzOBRLJZ0i6rLJSKfNmX6fXr/8+82Bnu5ha5p3m79mTSux6v7jkxYDkH7KbX7TiixvDcNS
bGwU+nyi9pvrfJzPriAUpeWum3Z8FLRk4ZCknkOA1TJONsV+FqRo9xfc0ocNo8v7BUVGYxf9mvsg
4NHuWhqN1M/cH7DHfcFdaM3d3yUZ9Ei4mKqWG0OtKScRDDMkv0rj9LRx/EtaWfJ1ozhRWAd2YWTd
OXyh6byeFGxG2uv14NpyMVqVUGRej5b42lBQuZHzfcFSvE3ualLZsVF7QudIBwGS17Pzi01DWRC2
zTYsNhO53qZYdefEj8wrtxnYC6on5fVsnDeU1C6gtaLqTL5OYt7QnXS9oPFJ7D2BXXGyhpIqP1Nu
EbFTrEjWeKBl4M9zv7k8TC7zHKZ+t6E3QNxAOwRmV0ttczhne8mAa4xHR7Zpg9n1fTfnoQpDNamK
Qw/iJ+MPEboRy4+bKc4RRJGcQHsstuWeV7HE1NMZNddpLOe6rbpDOMydbxfBcM+RZ0Kgn/LDo1bS
31U4r9N9S/62tbMsvazuW9rz3aWZ+/K+xf3H7uLcC+++RX6yu8jqRr1vgT/vLtDct+8sjnHFj5q5
Zo/9MvqAnYVGN+IvPMfR7+PGTeS00RNt7GqnceBjZLMCl0D47gkMqvXbEz+DWkuecEteyeb4jB/+
sLtZIgjZ1Z7d7MU9Dv84ZipKrmjaHUsnlI/EKUlUWhKjC4HsJHLGVwxEf0/eRyqvHu6+7dXd35hn
szdpCNphkjEWlN129cUiALsed+aHbplf/1Yuh14LJbWSNlVtIPasQddGjCIZ+WGTCbbPKCnt9T16
W5d1j6u1dWrseLAs5QWc3Znd6DMb4WTlU6fiASqTww4rrpjp4DTT7Vy+o7WzqQszeJEL9NL1iA2S
mT1h9yB70SGGzPUuBBNSuEVM8tHc2q2wopVDWaDxNBx8QeH4FpukK5/ZnQt8llNI5WmL/TNau+yT
eiuPwBBSPxw2ylUoVRxVsRRBkSp3HelJWZgGJlOqg4UpM7T/7y89MSqS5P46kkkxblCRYDXurSC5
2yyhxvTBAcd1bNvCjJ69oalN0Am9UC/K392+HZ0jPKe9qvjI5JqxyX02ICOSGEFZUcczE3WTrfhD
TQ5vHahG8jkLphrbxYlaNYwoZi+1gKA2xiIO3Ja4ufnczxOp7XrclbR03TryR5lXs1lipsCgaqRp
1Q8PN+sAegJryhE5pBqZZ7XJit9t7y/fid4wmJkJmmtvh/u19Q75T7Psx+1fXPqzn+TnI6Q+e4+F
0cr8HaatQST08U2t1Et/j9buxWo3i7A0qmR8G0XoRXwniS4vUXBf5wOdhPHA06i1dVTXn1lOrPVN
/aPlvWIfGRNxEAaEjsxIam4Tacwusiz3uAConN3QsE3hH0HVkLMP/N3kThLWaZ1hpbWcODtph8wk
228W64Pa2vF9KOeWW0Xs8OK0DRUBVM2W1W8UK3PwJ8aHtolpWByLgf5O22ztziNqKxsQ8HEcV/BF
F0ytGXd0GaXwxL3f2ay92s8tClp+cH9ycRARyThbAObMVl2OPRsX17QjSvfZ5P09BTi1U5jaYZS6
0P3WK1HetO8ZA9STeYe3o/OvpzTyEklYWRvEqga1MuTMT/A39i2qhrHjDGk45BuOrmg0zRW17njd
z2qZdl5+2c/7LsaHE9XJtZtXdeZt6W3HDnj28ZKI/+Yu6a5sTqgA/oTOLrT4JvOco9iWyo4a3BMY
Qy4KlphPi8Dh2UxNeSfZd0uuT1pVUGTsHE66Shc7PtYev7zezTBHSLabn5eNtduxNCTr/1IVT+gC
ernP6AmGXM16LF+2tYTsI4RYv6aAJXSq6jeZBqmzlYueBSOyfgh79u7nV33jkIwImSVd9S97y3wD
DLbHcKZix+TNmqjh48ms3Djv/JJ+xsqbMel+9+7lt/1kOjmafHE2fdKdTM8+7x49PT7qfjl5etw9
+yIfT/PffD4aTUZeflWkJU+OP3Px3HDCJX+YUWer08H5/IYOmcl2nvdVVOJ8egX7tud6hDzjfUud
XV02JaEmoPajo6YE39KSoxRHR0+71JsnX9DP/qdP+8efJo+OKFvS/gGSHnr/Ix1mSObaH/8k+Aqz
vJRC3/EKnpjyjmmIkuNP+59+0f/0S688ev+6uNLydtk5GVsQ4yX461uDVHFdfcuHVr8Fw4cwLSWi
/1rlpIWWSbDZg41mSuW/UQXxxMSDuPIYsAbQQ0ann5y0EH9oTwwZkbZ4OrbXDf4ZaSAsDwU1naQx
q4rw63Z3Er8abQavhqfWqQkirq65LEVkMGVwWV7KO8aj0j1TLsu/n2b7jYxTBMvQ4uGKPYBaqobF
NWFsY7Z1dWMLs32sJ5tqwTBVGTWGbYAYKdIgQX+YDL2+BXlPG0vWm0VT4Ug5tKe+X7BmPW0qmjn4
poIXGg1bonZfj3Hes7GuXweXcRrB6NHsTlkPk+Mj/vcRAcCGQ4CmSKQ4TmffuLHFnVb60cUri+KS
yiOawdH3IOam42BMF4h3b59XRsSQKo8gW/gIIiooZ8YupQVzwK7+L6H/9fV/WdI+edQ95V+9h0Rn
vEDldeuVulpdM4ilW4B01hT5XKr5CxxtaqrzQyjRUIIyfzYlA8UDN6njxcZ2EL1o8O4fRT2JR1GH
c8ZyMlrz+jlf+JHUTXDQGJ7O9Rgcy+6IfnLi7E6zzm98s87UORGLZdJiI85+mtWWlo82pM7D3a9d
9JwKacgutgqWp4LjqZ+MWBI3GqselVSnqjj8cylOlKrZ0hx97OrTPnJ8LgSHT5eob4IRN5u626ZD
Ia26bN0YNe5Q4lstOpdld+y8MYk7BIGoyBjVOy4YziBoxDDgGR2deoDKdM8NpfhaWjBU0WPd1myd
h/VFLZKfTUmrfQFjoovRVS7BlAx6Fa2lTxzobszoiQwCGAcPb8moj2yp3nbhrAeyMyqdkKCQnJxW
8er5TY208lvL3ieUtTeBZosLMooj/zvP9xqCbWqWSVlpjg4qd3+NanYSUWCdBlserdCrg/Fcabwy
WI+W/kED52A9Zpqkgb4SaL5i18XAb8cWstNhB1l9bx1+s9tVx8v4mpcfJMlyWMdvlL7zguR2nIqa
xX0sBY7UtijPG6qy6avym+V2crqX5/drVLN4OVJuRErZ1CnmRRpsB/kgP/qi++Q3b+kgP/qsf3zc
++w3X37+9Iv/M5pBD6z7d0wCz4hsRbiS0Wo99HiSvTvESAO7loS6JwXUsOYBEl/hXF/j8g4FabWl
vtpjqTc22BBR3PbFU42Ly7J9Q2e2vnplXO5ghUH8hJpgPChZpEV/v657cBpK0XF3VKeaM/hyfZi/
+x/pjjAcLnJEZ58WHxbvvlBCt7o9WN0CPyS8RnAGSJCHw8yJAvNh+e5/Ha5uIYPoIUoqZLCz8w/F
2//rf/8v/wWcg4EVQj2dBEkSWiPl6Bynx2Y9GotHP3Jt14oKxaxD1SD7iyUd+lRA5LoE6yZ+mAdM
wE1TxrB+1ZRXI/EzUj6aEwxHE43fKfyXYaP57Dbreg36KqijrUl+tj2XZup9mT/0qnJa3a72FfjM
zCUNUjbDHSI0SuozZRiIQTqZEQc0utVG0RF9ZscLh7x2wEXMSt3KnV6k3YuUTuNuFwWn8QbQqis3
g1RSRFoDgxpvhkykl2puuC1NbWh1V07XZf3bWlfz7TnNFz9L+Cbs6BqniiVJEzZIMWVp7bM0NB+t
57fdeTGaKLSIFJ60F4AX6I4Ehw0h0aL7NX1bJKOrYkZcFBVhcDilfYxxxvYx/7Ys+v8Gg+Lp7Aag
VueNxSHlpBhjCv/NnyBvdWCx57qEGMuVX3TNm7DshhnksRPEh4bR4WJtgJ6RBFwvpjyRvDlWt4Ka
QG3tNDWW1/vejcIEc449myjW6RxWDlZMhgxwEe5yZ+8L2Rz8rQeAZAyxt5fHC4b6HvKmbRNZwyog
mjUnUiXbWsaYVpz3sQeokq11+p5NNV1PhqHHVfZDBl/j1veIb2camAbtcyRSHFAefvDC4CLmnRPb
nqsacj72NR4kle3Y1MTNsgaeUkv3QQnEFv1DtH1JP98vx9eTAf5yrGH8eL9EDJwgbBFP/nCoRcLH
l4m685z2NAgu3eLafJuFWZRJwAqRwG6kYAd4qr+ddWyfivXsnNEHa93ltdnjK1CZb7iP67Z21pFP
UaWKvabDgD8crL0aa2+dbIoE3TbyPJ/75qoPDn6rI7AYrS+pIbeQ8bjLaLs0pI5jCNKv6nZxMSpZ
4SfvEU7dzpt75apNam88L0oPVSDSNSib7u7YQSAUDiryL2q7hlsjyMd20ILYgVrf/U2hi6HvRboL
WQVdacou7Oj8W7U51APQHH5Y3nL4wdwaB3tsENrRJe0sOqHpioNHy1SeNTEb+mlfvch8NlMAXuYI
9pvSRDvJmR4lKXWg5jMsGg4xl/eGGXSytq4mxbBaos6w0rmN9LXeVNnctR1pBK+FsBipfnMt8zsr
em/z9QIA43+UdaSCt+sq7CYvSWWdqB/6SxbzkIvO7pkpX82AhMRL1mbFgLZjT4f0WOardpIOwAwp
1WfiSssfBKRM/XzpiayhU5rVWVcRZoyF7JIu+MGZr8S+R6n/uilu+C8VTafneCo19dOwZQeh+3vQ
adz+A0d4jBVxx3QPcdmlSD6Wq7BrvLuEkTEeK5i/DJJU5KpOcMEVxxFK6W7RflBmjAkkvv/IkYVi
tzRJHnSffGpjra0QewaNdmx1tf/if0xPsLS+nk02FwZMwI5Q8k8N80jTGEwXQDlz4kuNtjkBMaf9
zM3sY50yHel29f2d+Ym5A05tvQDzISyhLWbnozFdMSYKZSWRRmeKe0fH3xA3KFgZqWU4HoOFZGIV
mqyZJyX2Ryh1WTddpPV1ZnY8Eopd+IDRC/JJuM2GJhmWT4/Tmp1p1qle1XS18iFhdscjv5Z+ZHHQ
6jj+rEx0eVTl7bk8du9sKOHDNtFdgtO1lVscKjvP2B4G6VIuCRZnH8x/i9iZUyF/u86Hvi/IVjG1
e7roCMu7xWhJtycaZ3kcQl9v4UAbjhgRz5qIQExTNkUxL2lBnFN2jrSpneqnvnwPxXdM93acTnO2
lje2S5IKrIye0y11wJcPIcCpGn4xjSCeMxltDEYqqocREfwIhjLX/ErHEZLGcQSo3/YVoKuPTAUe
58bpPEYFZjVrnCUjOmB3zk6cm+dN6b12bmVNk7EF2kK/4sDpF93Uu9z9OxkSn8XOHMMBenlP1kRz
9CODqbJoTaG11MZgj85KmO/YUjM0Jb7UMfFP5J2hKDXsF3eRBrQj4LPrizNm8QPORGo0yfbx8GPt
ULXOd65MPueOyr5HydZZbJUeHCaDj/lH+a5G8xkLVHV4ytvlZnTD0peLorgsP7pod+soLarISVvn
zsyKjrAYPclEc+xIZ0Kk39ymSj+H2eVX4CFmiAciSwLvekPzxeVu8EoNx9puJSat2SXCpcwKhwO9
2bz8sV3JUH+CsXY7jPUTjfFsCjNXjyS86sqN1QWA8zMw/y49HjGQcQX7Kdo5HbmqWlku8hqqQfkO
HRxWvwtkqOgobKcLxkzgi3x05fN8Sbt7jEFqR8CNanZCAZISD27U/sicCNI+2nRCLUZzZKFToSn+
uAx+Kno/E3wWefppGE7HUgIZOcY6bd6mkeGQth0EokJ6NdTYewxqbES92pWsZkLY8gRAbBriFFPv
ovPRRFEKivCrQO+pMTzWAV9uPmt7bVPxLqvdNCnVnY2TZe7ObzydM1cWUfvBWkB1DSMMjr5NX2tR
4MICxpvtaI69B50lmwkKaZQLG723Y7+7HMuB85DZ8C1a3G6M9o7Zu6bPDXXFVpv772ydjy5rOEYM
sk05m0GM2LAAvL9Z7LXOexG+a2WpRj7CieELc2Kxw0msFH7iL/8sRwZNgBosPCj775fKkQndsfSL
6mGDljaHvxYtdLwUG0fZMf3QMwm9QFgfm0MMPEQv30Sb9IRcqrov9Zlzxo71fCuQ0LVoMBmm2+WY
5nk4TNVGxj01irM/144u52hCdD69eAOjvUpttpB8j1WbhgEepfwTzXLqzCEV2zFFedyB5pFmV+sE
PfIHiWZQp6/imyhRzyyKjhMohdVeKiPB+gfE+UPzGk3Sd1yLNIcG7sPq3f8MdtSJmPnhw9u//IYV
dQeLAta7urPhve3GB2bZt7oKAzx/YsKhG/e/Aw0xzoCzrV7Cgj0pku4Ks4WEi0DwYRMRhoN+MIsz
O7/I1weIz7EACo1APLNygKiNeDaLP/doPZ9VkUo0mJmrECxvS7EAw1XG0SCyCJHuzuaNsTcBkpr5
KONikkiQgvg3xlmHszX9iKXrsb3HdkPER3P8bjubT8ZFuXnGoTOe43sneUab4Py52IZ8++J3734v
OhOzRd9cLdW6/CfGWzSV9egD3vxuZE9lccOXFrohGzZAECqmU0hIqqgd7VVRljME+RBPh8yZaV2R
akI+y9WOS3AB5+U6v5LgPINon4iZuoHKmvINjp98mZlscLu0Gatue8mPjo7omB/dqPXi4POj3pEH
ubnMr4fD9hgwACFSAXutRvA1oX/hRdtzsmcNQSuk0HHNuUDsbnhiQ98U1Gu+4XfMg1p2m1gFVI2z
k8k8ztloMr4YIRqB59/plrAWk63W41ZoxytFhyEHdiGIarO9FrvBpu40UQAupi2/Do9vLBg0Ku+D
dQZDBbe1OyHKg8ydRAuIAdZyxOSqvdiaM4QxvlJjKiBQR2OBqu4PMStAjJR8dRK5+yXFdp2AnYX5
uqKvC4p9EPTEDkNU3OoZ+KJtIvSu2rRjHExM6VZ33ao6e5o8ElbXz75fZf448edfb5TcgZlzw5FD
QpdT+ae1YNktg0EuLeZAujm74UncRgbfgLMopa9yz42owx8A638elgpM7nyClZS5TiDT2Q0Lb020
xtwiigFmh0MhsRX2NQcZgtg0uBeyoFPZ0LlHUpiUh2aYzPRiNNmcmc9541Lk2O660ck0lAY2aTtM
Ggv4XctjZnavKU1awD+hyXKnUfDkK1rl4PlDYreY1HYCU5bGoOc2OHGLstv0PRg806c6vr+3orVv
Eri46rV5HetuzQNDAd39YCV+KOR7xTS5GzgEwcLcYCbfQVDw4oZ2cGlCizTFDbOVhNlHc0Q3v6Wm
usXsETGMA7wEQfRsujsXF4Plf8TS6lS4vYiwwXsbHWDsYgRw+H/vmluZKAbuils58ZD8MTAjp8En
wowOLYqb/HmEMBJrgUthXmoI3Dm2KoxGJNIDvhoe7PDmzuMe5kHySENcvq1nK2ToG65P+C43Hvff
LVIUzTVeWg/ndA0hwPpd2snusuTnfCru0EDv+YTvWfjSybId8WLUYx54mljPo422Hzb4Sb5YbcSS
qjoqPp41qHZXy9QVj9Fxh/1pdPl4Up1GlmtXWzhazMe1qLJGjbbNXT/GrvQegVyxaiqwArmnTj1Y
Ln6HczoI4MqLbrS8NTaESGblUPEod3Mbaksarw4s8ipfeWwAeytnyVfJp7EVWhHll6//+dkrE7cQ
d2tDy1jUkrrzVpVKTPenzXO4OxLs7vlHJF8THm/QamUNhalHlQRbBiU3shdxzUjYLeQMUhej3JhU
UvvFZRX2cPcka1zpf4zPtkyqTiREtAcu2AudN+wFUIsRzSr7XEJ1CwSIMXiNz7qkGYq1vjSTESO4
dLjAoDtAdFvdsgkv9bmQOB7O7ElP3Mi91cDGUjksfIsrYJvRRavjNCcM7hccDU5h1mC7dtBViapt
iABrOj0QJeUbIufl+aBF72cC5VfraUjnOZQt9xZhwrgQgyTCMn1rYkwF9wJKb2Rqpm4OGd5hGpo1
H/TxfWXKSM0FjpYFm4YvthuWe9cCJPpctBBAdKa7YPJX/YcIYZtav9vZrk74soAYSgs9xVNsMiWZ
O5FOaHps1siEpfJl0rA2Q3rKYlIbD26fiUsaZk6a8kvmTenhL5k1aGvsrHW7xHaOc3/2ds8c5vZX
mz5xwKjvwsbE3vyy4smEAxwj9B/vR/mw34aUtEQThWFZO2S0zQFhq2eOPMWR6MZgfjD7B3tsW5ff
+U+e6vXC255mZ0Zm777zZCchvwHnaDZZETDccvthWS9jm/KVJamoqr2RckbjWYxR4kscBAQIQljA
O3gugolZ2am4zcCS27G2taERo0eX1hda6Gr7m0Uy7ci6jyAGNqTX4jvZ6f3WRyDyUkLCMj7IvfB5
j8Wg8yCSJ2BcyEnLB22nfgBKY52JXgJ8fy/uRJKKlH0kgqqu8iQ87HJBT9q0P40V58jHoJPtmPWS
l9PkttiqOeAtva8xLUCb4EhLvrugjZVpYHGZzk1snB+fN24M3vxfgXLr4bKDGxI/NJfjEj7oyDct
Yo9yPR/hY1cinGW+LnMerrpT642Bv69LTHyNcIRTjgYdt0wGYtizWp1uFcz3srLsk9S7GNyAe5cs
UXMM0zzMN/2vUa6D0cG93m3N229f/ty+4Qu9My9v5G2M579xSIUy26ZxtO/mm8LNZ8NmD2zq4AbE
UXccv/cNXZIj3oQ94mjxaWf8Q8na0/1oKoRxoYxLTcwzkGiXqnOL8WsNF9dUN7LKZX1SJtU2wKX6
abXFvJTNWGUNOW+CZE2i6kZa50ktG88zZ/amsN2fN4yy8hh16shLwVz3WOY/rd/6HOGY4CTEY7Ct
89KVcBtNTUsztUKroSqQ40m/e3zKYU3WM0TmGM34lKTLKoe/8utn/UhNwtZcNdK3fBt3CXxWugAV
te8nNwZGoaJCAstw3D89rcn2rETTAz9Ql39kc5gNliVHYkZW0jMAkGE0tuMNh4pRpXqX+nM1QxgW
12nUI/ZEIcWQ12d1PKM1ZB6W+QfxhKXkvaHGuB+az06Os6UpzazmiOUnm7qq+bUpPh75j6jj2bIx
TLa1o24mx8o+V4ui1vbmoaVWbXPA5hTSmi63uzpEVV2Nz6xgdkcWdSFSLEyT9wnc3rQm52y23r1q
RSO4/z1DrQu6PexDlxvxghltnCCxO0OhN05FR6poAAVwqk9fPAF40m/66d+7JtGjsKEXjBf/7j1r
KZAM+yGMKvuK3WHsf4V63y3Zxo6YQYh0/361+WGv1OKVOSHxxb5Pzc+eP3/xZnfNYRaW9EfS3kXK
I/QuQJlgr7/S+P0FIESutLesxz2s7LXYyl8xI9Wmpy05GvGUkUms6hnVt9fqJ6qGOO59BiIw2SLq
J30AdSqbpVBu/4ySvF2VLpQ5ax6TMNppYNdnkv1quqf9JAmuxqIyS8IloVgzDW43WIuo0qqzpwaj
uhkIYd9bjeIpz5qb9XGNcZpjTzk95HTj0LVSbaM4Qm8kbAu7RVUHm/rfwLjKWIcJmAqkS5Vqjt6x
WaJ/i8UtxOQSl7PGi6ZjcEVl8S0/OCunbLSrzNJ3r3+AsS28xGfzRiZFh7yZRVHNiZuNlSjHdcPc
jtmvbuJapIqKV/EYH4HtwOYSEEnauewjHsGZNw6AlbYRF5t91Ph6zfP1+BYlp7pNMzPNFKzt7PSQ
CfNkNNPQVNkW64C3yTtJTReM01AmQwuAWXBOJwuwxtPzN4dHg5evEXBeDdkWV9cvnolmQehJl/qz
gq71oUOCFxvYuPsZnC0rIGPRSCXf7h3YmkBnNCTOmmEGrLVXW+U26ugp4dgRLxf7SUrAnUjzcxz3
przfv3j2LWURNy50A7kkcraV4UTazMawcK2Ak8n4QpA7DB67WMA266+z5BD0FhGGSjYNXesY5GyK
4UyKGYlB4o0KpoDXdIrms8G2/e7lzhky1RmPhpz09cBdcE7NA5NU7e2kzNoexRc6PWB/le4kxjYZ
6677alxtK7QrK3M98jAxms9ME3eie8VdcKq8WczZnGWQNCrOaVEn3S4lhO68Up/vSe3b2oWO265O
4ivPq5vWHfh2cgvJ8zBeUlv74e7Q0sDz+DByBo4qxNlCsS582E5AOYM652HKmZe7sbakYYaHqS7U
ZtdYXLwsJkKgrAcHh0fHT55++tnnX3z5mz1+ff7FAdw+njz57HP131ldmoKPP/9M0KA/TY6/6H/2
mUXx661uDyR2WbkqTOiz329pxDscovS497R3BEdHOnxhmY2r1mg+O19yKFYWQJaqmp7kn3zyCTfh
+Onxk+TPxcVyeesMyPHnT75IfhjdJkefAbH66RNGFR9O8nGxHtGpXnJbfMhyD7Bcopy1jr5pJQZB
DS8WswkAUmds5kLn2ExUSqCq1xc5bF04mQVSnpVamgCvd9jHnncAO5HPNR77HIA5cBfw4UeruWr9
a/Kw/c1PX9HC/5qBYx/hSTDMvkaQeXpx9I2kAVoxJ8q+SXyJeIu/w+Tg6/fXj5JH7yf//uRvyaOT
95P+qSkTVPTr3sPsH1pZIwzjzGOZDm18tBGwAuD+JQ7vvPFku5cGwb3X61VtOhzyXB3TXPG/P28X
5tNR8n9s5zS5yfFn/Sdf0uQTzb94XAGZgvUx7I0dvSiuKQe+H0gOQSBlzNiatkukt0h9IpxJXUXD
iRBijNmXx61+TMo4qrCPJT0EdPWECqXNcvsdxNpLy82qRo+/0XZh0R57+imIKTfvp9bBXVDU3NcA
gjqGLM3paojSLla2JMFD61Q5PVO+vOTbzNHBDphpPAwh6RkuZiX8yoe3+WithYRQ0//JMNMHh8OP
+EcE5pD3OZgLuhnwve4ji6qArhvGqQ56PVqO5rd/Ecg2Hh0mZLwpR0DAPlfYXBCvVHcpHeYHqjvj
G7WgETPENAy/GQcP31ClE/a6pbXLkhstzmbnxVaNjgwfZtyHFORZqhmCfVPA6HOeQwMLs2F9lX6j
olVKoUjbdCW7qANNcxbdA52k9eCsZUV7k9Ht3eknlP6JpGeGdZB4SYjScb/Zq3u77hO3sN0Iq+hJ
MYlcpP2UpSJUyh1WmNWa5rLDNqIENO1f/DhZqF/4lS3rpRQtSeSgVel9L0OnSh+vBKjcVM/T/men
tVZhptCCimUaWnaojUQdmZUOhrrj1ddJjjr8f96t0+b/Wgr3x4mr7RKLe/CL6mpAFjflVU6fBou0
EXtYMEr7jR5GjUixPdFetVvv3n7X/TL0URLYQFuAD1ssH1tZYxHW0FtL4XjfsTAExeoWG3/otdav
zKTpSgjLxjrder1yI4BfXprq4NlZPc4jmJd8WL/7XwxoqfzJ1x/Kt//bUNBTAa4CCS4zkXqfzIwZ
s9zlgMNjkfFUIcw/1ctWHfisM2QnAFJln8miPLCOj8Q+2qfKixJrStwcp0vhS/SDPvr4qwCSMwne
5KXgwLz408u3wx//YJ0lIcPaVOnYSWbYjOP6fVFc/pzPR7cHFrBnuNqezWfjoXjSz6tj4cclezda
KWWpEW3hU7rBXYY1ZnAHGSVz9UXYLif5ukSN1rJJS3CFH3NW4RONgFg2HaanB78EVzZlKJlmzNBl
0WUUoG6xlBXriQp3oHEuC85XLJFrJyonI3K2AUUr0cUFdYj22U/P3n7PgggaH5ye54UYJlErzy8w
f4oaaCBPMx9SMI6leKiewezoe5azvROtbkZUWGlQBlqyPH8biC6syEXO2fN5cUac99A6hs/n1p/Q
O56iKfToAPx0cHKEySNAnwGsofHITItx6lw4IwWFr2z8nfQfMIogzQxpsLpN/TG0bu9aQnsN7WS5
8R3Hf+Jvz9bn9rOl9+ZLI733C3RM9vk139P4V8VpnkO3U1yucceEolPKYRALH+HFpKBCZOdqBoGF
WS2yaGpxwx/KIwI5BygyWbSh2DRs3kBcuNRgy4M3e3lRFVgTDbipq4MSMTme09psHDmRqLI1iyvm
UBlci0lSy/irSLLgLqZ8twHC0aIaEoE3a+FVq+FKx2r3Ox13J/bSZgpmW2/nHpnPEWNHml8zYki/
qkYmebBuP3z4YJ19zRCAti1Ea+wCdCe+medwgaTClTj0vlrAJ30O7n7UqgAoXsow06vIELYh4Vqr
1rKstX4YNsRRXthUnaQ9ZwC2zZYIWBaIvGwytMs++II1FzwpApfkLCubUtdV0D3fIopnQEYf8xXR
Ww6dPcqO36Hm0wMwmiEuzrqsUJoMzldUIcpCAXtKNiH8i9Gh00izsaRnrCwcgWri2Iyj+jh9cMpp
x2yj7EDZmThlEqebv57Bxzgz0FltkyPYigr+Y/kUd42sFoNIkZ3ASxhY44MKfaRaqSEVC/e1+FTq
QNQXRo/xX2pTZSU+fj9npQMSZvvawPgGebfLu0bJadV4jju969wYTLi6Esq0+7haFk/LAqR41syI
jre2Py6vXRxVOnQRPDGClxJB72EjBtglCyhTadY0mpbWzZdRKu3U9hHdNANyPVWQ1aB6H8VlZ9td
B/brnJHqYQLkuq5fj8S/iSPyME4+I5s6GVmJyAze9bpYAmUKsg/r+AYAfFpgTgY2yn1sTKBxQoxu
S5GG4rYBvXCppbrVEJ95jTDBjmc1NRVxJHDFqIJPpT4wKpaBAO1l5YM2j1lW3hFHxS4pOQKMhmCn
nqU6yNpa3doc523XSwC3sAexDwb9T78o2pfLJ3FjHHYhOFCcw0SDKdV14SYSlMRlcoVDIahwgCBs
Po/nZYQeNFJsS+TH86ZwKXXQrAacxXpMLRoPrkS9cQU4jmrkcFr16XJPgopoXOa3/kTM66oq1GS7
Pa4ffhwvaj5X5ik+eh6qBFLXp971mFsilIqZbbUSdEZwOGTszbPR+PJiNskhgPVxyWZWvsVDqmaG
imtnynMssceXsvIqggJrvyEtu+ka3mDHWW86DPhOgSnUwmKWD6w11ui8s3pgNTO8Mzj1mzHOl0Q4
1iPFEpHRPpn1T+NsgTPsA1nW8VUmwoB2+vrZDy9+ePb2+fep4Qz8qQiNGemga3MvOs4YdbRa5Xl3
xAEy1T7//sXzP7z42dTMeKFcbIYgK1+nu5qxO/CT7diPu+vYWUUU79EbCmAeQJ/yKDnecxf7jauP
ewqJSbRV8Q7rdRm4cW44PVgIggDIUpNAem1n9bk+V7T8UFvcmPzOHcUrdfc6zXaRhcb1iTuSu9ZP
m2yV8b1uNh4OxlguUzDP6Vej4ZjVeAi/J2lS4cfA1cNKpqomOSokbxbeLw1gDdubZ7XTKj7S1uvd
nmnRqICVa/pxx0xphGiyeD0q5UCrwBbExRyv6Ovzgn1Gornx3diKNRagCeIlCCJ6mHuzWOkHLLDF
6m2Qyq2iSntwsM5vhsV2Qx3Kg3CbbQmP+v76EUDf9Z62Xf7MwvodopoNhxPnueuA4dJfE43NVBfi
iABn4782RUDCpT/9BKZknAP6M9QHTARIxx1yU1YNnQyRMIK0ptggrekINK3606EF4Nd0PMvY9cfY
6Krv16Tt9ba+01sKNNcyhr31jW6qk3iiZjbZyBmbJW6/S0WbfPFTIAgYXrtzbxcdhtyQMKk7CuLC
TijpqWocKGv8vNDlOTFymWoZ/3LJoPf5uQm0ZARzJvLAkCt2Lml6eRRVRZUe/IvsR4EsTTVJ6t59
YFyj/syTLfHEap4iKDLicCa3Us9zjb5EGibpJSSwcTZj17RUd3KahfdOU4TRdtdvoHz08GKEmV77
+OhoH3+QjfGV09b2BCeDa33E5H6WxV1CfISnPWPI+yyA2BqYFsiPILq46JNiAjZiPSHY1jmpJxhf
oCNQLY/m17g98os9RLn8xTxmB/vGTky/qlY4Ha1fp9ZFWTrW8UUrXHqMpqyqu5PXxX7oj234bxXv
sy+dDwdjkO3VwXZYzCcwisli1jXyzYyas/Dh1rfJLa4qu+fwTf8sz5cc0MLESwom0BHmiSRPfOSo
xVqWjQJSo5OUoL6gpksH+3bBoOFOGAmJMBUhjFOJazhdumi71s9b2pw1sOfMdVUNjty3JThEoJmo
4f2aiWDE3pYJ6RjgxJ39WaBDlUw052eI9Mbc+BqySX7+SrPRyrixzlJerVle9DN3y+pP0KEgUS/Q
t7i0tRcqbJrya1iEoDS/C9aaEumc2++FcTxwRoIWsWmKTGxtPat07f47A6M7nwRiQ6w+c/2+2XRU
Cljz2zdRLHgDqGu5FwHHwELXAwiIsePAUV3T8VFsqDYJDiC8dR1DfrRq17OYeANE2ZvK86l15Xvt
kcvmQ8j21pFJWn/jjnbHF2hR4Z4vTkU2+DRuiNuxCua5cj5gNCnqz4D+l+09mEZE+IbTtLnqLB6z
vApLIwUae450u5l2v0xx4kjkNR/Lvcqmv4wyN4slX1XBypAWV+GztGYqguGLIr1VY7uqb6SN4wMR
W8XNABSeVZ/NWl/6fvkmcpLWoYsgXirSr265WJNtoBn8Qolx3Lc8Lq2FuGktwKjc1AskekVfx9Pz
piLtcrNVaxGhi8ktqlE+7+VyJrwpHcsnGiSqdep3Q/t6F/JHfOBbvdVta+fQb242v6h8yt9cgbIn
utMrUMjQ8wvRpxQopL5SXXSygIEIeZyKpFg4yGwXU2it9qvUUTi4GmJANRZKXSqm2CuCo1bHC6kW
jJO12twKMJE6Bvz1QI0rqUttmahA/eUuWxjDGVxQjRNKs+TdoILctSBPxnyg37e2AxgxFLMj4Kb3
+YZ7y8X3pjwDvTOYluZzsWKphYdkCwuVhWhG5s7a+jDQv1kNTEEqWeVr6LCGGpOhfXJz2qHhWPJJ
ob5arlVnc7Wi5QzrBSWcbahJm205UBuymIOHR0LQV2c+ArgmGw9YLjYQLVT2Sj400t1zdteAe8X9
dzzmUrndAHN+jK5+85Fa2CudfbTL+0jzSETCeYQLUe8gFZOagbLN0kzNrkEWbW3LOZpOn0OoXfWS
RUmXjtU7IlnTyJYer+nITqXY4LASFxyWgohshpWmUOUCN0nN47g2qHY5foNifIgPDjfBwdspN1yM
US2G7GINLUz7MKhnRZPlax0WRt/ziLqwJRxFmD5KL70xpLNODQaD8847z80ZHIzS3HDsbhHEq52s
TqNnRdWS9sP5jhYeu6exa8blBJXyMaks48o3ByjIso/rSbwLeueLdcJJVIjTgN7X7NW3tJFrKA//
nRfn8imNius1Gwvtn3ZsyYcJx598jNX3eEODPSmul5FLIBLD9SAc3oAWPNzB4MyWimfC3fQHptXt
Kn0rlvPb1ml0Hhvq4PoZLbEaTbcmf1Jtl8ZuFW56qUWYLxWEhayV1OWQLXPVdF7JFn8uverXDB9q
trM3hpWIuYXT1BuuSilMIAHRrFlTPWigjrDKyzvc6Jjqli+SWp1s++AabyJG+hxlTGZoDNO1NB2N
du0q6tiSOyujGn/9m9W3D/3XPf10n9QVIt5WGIvuKlwUdiX5Mg+HadhNQ05CPZ0fz8J7XTfP4ryu
tIw2BYuH88Wq9REAdky1THzLqqiBA32mfLZBhmpV1TlzLib5lZm+JyWz7JX/vTekIQSelA6b7CZX
thwYOE0KuGOp8/toU52U17P5nL4a3Hi6zFyM5tMuU60kaMyhiVsFyePmYluiZPaKZ8y3Gcc/LBMn
QvoEJyxxWHN2dR4tb52yxgDoblehQcajFa0SGMaLVrfcoGlAXuQ4qhwajnipEZzvMw8pSq4dfmT2
xniosZirnSRVSpFmUZF/PUprEAk+5GQb82lby4vtBkdB24li2yy7r3ewDj4tUxXbVAEG5z7Mew1M
W7pCnXaGeI+Gz0eLs8moHynHm6w7e2NZPu6GZYPALjK6emrVz2lEQOlyjWWNjXAD5MRD6eJtzxgy
mDqbYdUodUgW6XKfPtBqPHstEcFzvMH3ctKbKJARyb2puuID5WJQG6fSHyhnSKg+OqacMZEG0Lsm
2Y8mVDbkRAo49et0CvEnSOYRa2nQpuMGRAaSgRrsyCWcOv99tzSWQ8q70kxtsJFm/u0gJnxx+EcI
iLyz1jQnIh53shlhBvAvDkMMZbuD7rwHg2m3o+EoQhFTo5oOvmgbkYd/3zU+WJTCtZbjE394djt0
kMtlkmrSm+DyJ6l6w0W+KMztN2LGIxl6uw157L7lxJ7dkRvhRc8ptnrgv/l6LZI7p+J8eSU+Q/Rj
tkZgN9+xgl6ftH76l7ff//gajlWt08rDqMxXIjP3UDVOwujF7YJBcMfXE6I9sDG8Ekxcp9BO0mpl
pw5lurw+aVFCro3+1gIO07u0432phKcs79ueGbzBn0xIGx6OfRGrZNQG/uANvDH0rt6G5ms9TWJR
vhOZNI4iaGcJ+ibGk+knh0c9jik1UumHs59WT5rSUQfddGpZh4uzRDS2Cysdb9eAfko7gVjPRXc5
5t0K18yeLE7IUY9hKXjNE0jfqNgB9B5fOtVOn8TyPbkzX80+wfFgV/91j81H2ZaMeAvFrIDpsZ39
6ZP68hnPAXAznRDJZRnzfLSBgItN7a9ny6dP0prbB18RUFfveuSbRceAbqfHPa4jaPn0Se31jrFe
f+RYr+811mJcRQ1GlKx2JmbfbMUUNF4sq6gLuxL+wtGQ/TbZLlZDKVn2Mc2RTOyOlLzFNaUHzahb
2dq8tY1tG5u1ddxl1qWF525wp3gHOpeYqtWuuG+ufVgDhqOjh9S9ioQMpJcPpqtagLh3UC1N8hes
b4xgsGrIP74V4BYjBHdTAAfK+GsUU7sq2EJluupkPjlc3Z7NDEUrx8QxbISVCnliuJYqbZNpgAgc
efGyXeXMQhh9l6oyUTUluYPuFxY0pu4l53FEyjzbGNSBZ3IWRtmGbdc1sIXo9lUstR56nNLKcFDY
xsUayIWJnrZwXQkKkjiOaA+GOm07GiyJUwe/+o6UnwX64WqGYFW2pDuPAsRJa+i+mffOewhod9vj
Cy6VGIOCbKi05hHcgaJ6WXwY7bi561W6vITPsfaMZgy21HwNrjuL842+mqdgYW0uCn9lISJyDsFA
CJsPnaF8CXhOd7VB2yeJ2nUUhYjvGFepivVSte6wcpP3QtCyxmUaDqhOo79mnSY1xq/9mEWKfqcV
esI/1ZWiD9bZPxkTbY4KfetKVHbGYit9YDvT8cjkDW3QS4H08q7IEtSzYX4QSFFSNA5ww8JNu+O0
qjBsVGVX8DAi4w8iANBdZbjcLs7gZTiEXta4P9qyuqnPJ1zm+cqAQhYFbPsGrt1YKHhzRWUDiQay
6iQ1CnjYIIE7NHbCkEIvpzQEsEoyGtmbLEhZF74d+pN7KJe8svTfOI2tqhn0Wp1aQx1Z6k1g9W8+
+V5lN+JR5iF8RaW0VRtWLStePTk6veu0kNMpVSKY1uMMl6vR9XLorQwJZQv9IrwfGecJ7OExon3/
WhvUI5V1ughqdgVfx0S58WQlrelye0WJl/phL7CIAk5frAlSzevG+FheFZe5GyAYJmDU8U7scLbD
5/q0STiLlHgFbw2r50dbaujYlnWShgjBYoOPXkkg6GDU/cdw4tzg0LunS8egktkLdSzWMgVmiIh+
PO0dpXG7WMDQtFa3q9uhhqJn2PpWxnLb1uef8tZTsmSuB0RFxhfEprV3LQCU2f380+RstgmCgPoQ
Q+61A2CkdH+hsz+NlnwjDjWm30AO47Kvi/UleJfZ6Ez4Fynkm0+a63KF++l0nedn5aRhQe9dqy2m
UjsW5xz4K3pZxWT3+F4rFxffvo3hPDG1Uq27nLTUgf7dS7gqSeWeEVTS0+UFYYW33mqSVqS2KEDA
u5zkE9xMdl0CqrjGPclhrQQbePtv8wbe3liZv3z99sXPr5+9wiR0cavrSsFyVkJ5PgYYoW5Olpem
jUFoYK5W9MoRIEFX7E+TdSyQRt0GmEYna3TeCOBYXHQCz57XOTwqhA0D0LIjVQWFEISq2+lH5tIk
z8bY5ArY1J3uf42FmVxBYXf4YzsQ0gZA2oE6qeCYGevKRI4XSHQ+S1CeH2flrt6KF10VSrWyIpkh
rLBCexk1e8TQ3evKoFHzXymVWVhqzAFqI3ly09MMPm8RNNezmaSxkDxGeYw0gD0bpA7v9lEN7MAQ
ZhkDrrdI3xu2suIiri+A1DwRvF2DJ4ajnttI8xZG8Q2si6gYt8NmgKXl4VppdODR+y8ariDWqx4/
QIqFQUztazCC+STu6WDML4zlBXONAksu9hndZJ5vWsTunSPGAC3Fooy7AUf9fpTIWVS6hgAyHqtq
dA+mZ8b1Y42QXy28tuHiqK943tMJyQDrmIWTsHu5eWB3vhWjDM4mCn+Q9vsxfad11V8HcZhR/Pyu
IOGBS66z86od/2DdpxTerkxGHOHkkzQC6t223ehkscDQX9eAzWutEoXbQuYYIdOXubvovZY16t/c
wbYtSuZZg2euQ2bAbWzXuU9nWvfaxq0IncFeu2vPZWJRv2LH5XxSb1c+0Xo4ikjcC8yUbPvRijfR
j6HR5GMqOzZwbiMGcxW+lObtTWO8KUtjLUyajbr6dS+EZtrihHnkfZtin6Zm3w4MjWoAZOBCo5tM
BBlauQ5JROIjH+4uQcYv4nPG76P5bVAJtLFjqupoFlfxSEPYNMFmfWJ/1vzqvZXhHoKiK9dP1toY
7Rgc2Zbgp7RlcOTVOJqbNuO3bTce7DKK1B9q6s3aHKgpkym2ltAu2CqlvqolNfVXKeWNs10EvykY
xjrDIzAJ/VPZEja3AxUVK6IRV8qPe1KvroaVZbloAyHQwDdHcG8p+6wQ2RUx52/zm83LH13AKhmr
oYF/qWsmngTMiw4ue25KArHzlQJovNti3jq/zayy4rgXu31UkU6oVeuRuOzziRGPvXMnXsbVyJpY
mJ5XApcsPjQ9c8lxYWL8FGWeX7pfpUuIrzGamyPd83arwzUIXgUQbQRrVodZB9idXc3QeDFKQjAA
3VxLxdpZuibtxjE14l9skTSq/IHweedK8OV08jkG/6QrZBAakZie7y7Pz9RUtPww9ig9H+/BXomQ
xvGZdudhuCaOsFg09dVWZGVf/uw16uie1HV4Lj1ujMSGMohq3YCV1IZCFBrEc4jB3shlYdJPDBwP
58l+ZRgsbmAIgSUQ9Aw1xqgSLr+jwzyabqxX83QZdEfBbswABmA3DRAYUgqTeEbIr8ZLdZ/LhkFz
GSwJeTp7dNw/PfjoTmtNDSusaWnxJr0ormFJeszW2U9qgrNQq1slVd2uo+kI9dT7Ll1L0pwBcUZj
SQRaBtr3rnXotscs7oVstGurLAsePO4xsxBsueYlESgwaXh9fp0W6wkUQQIL9pvJ0wD1iWlK85vR
WO8t/Y/aY5ZRNAvV1Lpzf0vlmmWfiiUDhmm2qTKYuu7Z2iY0ML2b+pO1A+mQ26Sp9+lEdCV46yja
Zdzv9+lytXoNS24T3xcdbJ3Dxh822JYCP1hLFCE1pILUlKeZGvJh8+5/MIblywIBe9+ODgQuH/pw
vp+X2xn9TaBe3iBqIfzYKWmIhY+0OCcdLHwPf96IkuASxpj5Lm6UuY5Bk6lSsOGQgczB8A6HYuiq
gGkyAm+I134rgDfmJuZicLA5H9pphCmdpGWyeLAatHTMe4+pYPSy/GbMEQxHii1mXgAjxuESTAE1
CwyRLZXbs3Iz2yCQBkSdplA2NRi5CpbEJWks56E62RRKMPcU+KdmVd2wIlhnA87La7piACiQH+6t
Wa1O23Gp+ol5Pjg4+K11JVlfEst6i1AgsalkuVvbcYFhxmq4KuD3PxvNh5gcVmYGbjI+A8ampxI4
20Rb6P0+X0rsgWC8z9lwzskRE48ZJRklBkIGWkE/ubUxJxBefxJEYDRHpp6AjWiGGBV3Wo/0QeNf
6rcG+lqvT6OG764WCb2+1DAxpfd+8TxUXsHhAmZAPdlTtJivGK0PpSMcCztXclhZ4xYiKWKLOKhz
Z58OYbckEcIWdKfJ2csEbmvSGivKZQmqNAdau3zi2f0bb9ierET43+a++sxsI1siD6Asx0y9t4RK
RRLce03vHH1TQTgBdw1dla8aPUcR7G2e5rV+KEAe9bq8HA3VGTwht3FePo/Qs6mKsVlXSm8cE9f3
oOzmFLkHdY9HQLaVM7UcikAfglCgflrC3sn+XmfOr9Aqn+TYrDvJpT/ZVXX+jpSpu3OFHwoDozik
yzwHdBa1aJyL5xl3kP+U4EJuiZb4WhktBTp+DqJsmQRNzeeeXqWNk41/RNjOfqeqP7F1eO+4WWzq
eUw1PYzm81GZ29zadX+YeHAc3amC+VeLwSbwJruczm428I8b2I0pOeF1Js3WFEZvmdqJNfECqkXC
/ZLemHIlUJG9naHRUJu3a2Dvh8lLpdstAI7fwjmPEi5KiS1vuBNZazAPlcsnLR6JUzlaOkWVdOtZ
buDYB9WZ6nErTbdUXnd351Ppw1UVqolu4gvQ5A/Xb1ffC+9pXqluCGUXUwlfs53PqzhN6k5xcPDW
xpM8L2i41LFcIq0Wl0irLo9Xo/Ws2JZOwQwaJsF/PV62enI52l8Sn6jeK1jxuA8slhik57xh514o
o6Eby+gK3indrsayBZKNRi9ioXxacwmQOEaa3IlgdNSRkEUp7VW6cdFUS5rZ5raXaij1SPUfpPoP
21m+2bdyThyrepLfp+q1G7XJi9lkwzXJgI4vaPid+sTWkRbliJYAtVU+V2VJYzhcoRAzvfssiM+9
lTDmI+LkV/l4Np1JDDEuI2lPM5bl01QGPU/bLzLGkuwk7TKzCon2TTbVHO0/ZaoLap7ruQw2WiYI
3I0j4EetqjLsCFnl9Foh0BG21khMynjMtB2N7aoecM9Jap4fKjEyPe0Je47DEKjDpujddbajMZuz
llMkHyX15Rlv3+ZMk9sGtubF8rxVyz6+KGZEhAYn+p0OzwsegYQ4AfwXV21+GsGJuXVaK0G6Zgdd
vRsWcAtso8zHXOBjlPNYCnm8LHZOAogklydLB49dfd7Z+2oBNQU4q9or/uRjhLv4f5r7th03kiyx
hQE/LA3bwMLwczYFgcwWSanU05gZYkoLTUuyhe1pNdSSx0Z1gZ1VmaziFItJMUmpqtUaXx/8E/4X
/5dffK5xzyRLs7PYwaDFIuNyIuLEucW5AB/xUEaKgywowQq06IQVRI964x4TfXHoMUlje0jFblt3
HdJt1dijocZtB0JDW04kdU2HMABs/0PsSus6oE6bONzymidaCn18HPxAFLJns5KQA9kbmf+1fGVC
Q8OooWSsuVM/hruTDM8j6lcDI2v44JTV2e4io4BR92s6af7KNx5f31IAwma4LTBULXBaJ4mguUg6
8nLyxuBdjoHjoAayXA/7J87cpxlGOMCIUYg6NZqgUztogFsREdBszoc0lTD/kQE4N86PPCss1D8/
87UTTwB/cRrH8BS5sW6pNpv22q3o/WfPv3/9/Junb54/mwpRc0sFKk3NZILgauyzuyeBUMM3bBDJ
QPh+II30GXHUT1Yp4vbH+imZSNI4nUtjrPDDzK/FH8LZ4AfYuJ+2U7sD3jALPWzAm75zjsy108dG
v/Ws+4d+FURQw1fiThU30LuEbSTrnIUm9frjQoq9elFeIPzVt5bS15Rjn9OKyf22Rgn+O/JeWVZb
Jiv9CW+JOopg48jbxLZu4tahZ4lt/KLf851osLnvuhc8n5qu8368+om4aoyk3Sj4frIDqDfWMSGk
mK3v10pJ6QYFvpGp0H7/RyOt6lNVksoH7/ggZ13CtbaJhe0oT47DYufYmnPgHdoapQXWjchyG/YJ
u2DIkElZRCVzQreDYtv4Cfb1+43UcOCVU8x1lEnRo0HkMJ9MlCmu9D4Jc/3UGdbtB52OqbbjSMLn
/UfmPr7vvC077B4Os3asfYziQCo38vbDBFR1NHEnLamdY4JsccCQ/jOi7iNaz8yZew+cEgXgUrAU
xwp8eZwJYyjsCzF8hWPIvYDhXUvRJZXz/jgQkjuYZoMbknOZXuDfzeATGb+wrQwQhxkUtpqeS20N
EMzqefVSbVtf6BvOn7EJy37JVn1xnNjAVFHrYIP5g2/x9m+corqXhZKb5OkzJlHF/+2edPz8QTm0
U0B7gCJU3mtptPG8Z3hPJXsQBwWqXx39MWJlF13MU+lzDap7Gwfbzb2nB29Bcvt5kK4VcwvvOhO8
065O1MJZzn5Axo47juzVqvpAy2gJdt2HaXfZBrrm/olZDsVJsr/8km9sVNHVLDpuGiKBWQ4/7ycH
FT8FfDL0H5+5mI2/NQkUpO+D0Z3A1spd3GcCYZfc/3GDOkDrdLzsplprtCT66W4XW+H27bu6f508
qA6XnL1hf2qZW9pBn+P+cT9KeNMxsJfUxTlDUH3ah6F2toUTBiTJCgFZNsAzyTplUqSfo5tCSyUg
PA1t4XoaJnDfVdk0quv569evXj/J9Lwi7nAUgciK7MypnupWOE5dyf1qqqOEfv/t2//w8rvMDj/V
eFCeYJQncinQmySFKGXXxS1wTfTVwPpKqJUEcSYYbRcMIb5C55fZDgS0zXa3KrYVvWpi3ruqyerd
Ru0MXrXPsPtFsTlb+u0xoISrmAZn0Hk+pD+HO19W0CxMY9nEDr0gHKKGLaaX4cD2A+ng5DTX9MFh
irLYA1/TNVNkBMWncPnQoBLXPSHO7PGL1nxNv64+OWfVnAM83CKtR82WjbmgTBf6ls2Bohq07NaV
ZaIch8m4STAV50KhO3YLNYKXrof2XQWaL80q247JZ91M6/pulOXShaWVH6VkKwWi3+88HC8ozdMz
RX/l5Llq9HDza9tb6SbLbtNej70wBNgXq/F9qDclTdPsQULqhbiXimwQ8ojzop9ti4eEaKP6yoZT
h04R+GpG4cESK5B4zPTpW4Q4Viv7Hehd0y4/FQ4LQ3yU+O3O0+48cYoIkVPnZR7iXhaw3bBj3Ml/
j8YN1ILu8eD8q1FS8M8DQNoTnEN0Xof8OAAUrlaDKR72p7RlqStUJhhsgxrPnqFa43aCsW4rfHVN
DddOOhpGCTnHHGgI/pWmIx2Y1Ik1KaLDB2UFnSjQMkCTrvYdeNYqeyXSHHSvJyUxntxv0HB830RC
Ti6ABX8obieLMt+H+Pt2IJwsIe0crIX4oWsqR7akh7JaviEP6SeHJ8dhnKQ9dIzb5MlAlplMJui3
f1YvJYFRG2TdzKHdWthOwfskk/ZDKo4D98KLlrRadgytVujOwU2mcScxjMwl9RSizDDq2YceMadd
NrYHHAAmElHIFMw5Bvymi57T5gpJHzwfJOQIYbcmv6erj3m/iK0FbWlReta0kK149buQhQnX6znJ
9WoyWS0lF6gcDjlM0Y8sMuaJaNBUFzU9BZ0WUoEwKYb17TH0O5hX0BpvQ99XTR8cm2gm/3ip4BXj
j2cC59UnpsG3iexhdr+UJkiZ+JO38SkMD/orZmOeLP54KHal98yfIiziJXgl8gCxB4dUdOTjCtSP
IE4lInozENYX81ub0DiuoOphsEOqkl7KwURBsRi2EXCpmFCndpuiZmcN1qk0ni5xtjb/aZdg6GwN
2gDISNBnFxQp9cIaSz+51XARxebfn0Rpkm6biZsF6GT6lXtnRB822XooTVL2PWVFU5XYTbEz0snu
lnnIX/w+eKMBAqAVcMTOE8pJdL+B/58StDJ420hfnfqLJwyHJdMg/IESpqk54HYyMwPMDK13vss7
pPsn2aOM3lkj2mme13/seW7cW1O/0e+A2wo4XJ0tTHqIaXI/cAl9qXHrp5zLe512AC8Ca7/+xkg9
TCS8P3Z6jjK1dh97tu9guolUmh6mYxjngHzbSmutfw5R8YF2HwDdtARk+tG4h5RDhZqlygUr/HE6
uIQZwI9I07XhEGrOonRcLd016wtW8oYuE1A+0deesn0ns6Zg9hw3D5YmAGyRkWVs/Ofk19PT9mQg
fq03N4JXE+EJvUDSxZd62UndTbmsNOVtfWfTKjYpuztZggxHVs7BgQCB2OUZcqxsIa9ceYciTBT6
C1ymXU2mmTH6sbgfZqw4aDaxrkywkCe/cA4VgLwtiPQodTyP/hK+dE+CYfHsis1FIh5Wjid4ObqX
NAcTQyN1574aXIeLB0dmeDdjb8tRpgyS97iSF7rNYcYzluC5REmF9cspKMH61Dd+ZZQt1i85x8wB
MMqywhLQOMpLk6equCoyr3S0ZrQFqlRtMmEEDSUfuKglsS6cbrZZlOK3fI1ecqBnfq5Ava/5+CiV
I2i3Ct/vvRDn9pIXAW3g9P+uRRSLKo6yI88oGs/NdaWJluKfhPHsHf8IZeWjJFjcgHrQhaH0gkMZ
IV1ZO/3+0b+PFIlk4qEzaH6ILeLQ/aE98Xeou6HsIlzsZQHKY386HeZTDBElG/MBq+IB8k5Zcotu
pH75rFXJNXqdJBZ71shhHDN7a3SZmCrj/BILSeUnR9NTjARAJzDKI8vFKeLgaAIpaccTYI/j+U6m
pPrh7/np9A5hwtQlKCdnfLdqzECXmMzONU1MxkMKI4ReCfsXV8owVTL6wzzhnHavNXmXd0pDC002
hmuWfQkiXdbv7UV4k9O0pMdghDRmwWG1Sn790xKVo8yLvHWumve9yJXtz8D9fvBurxPg/mOxWyD7
j0fZr1LinLjdz1gZTj2iawuTj6qjzSUwhmSDhHCrvHYmnYehu+yxnzLHXxmc++OUVCLS51V1e1YX
mEgHBtrs1tthWE5uKR2illSjpZdcnvNel6dbkLwxjBFBZ5lZgMxbMKl4oRqcBotSjHJsMbskbYYY
yERFf9LWQ7f2U2zYDIvYp2dNF7Xv2GmnsklbowAWefA2hvg9p8KqtHRCEr853xTN5eQa7htoD626
PkqSngYGe9D/B5nrpc7Vp4o9zcUhgoEJAJgmUg0weHvFyjRjjFeXHMjutf94ofVsKfxT7ZbmSQqa
rEBvL2tMYeDJd36XjFJ3cSVXenot5WEcMCcrsv7D/nhVb64pzLfMPB8v6zSG8qGUGDbc+McfkRM/
7FPmRX/OwGlRjfaYVvp3YzWUpFeWx7OnH4oB51NuYJIYZFWnY0XdUaDNg+OEYGVseVM1DXDraCLe
+w5j4NQYF+SYDrClnigSnPZT2ZDYo81P+SR0HTUUB3PwOTdJKvgpSKcJSUMawfjd2jydpVQnbtix
QtWAXUtdn71F/t5fEF2W1qW0JVjW1z50MvDvXZRouSsHaTLDassU+cn060CqPzDLqW6GHOU9iepl
LrSgjGqbzBM+nHat6Xhdz7A4EeRNQqlmYf/kNO98V79B1rIuz1ARXg26sqDexOk9HT8vT8hocxc8
zDcJF4TjmDWlbVA01XSfMIit1CKDn/MYZi8p6X6wOSSOvPhXdX+aCNhxay3qSRoTR+owpFEbLrUZ
+qyJ/MXTl9++ff38h34s6ovFpXWK7lWiCI8b2RJh4j3Qe9c7SsbZbag8PMcRixdmxpBCHjIv7dss
FDdiJKp3eHxGBqahI+wR4fyvijv8NvjXQR3ySexAHEtVTgSOhGL4OYeSdMugvM95WxJrESw68IKX
k7nPlhwg1+6mQjE5FJ2F/kv96f7Ri63kW6nndxheHaMOnMG4UHZMcjBOH4DP4a8p/rzXEMus7Vzt
TOSMrSm3xRW4afETpwWM+yPtn8cFOGm4kzFaW3BDf0xtpU56bNpPx0enHa7p0ixxs1lfjB5ciXHP
yt2G5KXkE2g2bnkwtb4AoERRHKXkxFUXOnnvt8psJvEm2Q036Yvp0d9zGC2QAHD8RBVTbCjxeNgi
3j/8Vjk+fHbKTRZMnALJQ2aOB+PJpnHqeaVzl2hiLlZZdb3e3mLbkc0+7yTQMI+lbi5ZlXGol80x
Esz/PvWeb9ZiapbfL1VxQXsTdMpHCE6eh7749kWHBohej++TUef+5DH5vdersrFZ5iLEcQpeO75w
+Ko9mGa+KxyWjBLxwT9lfDgVouz/ELyJBn57NDwQ2+RkMeeNfQiTA3S9/0benSlWJJ76oWNZPBxc
/vHRdJ9LdzoeIbYWxZzb9eHu2tfEk+ZVOg4S0Cny6cQL0iFu98dXjJVXwYN/egZcaXWz3oRTXHdO
cZ3dp0yI17GqPd3LbUh8uM+lDRqXZJ3dyrDp4gG+K9OJu9unWNrUPpvmvvOdeGttZlytTl6chu/d
sFW89nGEJRZSDj1AepFKqJrb/WZC/2d3ClwIFzMWBXD45nbNyt/ISUibKBYAUs17Ads83Uszo7Qt
2zL+gWI2XNLjODsN57GSdOs6BdwkpKbbRbUss9uOu80tbnq9dzdv/62mIpJIjHe3b/7fS8pERF3W
1WYsyagwMPUhh9hL2AZKWtfV+WUB+uy11HL+6Sf4DXb9p5/IykR/zkv4S/M7YcqDHdbZlMRDWLBy
W005olXjQbIPRYM57iil+RpzzqDBC9qub5eLs+xo8qvJ40eTsnr/OAOJ5n129FX522L+26+//tWv
AWecnEXysTafsMwZxsn24hxHkvuoZ9CDjGwAlPz+A6Uif/mqJyjxkr52DALUXltFvVrH/T3O2zEs
x3ZLq6GOF+QvdsPLymJb5F1+6diAMoBhXu+8Ldm0wXgqk5cVcMW3mEUK0ynoXaeRgmdFhXASwQSb
Po+uZPa7bPgVPou6q5UU+Z+92CDl2b4FV6tzN6+dGN+1qC5G/L5982L8m0TySRwZusrQMhF0hC5i
Wh0cvDv2pvLyYVyLPb015rSFzSsXVCHv4yMMPt6W5BN1xJ/hhsIfj/kPuKeDT72/KGvXnmRcZl2Y
y4YvrpMSy2arcbIcuSnSOOcN50rDaqAmUc28pBjrW01X46Sp4fw0hiq5dAiHmVJtG1Ce5qVJxpP/
AiP9sqonByyjCeHHvDZUkpL+PUYF3uTf4RX3Q+AoZREmB0JqbTbmmAGIc8jOFxs/ieyyLkrK1oAp
B5HfE6sdUjGHmXqW8VFx/pj0qyVV+JQTpX9mVytM40nFIm1wsMnjt2qUBfScOyTbGmS2Ix2m7AeJ
+o75WxM+pEp+jcXpS9Cy89Q4cDg8EMx+TZFJ3zAYf2Cbn2byY2bsbEJbvh8eBx2UeBxp0DcV0jAP
Z2aD6fxmHFJXA4YtbzHoqtqiXk5uwllzuduaKCc3N+qwy5zN8ExorJlBWPdRVgi/FSuC5PNYQWvf
8gGdZwrfUOFKLHlZX1zgfTFFH5nWO1k2Jb6wkqq+JEBjusPzaiadZ8XWzuVL0X1pQi92Tn7QQHC3
P5yYHqcTAuW5hcTNFHHo0jtAtSXyuJQ9HNXiAo9cL1mGVw8352y3FecNS2CGi6bZVb/9Ku/5hwqy
jGCQnKiHAG2PG/v8CQREv69WtqfM1DR/s2tQqPcB4LN0rBcmr4jwHJM4y/kVhtUcAm4KJToSYSJI
MXf4IstmO/7siLYpG9DPi/XwpI81QeFCYgb206Cjb+eJjKPGgKTqy7Av5AHLYZYsqIcTo9+HZMT5
rpbmU6ccC+jxWg8yKMzlBojgye5tU227m8gBtdV/wV3pG2B9wteavEf4GtPFZPaeGf/2WKXpOI/N
zBsE/Wvdvx0DIVIOlZnDVcwdO5g0mbypUHwFbfsFdsid4hUfsDmwARppTkISSVfHfZKtHGP0nKu6
xvlTcZAANq0P1LLDcaklerGQ+yLpBMJUsRFLS17hH7alHNiLZ7HOi9WN5SzcHcxHsey52RzQMogA
c+BDBroHwH3gmU3cC15by3bwEk8eBJ25mcOuZ/S4AsxuRXJMQvQTq4aIC96BS0LeIPkV+xuEdMh1
uHZpRyS6JNt2rCX5Eh2OZiS+yQZtnX0efWaETXLpOE65SogU8Q/VbcsbdSy7HS6/DUpKm06JCCYX
k+xPZIxJ74sV6ZzjDglLJA6FtKUmK4OxxAUkjSs5RMZu+JUFLT/Wz2HSM/TjdHypUzKpedIStCHX
T8Ua+iPh3OLuGGfYHjCjC1+2OPU9/YTujgNUcga5OGQX5+c7kAyKbSVsEhODoziGvZKFKX0RJMJ+
/2eX4IWcI+F9Zo4nXEN0LSNkA+l7xU+VOD8oKstNVZS3zrWlEiB0Z+Oap3wCpm1Q+dQmBo+9gYJD
9NmagyamRYBXeOlcjcNyTAv5cRaMRyVT1mmAUrOc8J+nLMh5b+nBOYbIbYSXLqIpt2BHgXmhKOKj
AqJVUoQoK9SD3xcYeCsOjcP8s3AlOBHTLgT5Lifib3q7X5i5ZrR3Iou1OZ6mIUPLGWxSx9hD50af
PDrNHmTm86ijkK7b68jpdXSat9Sbh19TJ9BJbbRkgJ2sTQhVPInPXbIP3eIXAc3UNo71zLaDcfX3
fvhih+6A2rf1CJ2YiGItzcn8LT01JmLaFgxG/tYDtkgjnSVjdJurgdTUdAl5hNy6KYMuF5KoNSOg
/NFS+kZ/nsw4KNe5uam7GLDL1JShOTNeROJQokaRCmoulN9uMtPqMvsv2Cy1FT6eh27TqeIpvLCg
WFZYUaWTs9viH8re7Tchj4/Etw72e6Bl5x4s+ny3aRbvK514RNm2NsC2EIgsTZMo6IvLVuAVocre
y8XF5fZDhf91R032D9yEEkYKpMafa6NgBBEv/IR9gu5a0pYQkib4LR3SzkbTlsRNXH0rKeLFR8by
oNQ7uvNU52pHuONMXqv4cn8uOMYe+bmL/7wAibtIA2m0uPtSnWp5STIQ1cxzuNXMR9JuUeewu9CJ
9v6ZW86ncLTyPytiuNKF37tDynCFi6CTK2TsuYvW51CSTmXsJ7jXyS3ScXppFSgtltDdJzeXGfPv
eUl/UYIS1i0wDpV/kyKFwNzVfN0g90fvq77/BKb7IIPCkt/tbPXCfr9frQpKMGjF/VoeHKmmgzWm
PrSWU5qezCM9y4xxozhBYfG+WFBVouz9ojAv5BNUiGTj859+UiZFVc57Ej54fmnLdP/001ANwNic
HAQmCrU+xfRpE/rsfkcrmxikngZ1dbUB/cXpMBP7nbtpydXgxcWXhtbClLfv8ry88yZTXuyyas43
izWFXx7RDj/+57XPaObYt9F32OLAhuBYXZwCCra2JZ+03muK6myyupnQQ9tBZ/biWR6anqVRe954
bkZ9IqOztTa7zTzbq80309bb1XBtPyNa7o+Y04fovEsuCTiAmVxtR13ya3WI1OrgXABya7hLAIjF
2btEu3iDhUCxVT25+47srijx4pn3dIK1yeSL7OUruKUPyY2lyC5AzoQNaMZcARPvrr26Uim3BZ0w
70G1nWOORLZ6S3ZidXWIcnEhEA26kGpPh0qMiDrUmJa2wJcC3/aqzxJMW6gO3CTDknCrWicHPuyL
KqbgVZF57xtsn88+LJbL7KyiKavAzQ/rB99wJaOJC72/7WYVx2ZBLmpbuGz1O9Pli8i9Et9XWt5h
Bh/OHgQqqw5+4JtM+l2GV2FGkk9hAik4MFojU6eht3RPztBzT1nQ6mXZ3DZ+9UmLKegUc6JjnrpW
rxTFie4gQDZHx8ShC3F08V79kLpx+02hOFwZXAyi9e/hziWKp0GXclFmt/WOq76KQSDbflicV3/f
j71kLR4BUhj7vXs4AabIaSDmDvFUqver3XI5wu9ezV4/e/Xdt/8lDzcIju7xEO/qo+gnRot5bKAL
iTMfYjrBTNupeis8HWXP6tX2NZDHF0B+Xq6A7w/zLmOoQu7uxgT/s8JHrKwFGf+K8LuAuEaeOk65
j+Rut6LRWMJdVsUqw3K0TOcbFnXdS0hFQKQmcZ6kPN6GCKq37oM53PTFiBDwi7TTt+47CEFXw0ed
duT0Ft9xe2WUPPliolTKz5Zfby7SHoV4CNQC2AA/f6KIihtfbxYX5BgUyKvJbSfKvG4n0XkndUqc
GA9oUDmP+B38yB4d7F3YabaR9krfWZh3GAMyMmHOZ7v5HNPMXWAdS6R++JHL5Vn+4Twp4P4Z4Rr4
MrNoB03JDQuLsuKEy8U5PX6BkuzESmBBQOCC5vdSpWUSPjAoQYEakBPIpl5aXs6/ZQ2IN8YpUE4R
mL/pyRmmB7KigetwSlnhQN2lUrAYvWLqxAJQxABKrshdYDqz8/oaMHRx5jBCd7XoYMZ6NBqnMWCM
Z8QfZBup2qvLSLXrFlHACB68q5XcR0U2D32I0s8Nkvg7esz/ADOaoztW1TtAyLTwJthgeN3mPXX9
cNi/0o+UK/CTY88XWLOMCFa1G0qc5diMBmd9P1+8xQV+xPMyoKscD4JASWyRoGbEd3DeoviIxem5
m4/S5z5zXnPnePnd1ilQPseGVfkikNryeP9VmHd7CHNoVfJmWJmnuHbGjVKCUgM0i/GnsIaILOPY
jJCu8QKA5G2REFROOu2wDT/hzp79iWcS3hRvggurer7BjAEslFfRqcuC4Reuuzn7dw8GEp6iLRIZ
TYTOOtsqoqrZ2SCmX07JS3xpNp/DIQP9PD67pA8ak1z0XXXNVdZUpXWpQvtJIwyumXisK8GlafS9
XgF9DOPQl3/qEhVvYjCDywjwIMUiG4yoFmypJRkJ+J8ohr2EZ6Q8Zzqwy2hGLpa/mdmVkZmALZLU
AgEPrAFeZ/9FXIHFefTzF8fR9PJTcnpegrZITO91jh3kdB/2OiA6TFlysWfNqlg3l7VxiG7QUeQa
ZBxgmyqzBIjhW1GSNovOo0zBT9AL1nsudmpptFIFVT7nlvQYxkV3NYLIWBNfPGPz4Ytnj63NmwyJ
tygsrJCzFtl3b7/9VowD2OVRNqTS8huqeePAWZSSfUeu1mKVsyEBawFLpfZHo6PR48iKaXEFQ0ZJ
9wOOztwcEeOsMjfStzcmaDT673E8Jvrn8afFaiafjP2EWFNIvWvjXv3Rwy1y050SDvnfo9/uFCcK
vocJ4Xv4b/C9Tg8/6kfb4lPCOuDbCPGL0M63mlnTHYN/MoAvB6c9/+YGTTAg5rTn366gCfpMnDre
uwxv3E5/GXilcACGA5z8aCheg7GqpcMkHwWmsJZIZjle/ZB0BExaK4yHsk8fpy2mIa8MY8AKiNYM
6JqlFGI7iFvQsnuL+Az3bNGR3SL597N2KboPgi168uguH9qzPmd3Ab+mbVuIV+qgLXTfx9ozpXSc
W/ue86XYs+eP/2p7TtdP9xyDED57z/c6+MdiDFKQ1Ps0fD9RL5z23oguqd6IOft7J3y1zB5EHkDp
aAQR8+CX0K3SWCaQcWcJzm0TkgdPLcYMNSJDe8hBHLlhGLJ2KdG0rdcWnEhesAidQtWWTfZNms7N
bJPhXEkDmwfSRstp+NM4yNgmq7nTYPO903QhXNwZDyBlCheWGctO3nEc8hjlHKcR/7i2ez1vC/mO
ZUDDLEk6knGGfHRJ5uu3o73fL8n6vZIKVbThwe+qlfuecdQoojutCe8Gg54TqRJaNLn+2zyU3f0A
4zkGQ4m+mvKMhN/CqRuOtTnbLdB4NZlhRp4bLIPTSLhxX+weXhzNdoPPw6CaPso7YRaPiyYheX++
3G11z18Eh0BiHJNOcdv3JHESgSUQkcvloNc75c2DcXArA5F7kr0EPW61vHUNWvhW18Sz/kI9skIN
WuwsovGP1upa76g6ZYmRn0hI0VaSPXOcEIbob+KqJOR8h8bL/C+S12MBfVnKrWqrCA4t5D4ZH5S4
BaV5NFsWSHxxlAeJc3gFEk/nqKBrJFVCYnMBbBF53Kn2iD28svR8RkHuJWRr+O9BUoBCHgel8mY7
nCPqFq3N8QKynCDqFukJ5ly0Fy8h8bj1z4g3myOP+HGKQcYbKpi9h1sn7S2JlyODEBHb7oDG0wGX
ZSRYR0w9aX5pg2b/MfPFvAvv/qeS/P4JpAS10O2/jP4txBgQmjCVyZzEwQSf8+XxdvRJWeAMJnVN
TAJi18Qkyqd5rRVvhOFGF9+wW/NMCFu9O2O+ixansqIgdnp3YhQDrlycn1f4tjQyhYAtfsC57ZZY
KDjjZyM4PaAkhDXrumkWWNUV3Q3I5K2WKBqjqbN5gQHeq62+fQJbrCrgwywM2KE1uTm64SEQ9e7i
knVFrRyCvHi3ra/pGY/C7mEPG7SEwUBnXIEW5QnMKorclS8L/HtBRQIwey6Wjo5ZLolcTB6/DEqy
sGzx8pWYpbEl7ggLFbR3HO6vFaUbs37jS8dZLSlxclF63zXulzP0qd7MZvqdgU6e4MIw5chkDnMu
NpxS6wc91XVT7cqaKQTm+V7VOlzuFjBeIHu9bYmE5lQK3b5gpEe/+/ntv8PKX+tNjbg0uQLhDD+/
+/jmf/+Lv/mbKG0SOipiqiXN7WPKqaFr8IfF6qvHfXxiVNmX/BqpgpMEPkKzwWo7mMaPlzLB+fZ2
LR6fbSmSLIdEaIfrRRnIFbAgXc/5dYn+28P+tmiusHn28EX28PuXz7hgCnbWeENfB+ic4PvXr755
/sMPszfPX//h5XdP3zzPwlzkgMElcRZezwS2plwuJ1cV3MXlV48nr9bV6nuGcdgaNhZNM9I343VY
GaVlmjecyHNb6VwM1ygbHx3U/xtEnP9IfaSrm7iodY9qRiTa3ezoa8mQFjRERkMnImeFSeUWpeUk
zsi9d7+8/Veau2xdnr379Ob//GvKW5ZRwAK59KP5/WzHCU+Izn3/7Pf8ui5F/p7Rz5W6KXqJwEYO
jhN+//USKQ3GY1jCILKp+amIgNdUJvsQUFLo4qRXYjQIR5CURMjuOYzY2RvZg1L2AHUu1r4oUZGz
WryszRrf1GWdQlI+DjCGm1J6D6bSGHZ4mE/M95+8gWzlC6/+nI27N+xW1+eGuHVm/fm+PHu5el9f
YeKEbABdF/TXQMgjSF0MkE1ZAW0snA6Z9lLapNqiwqPD9Toaqbe2N4cJ80dXLsACSWsyx2oCIgqY
jbSX4ntiAIi+nLMIIK02jfJiYDRwZWB266GOtY0c+d1EtemXVk1S6FJCH+8hTWsXZq4U4N6mXq/x
cuHb18tXbarFfMO1tJA7UKZo/BuwZD47K86vvKgQow3hZ1s9UqD30o1y/aY2TwmTTcoJ3g9wByDh
L4ZxqqjPSLtz18jcRC6AVCDxKA4U9sJziGt+YLRe1Erel39E7XozDNtNEqXFNQ/zE3w59896+PLV
2AqQVmzMnS1ysB8uoXv5h3TQSkvgSppos2nPhjPAjBM5ekpykIrmM7fJi7yLvzX5ho91vgMSrqVC
S/cElXpiil2Ge80s0t4h9VPkm5cem69wd1Rc2x5zX6Y0hmhOwzA74+E2U46hxlLyX0LgE0XPdYUV
9sECE0PbfCJxg6ZbFHFIM8H6OU7GBBti8tdU1KFbJBWzrkrTXHJlY6bwvM0EoHXfX34HgtR3T7+l
BOxPsvsNdMrux9UJnJ7z5a65dL01z9APChSo7TUWybpm1EfCNlSY7WnWeEbUbLg9UzfLaLvcnZIl
UFkVTBCyqcaoRCHZ16v+WspzDYAhyFe0vI30VMVrS5pahQUiMXz4fb0oQbO7rqzf2aZaslYGAgrG
Z2m+NHFXR8fGZYXKF3kiYxRg0WBurp4hQriAvdQ2rCvWzzEzey+mUB5lMvtqVDIvL71mpPf60M7i
yuACSLdDzys8K53S1E1xnZbcmEU+084J9EyfgkR7TsTp7aq6WZPW99z1LZUox/luSYfgwDSRId4K
MjiGa8c7dQWyUVEaKcvxr9OqZSRujZSId4ATX3NvCLzeXGT68WlCdwq6zOQDNpYNA9JXzrBU+WwF
VOdyUZbVasaSgtQm5FWgm2dxg0X0gkKBGsuHATNi7YTfThanwEpB1MBaR0tOIN+f2TOBiaoZllAI
Q40W2Vg1OM2hoTGOLmYomS3PPMnSSWWLTFGYY5DGFmCZEZBC7DAiKiRZ+POIVi1jTJK9orr26WQ0
uoP7dztdkYmB4RXDMMTv+S8vdG49MXoGaDfsNg4wvvvz23+jKtv2el0uNu/+65v/+7estTW7Nala
hOeb+v2CKJKN22KrSK2PQmQ40pzSaA9DgTPW38jCo3Ne16ur6pbDKaSp85UyRbS3wWSs4rZn4Ysq
aDv52U06ryBQShQDoY/0JyMk74Zry6lWmK4Tl68W/y37H2M58aNATh+WwPgqcozP9bDinbvl8sOo
CPvyH5APDuKT8IaC40oAvsUS/8bCB2hxJMPYrtmhiOJ7VRurnfKmAa9oEKf95nBb3zhDx00Pc1IY
YrdavNtVY83zO0ZFhEuV2dV4Q+B9B+AudsWmANRDHlajOZKGS0fg3YPbuqxBFSvWiw/FBvjSk6PJ
EXINWgTBH4PfT8eUwCmeFQ2fVz7hwxu6R0Z1383pXl85J4sdOcvNand9BvJbGb7s6NBOmSA7Wxio
r4MEBjDoq30m11cAzlDn7YqeYsl7QqUCiYJOSFrWOWY4DqDefHFzbJaRKC9SrSlYpK63uBcKB5Dw
+vxqhlH6+D7qP4PbGzPs83Zh8rto99c9t4Sa2ZX2FwxskrocvsraHaWr09wlMjc8Qz97YDSiHKh+
H5NxZzzvgIb6Q7I6ikGB88sKOEhLJiTTClTi+n2oL3otGJUOKZ/ZBnA7Rg2Ybo8HibeVmTPc1sVt
NIbg4KkXG0EnDELS9v2Ieyr9dE33plpwSO9lRG4RmM1arF0EssN3VNalN2+X9Wi3pN2outmi3egE
VgyiH+gK2wkDIYk9rjF1LlvcNbXRYMZEja22mwH0CQK4oq7KSQeWHWG3if0zGsHo23POJUA7whPH
eSBMXFgrt+KABydPA7Ig4g5I4asCvvFEgcyyCiZEoBSSTkPlUZvdmTOBeGv7FIFP3VKF7M2ljbRy
YriK7CcPjX+a9cQDUoH2X6iknrxmhCBFCb9zfxT5EbMg7M6G/ZMf/3iK/AjFU0uo//D0P/+np99C
668eqVyPkjA1yJ7IzxZNZWj852TKP7KAfuNAo9jlIchEuFSKN7ly8U3v3X97+3f0ZIS7cQ5nUu22
i+W7//7mX7JJvkePhJzxm+LzCtBDMjKDbC/xmWzcFHN8yj5HEzUqmchqCxbter2ny2X2Df7G9aP4
YgK1rzfoDlVyQSj6WIlhrKxgS4CGczWrHqf8YMcfTptlnikJZ+gNlVWj7aLg4MBLhYcfXKlWCVfd
XVxXKj7SZ8AsAIYrfUmtjqJZnBPEe4OtQJdBSEGqPT56/JuQvNhfWe2RP/xG681uVcHxkAfMduj0
GTt9Hv4mDL3RQhKf3FdBUOvTGSKw9YR/D5Lr8k7zcnC/p0kJiQY4gd9Pnd7rXdgbvdngi2h6WiSc
HGArJzkbppZD41PcGYzhJtBbRtP4ZKsjN4DN/2FXcFDO3bZQSSevuiO6KDI7ICpKdwBHy3IT2zlo
fxCQ/gBt9yk4i52dRs8r0vTxWzYNuOcdnWJCUKNG5AGieExvd9wt436+tLYi0Vyuiak0Refli8e2
3ZPj8IaFqfmq93g/ivPzelNKuDAtatAIDFFWX6qUN+SVcxO+EK2vuVJLT/bQ5jqmuya5LU/jaSZN
vQmz1KBrBFJ0Z4ljnzxEWU2px5MwXl/hchaAgNHEJ1PqdNpWqhPa/eFN1iy2O6bdXEiCqXl2TakJ
QSe7CIMTgsSzSwe5QuFC6Cvi5nndbJ+SuwtTWkt0ndfkp9z2DRDnh9x4fF7vVkTiU+zGqooIuqRT
BDmhEIcn9Aypr/EClKomg/Za7s6xVc9bFWz5WNxxmnE9HxdjHuJL4hrjbT2mKzaGMcbOPcH/ocjB
Ca4Z9XEaoLxYhgudVBksYGebirG08tPGGNblsALks81lvSQ1udmBZn1elRpohv97AWfu70U2X1Y3
i7PFEsvvXXONRtDQqcKOlcpIYtSjFWgwCVtBy/RloHvMMI2sJgYDymEJy+NXJtkVaOjEdMwOII78
yn2szNi5IFxGUnp4Xtllsr3QsD/SPlflN4IxzwkvYTC8sCCz4Hw2RDfdeh//F/srbCqfasT4GGmO
uWEQdczoaTwLdQx8oNbPdgv5YT3F26XXg2igNCO3kChsXDBnS3uzs9f0Kb4Sd1xO9iMnTCUi1RDR
5y8s3iIuIe7VNT677/W09gUq+lvqch4fPZq4mQXoFgwtkJw7JJ+YAe1QeSSWyZgslskfh4pFPs+N
pQqKAEAmAjLCzRpUkXKYChrxhdw4EFsZvv+7y5/vdr/abxEvh3bSuSPmXmUPwm1rA8fBmztdH9om
Ilo4Y4jfh1whfwTXp06PIMwh1UU5tka+kIv07n+8/fdqhuY808hsSEv6n2/yAWlJb7dIahHpkSCa
VtbhwvdRCuoJavVrZLtaehC1u9V51VpZUBug1LTA0oe/5/CSpzr3c8m46USeyL/NxG/U2yUDVHq9
e8TN8F19Q4lR4CqbUAwtboD7oOohmZi5nij+hLYasxW9e+zAtN5UwqIw/wOrdWa7NhW/fiJ3KeEa
UoIZdKKk10esLAnDYKUn5FMrysIi2RBAzcQpL+v6qpG0L2VGRSiLhozgYjGA/uQ49lJhAV25561Q
n9m5DGi9uS7wSX29LFYE9tD5bOkhkUNuzKZw24jx8Tv8bSleuhWo4yWuAXaIvVBJHGjOizW6/16C
tPweN4Hhl6yFKAW4xb6m2Y+rjyP4zyfaih9XfxaDBzlJZNsPNY2Kmw6CXymWKBwXJkRG78CIbqCV
w9zpVMxJuw1NxajqpkCJo8mGk/cg521n35BPOlxr+suYU4d5LnDhkxVJIAs7Cmnv+J0zh+TDxCik
NWwlmRpgJnIYwIgprGGC/cTa00BHejstF/N54/MYd9TjjBxFinVTzeYoknonSc3VI3hGbgitpy4G
lAYAo/c4xhFORkL/9Uwsu+Hgx9Ug5xwk3Mt4D3QAZFDrG2mDiylWVJ0ccdnxCf6hFv8A2PJ1IxFC
t1lfkuJTS8AVrslKfwHYk8kEUIe36Qxt+cHWcevj7JHzNovmo1hm05YO9GjPLId9nVnn7I+4safY
Sfe4RPTZpiqshxlnqFR4FH8Wo+wcqQAqSdUGnfkdKE4IwulpXGL0nBycPybK0fM0D0IHYKqMxL0+
tfYah72clx1q0WKx99bZVkfJJ9lYS+msWKI7AJBbNGQ3pq6rswNu8RUWmPnQHshDsHvQWjRQpRhn
H7HrODs6JX9vQObADOFdMu8ApjQd+b2Fx/Lg6OspSuLoFddRaSOE48HRNKixwfDj3v+657krOBSY
b1v3rTYX7rUatpGv4lUDXF68X5Q7kKGYQiyY9UVEnrgAB0SGY3BHggA5J+ziR/Shhys44NxlQL8H
fAOfYjISSldm2INmDRMWwfR5vUY/HoGFvKlYqxVSho/4Szqu7PyywAf9ahPc8U3xYaY0z90L9N4E
sjXI1SGLKJhHIk9M35NHp6eGni8pqbL5CQ7Li95bcvRSg5rxcPBxkONM/pefUl/+OXTApOHVAXfZ
9QrKgBCaIfLidjzIli6mUAvjyhITc4MYL+g38cX2USI+fwro5fd0Omk95z+7x4GYUe9AgwbBWPcQ
piATBxzGxa64qKwZgVW1bMAYDwQcx8VB8SOPLneJvkAZDvP0XrHBZFMJLhsbQbGsxVr+obid8BIi
5BeIAOl4z30EUibIuzw9YkwgZxNEkkfO3+errfOV6wHYgivwbYguESHXkfGAE0kysLrzkLYoywaH
PHRKD97h8O0S5zI4VxlmnmglEMV8RH/VYR6F75c4ov4IgGQPBqiC4ScA58uh9ZsaH5FuhsgMG3Xi
kUXiVtH+fQr3T+SDVMPEwtf1epheauInXsgJu3HBTtBWKKAdt7UNpD8P9m0Ubo/jVRZuiw5sWxwH
LmI8ZA+1nu/ZqYVeF8WvBtB0TTEXX1ndzfn52PlDVbbvimvXmcBrDR+0MisBhgrITDQQ8+hbo5dF
NceXXse8ZDkUJbkulpTC0pXRuaIreocivSgwAP4hfQLt3Yh3ixKoynH2m0fI2b+G/+DW1Gv0x3uM
1j74DulU41KMUXaUUTAJaBb1jjJ2M1MAIEmHMq7s+CaIXwwZfrQeLH6ujvGNi2Z++FiuDa2spe+G
rdjamTqOEUwznwzS7K6vC7JjACbcbzL6P1wcEIZMU95OO508hC1g+60iXVyflUV2M8WKwcbd8mYE
wyzwKZ3qCYyM6p3nXYKLCW32h7JoIN0xBuZm683e2kM6yOtfWwf7doKr62o5hJ9HaIP4uVrBx1w2
BZTEs7px3N3wjUvCneS3QZ5QsEzMih8KCDxuTaLj8fEgouC8fDqnXFzBODtGYFFNS5szVPpmZgQ5
4JGuII+FeHvg3pzmywPnlbs6q97NvPEOhGAbTL79rHnvsGjECW9O+uLuk5px2melGeXECf9X/+in
DsMuVvuOXciw8YKeuq+qlLKWKA6aoSam0Uv43ivb4M574sEFtGaoMYXGgsWWqikKgOhVq6JWjQ/Z
22KxBOJcYDL5CTCsYJUDYDtnBUVyr9RZ5ZJcXyhK8DabEeGazSb5ICjhuzPu6fJCL1qnA370sm9j
yeSLEyGjgbqmknH3TfPe4i2LCtQkybpSka0GXU8/VJWUW4AfznAzGaS3qyXGkI7HSorUtLg1IjZW
UsGXtZKD2Erj88qWLitjs+8R2cKQk2H0DhqX0LPRwoIS+HWxFCn45VykfEqtCj0JNPzu1uhiyKaW
lIUSYNyy1yqupMsCdXKacOfn7XTNtDiZcwmwAfE2w1FzEGxAqspNwuQNamikTw7yxBRyVh1zbMSW
LpPwPTxkFqqFwIfkOuBjMQOQIv60a6iW/Dn6adE6sAAVzYWo/XO1qVHwuHDcmsmsw2rk6qIaXmPm
XuH5OQcOMHB5rA1gm5PFKWZupTbwOeHL6JlbcJ+yJ9mvHgeDkUHhUeKdXGVDkq+oaP3NtsMccgJU
4gfAVAq1BLnEoqCi7qBTiMgGDiYvVoSsI3p5Hb8nE+ll/YEknYXvbCA4wzsy9X/Ts5Y9mnqvV2av
kX7b3Y6LezpnZPqkq3kSEGP3XMaL00NsYR0HFB7SAQcTHs6DrtMxDvR7jucvOaPwnKbjRAP/sEyT
cCGiMW0Wa8dYk/INUaVbo4YA1uXibLLCD3SIbPJhA0g+OmD1if8RsN5A4gvTbpw7VKDyGE2Sun4O
ATmEeCRw5+lWXHEAd3AD0QaLA9zfDLp1A9BMFiOdcmQmDDyG7G3wLuaT5L1MgPctIpf12UQ3EPIF
GskbEdyQTcElkklh6rWDm1DrTiwUp6NcI8c8SH93EKShVPWaUP4OcAdlyFNq5IkBysAa4yKSkQeu
pKNBYWvQvraTNdvCBJNaN+teFgCTGEIwMIn828/EewLboSegyW+MJwj96NESUvnnW0Uv0zt5To6l
5bnZfGMIp9kA8OnAUYnpGlKc/yo1eMsEwdlRxHnuwUjAfwaQvOi9UAbD3xnMgyhcqxa1/5RhlOua
U5IJJnLAXsMOBEOrS4pthH3IacbhldCcq9OcHRhJNuMRlQRe0TMPX5qrU1vvA8dRk0Yk8CWIz6vr
BRuyPb4q9zhmjq3XiUx26CDvUBj8u3Pyb2RNOBuc92lb2/TtpOk89mXukGz8Vdf2fZHYPuIOXRA/
W2h1kxhoM5c/SmqkADE/Xk0VrE9oFB0gcIPuF7csMYau55OeAt4vJTbWnDHOLPr1zEOiNt13advY
lXdh3a4SSJM+RN8RvB39LXy5f+a5s9KN8dbT5claqZ+/1IMIVCuP+8dcrMHCcLVCIBLLbSdfjs0F
A/dH8r6UIlzq0EzqMLkCYA+e4BIzwvJP6pLMJkOQt/V7+voBXnzqN1XKt8HInpmYS2mgB9TPvZ0O
93Y7BODmWt1Iep2w4ZhchOlIxHtoSnJ7cCF4+WqW/tXjPH7S8i9q4jXGUTwGodqEICxWu2pP/3H2
uT0fxD1lK4IXFX1Geey/LoWvNkFnm4NDsEl+7737X7vJ/wfGe0qk
"""
import sys
import base64
import zlib
class DictImporter(object):
def __init__(self, sources):
self.sources = sources
def find_module(self, fullname, path=None):
if fullname == "argparse" and sys.version_info >= (2,7):
# we were generated with <python2.7 (which pulls in argparse)
# but we are running now on a stdlib which has it, so use that.
return None
if fullname in self.sources:
return self
if fullname + '.__init__' in self.sources:
return self
return None
def load_module(self, fullname):
# print "load_module:", fullname
from types import ModuleType
try:
s = self.sources[fullname]
is_pkg = False
except KeyError:
s = self.sources[fullname + '.__init__']
is_pkg = True
co = compile(s, fullname, 'exec')
module = sys.modules.setdefault(fullname, ModuleType(fullname))
module.__file__ = "%s/%s" % (__file__, fullname)
module.__loader__ = self
if is_pkg:
module.__path__ = [fullname]
do_exec(co, module.__dict__) # noqa
return sys.modules[fullname]
def get_source(self, name):
res = self.sources.get(name)
if res is None:
res = self.sources.get(name + '.__init__')
return res
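# A minimal usage sketch of the importer above; the module names below are
# hypothetical and not part of the bundled sources:
#   importer = DictImporter({'mypkg.__init__': '', 'mypkg.mod': 'x = 1'})
#   sys.meta_path.insert(0, importer)
#   import mypkg.mod  # served from the in-memory dict, not the filesystem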
if __name__ == "__main__":
if sys.version_info >= (3, 0):
exec("def do_exec(co, loc): exec(co, loc)\n")
import pickle
sources = sources.encode("ascii") # ensure bytes
sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
else:
import cPickle as pickle
exec("def do_exec(co, loc): exec co in loc\n")
sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
importer = DictImporter(sources)
sys.meta_path.insert(0, importer)
entry = "import pytest; raise SystemExit(pytest.cmdline.main())"
do_exec(entry, locals()) # noqa
|
djds23/element_gen
|
runtest.py
|
Python
|
gpl-2.0
| 222,798
|
[
"EPW"
] |
5746809f6c8cd171c089f3f612f24c9983afa7962a25fea55149b13509a11aaf
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coordinates a global build of a Spinnaker "release".
The term "release" here is more of an encapsulated build. This is not
an official release. It is meant for developers.
The gradle script does not yet coordinate a complete build, so
this script fills that gap for the time being. It triggers all
the subsystem builds and then publishes the resulting artifacts.
Publishing is typically to bintray for debian packages and a docker repository
for containers.
Usage:
export BINTRAY_USER=
export BINTRAY_KEY=
# subject/repository are the specific bintray repository
# owner and name components that specify the repository you are updating.
# The repository must already exist, but can be empty.
BINTRAY_REPOSITORY=subject/repository
# cd <build root containing subsystem subdirectories>
# this is where you ran refresh_source.sh from
./spinnaker/dev/build_release.sh --bintray_repo=$BINTRAY_REPOSITORY
"""
import argparse
import base64
import collections
import datetime
import glob
import os
import multiprocessing
import multiprocessing.pool
import re
import subprocess
import sys
import tempfile
import urllib2
from urllib2 import HTTPError
import refresh_source
from spinnaker.run import check_run_quick
from spinnaker.run import run_quick
SUBSYSTEM_LIST = ['clouddriver', 'orca', 'front50',
'echo', 'rosco', 'gate', 'igor', 'fiat', 'deck', 'spinnaker']
ADDITIONAL_SUBSYSTEMS = ['spinnaker-monitoring', 'halyard']
class BackgroundProcess(
collections.namedtuple('BackgroundProcess', ['name', 'subprocess'])):
"""Denotes a running background process.
Attributes:
name [string]: The visible name of the process for reporting.
subprocess [subprocess]: The subprocess instance.
"""
@staticmethod
def spawn(name, args):
sp = subprocess.Popen(args, shell=True, close_fds=True,
stdout=sys.stdout, stderr=subprocess.STDOUT)
return BackgroundProcess(name, sp)
def wait(self):
if not self.subprocess:
return None
return self.subprocess.wait()
def check_wait(self):
if self.wait():
error = '{name} failed.'.format(name=self.name)
raise SystemError(error)
NO_PROCESS = BackgroundProcess('nop', None)
def determine_project_root():
return os.path.abspath(os.path.dirname(__file__) + '/..')
def determine_modules_with_debians(gradle_root):
files = glob.glob(os.path.join(gradle_root, '*', 'build', 'debian', 'control'))
dirs = [os.path.dirname(os.path.dirname(os.path.dirname(file))) for file in files]
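# Each control file sits at <module>/build/debian/control, so the triple
# dirname above yields the gradle module directory, e.g. a hypothetical
# <gradle_root>/some-module/build/debian/control maps to
# <gradle_root>/some-module.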
if os.path.exists(os.path.join(gradle_root, 'build', 'debian', 'control')):
dirs.append(gradle_root)
return dirs
def determine_package_version(gradle_root):
root = determine_modules_with_debians(gradle_root)
if not root:
return None
with open(os.path.join(root[0], 'build', 'debian', 'control')) as f:
content = f.read()
match = re.search('(?m)^Version: (.*)', content)
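# The control file is expected to carry a line such as "Version: 0.45.0-3"
# (value illustrative); the multiline regex captures everything after
# "Version: " as the package version.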
return match.group(1)
class Builder(object):
"""Knows how to coordinate a Spinnaker release."""
def __init__(self, options, build_number=None, container_builder=None):
self.__package_list = []
self.__build_failures = []
self.__background_processes = []
os.environ['NODE_ENV'] = os.environ.get('NODE_ENV', 'dev')
self.__build_number = build_number or os.environ.get('BUILD_NUMBER') or '{:%Y-%m-%d}'.format(datetime.datetime.now())
self.__gcb_service_account = options.gcb_service_account
self.__options = options
if (container_builder and container_builder not in ['gcb', 'docker']):
raise ValueError('Invalid container_builder. Must be empty, "gcb" or "docker"')
self.refresher = refresh_source.Refresher(options)
if options.bintray_repo and options.build:
self.__verify_bintray()
self.__project_dir = determine_project_root()
def determine_gradle_root(self, name):
# The 'spinnaker' package builds from the project root; every other
# subsystem builds from its own checkout directory.
gradle_root = name if name != 'spinnaker' else self.__project_dir
return gradle_root
def start_deb_build(self, name):
"""Start a subprocess to build and publish the designated component.
This function runs a gradle 'candidate' task using the last git tag as the
package version and the Bintray configuration passed through arguments. The
'candidate' task performs a release build of the source, packages the Debian
and jar files, and publishes them to the corresponding Bintray '$org/$repository'.
The naming of the gradle task is a bit unfortunate because of the
terminology used in the Spinnaker product release process. The artifacts
produced by this script are not 'release candidate' artifacts, they are
pre-validation artifacts. Maybe we can modify the task name at some point.
The gradle 'candidate' task throws a 409 if the package we are trying to
publish already exists. We'll publish unique package versions using build
numbers. These will be transparent to end users since the only meaningful
version is the Spinnaker product version.
We will use -Prelease.useLastTag=true and ensure the last git tag is the
version we want to use. This tag has to be of the form 'X.Y.Z-$build' or
'vX.Y.Z-$build' for gradle to use the tag as the version. This script will
assume that the source has been properly tagged to use the latest tag as the
package version for each component.
Args:
name [string]: Name of the subsystem repository.
Returns:
BackgroundProcess
"""
jarRepo = self.__options.jar_repo
parts = self.__options.bintray_repo.split('/')
if len(parts) != 2:
raise ValueError(
'Expected --bintray_repo to be in the form <owner>/<repo>')
org, packageRepo = parts[0], parts[1]
bintray_key = os.environ['BINTRAY_KEY']
bintray_user = os.environ['BINTRAY_USER']
if self.__options.nebula:
target = 'candidate'
extra_args = [
'--stacktrace',
'-Prelease.useLastTag=true',
'-PbintrayPackageBuildNumber={number}'.format(
number=self.__build_number),
'-PbintrayOrg="{org}"'.format(org=org),
'-PbintrayPackageRepo="{repo}"'.format(repo=packageRepo),
'-PbintrayJarRepo="{jarRepo}"'.format(jarRepo=jarRepo),
'-PbintrayKey="{key}"'.format(key=bintray_key),
'-PbintrayUser="{user}"'.format(user=bintray_user)
]
else:
target = 'buildDeb'
extra_args = []
if name == 'deck' and 'CHROME_BIN' not in os.environ:
extra_args.append('-PskipTests')
# Currently spinnaker is in a separate location
gradle_root = self.determine_gradle_root(name)
print 'Building and publishing Debian for {name}...'.format(name=name)
# Note: 'candidate' is just the gradle task name. It doesn't indicate
# 'release candidate' status for the artifacts created through this build.
return BackgroundProcess.spawn(
'Building and publishing Debian for {name}...'.format(name=name),
'cd "{gradle_root}"; ./gradlew {extra} {target}'.format(
gradle_root=gradle_root, extra=' '.join(extra_args), target=target)
)
def start_container_build(self, name):
"""Start a subprocess to build a container image of the subsystem.
Uses either Google Container Builder or Docker with configuration files
produced during BOM generation to build the container images. The
configuration files are assumed to be in the parent directory of the
subsystem's Gradle root.
Args:
name [string]: Name of the subsystem repository.
Returns:
BackgroundProcess
"""
gradle_root = self.determine_gradle_root(name)
if self.__options.container_builder == 'gcb':
return BackgroundProcess.spawn(
'Build/publishing container image for {name} with'
' Google Container Builder...'.format(name=name),
'cd "{gradle_root}"'
'; gcloud container builds submit --account={account} --project={project} --config="../{name}-gcb.yml" .'
.format(gradle_root=gradle_root, name=name, account=self.__gcb_service_account,
project=self.__options.gcb_project)
)
elif self.__options.container_builder == 'docker':
return BackgroundProcess.spawn(
'Build/publishing container image for {name} with Docker...'.format(
name=name),
'cd "{gradle_root}"'
' ; docker build -f Dockerfile -t $(cat ../{name}-docker.yml) .'
' ; docker push $(cat ../{name}-docker.yml)'
.format(gradle_root=gradle_root, name=name)
)
else:
raise NotImplementedError(
'container_builder="{0}"'.format(self.__options.container_builder))
def publish_to_bintray(self, source, package, version, path, debian_tags=''):
bintray_key = os.environ['BINTRAY_KEY']
bintray_user = os.environ['BINTRAY_USER']
parts = self.__options.bintray_repo.split('/')
if len(parts) != 2:
raise ValueError(
'Expected --bintray_repo to be in the form <owner>/<repo>')
subject, repo = parts[0], parts[1]
deb_filename = os.path.basename(path)
if (deb_filename.startswith('spinnaker-')
and not package.startswith('spinnaker')):
package = 'spinnaker-' + package
if debian_tags and debian_tags[0] != ';':
debian_tags = ';' + debian_tags
url = ('https://api.bintray.com/content'
'/{subject}/{repo}/{package}/{version}/{path}'
'{debian_tags}'
';publish=1;override=1'
.format(subject=subject, repo=repo, package=package,
version=version, path=path,
debian_tags=debian_tags))
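# The upload below amounts to roughly this curl invocation, where every
# bracketed name is a placeholder:
#   curl -u "$BINTRAY_USER:$BINTRAY_KEY" -T <file> \
#     "https://api.bintray.com/content/<subject>/<repo>/<package>/<version>/<path>;publish=1;override=1"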
with open(source, 'r') as f:
data = f.read()
put_request = urllib2.Request(url)
encoded_auth = base64.encodestring('{user}:{pwd}'.format(
user=bintray_user, pwd=bintray_key))[:-1] # strip eoln
put_request.add_header('Authorization', 'Basic ' + encoded_auth)
put_request.get_method = lambda: 'PUT'
try:
result = urllib2.urlopen(put_request, data)
except HTTPError as put_error:
if put_error.code == 409 and self.__options.wipe_package_on_409:
# The problem here is that BinTray does not allow packages to change once
# they have been published (even though we are explicitly asking it to
# override). PATCH won't work either.
# Since we are building from source, we don't really have a version
# yet, since we are still modifying the code. Either we need to generate a new
# version number every time or we don't want to publish these.
# Ideally we could control whether or not to publish. However,
# if we do not publish, then the repository will not be visible without
# credentials, and adding conditional credentials into the packer scripts
# starts getting even more complex.
#
# We cannot seem to delete individual versions either (at least not for
# InstallSpinnaker.sh, which is where this problem seems to occur),
# so we'll be heavy handed and wipe the entire package.
print 'Got 409 on {url}.'.format(url=url)
delete_url = ('https://api.bintray.com/content'
'/{subject}/{repo}/{path}'
.format(subject=subject, repo=repo, path=path))
print 'Attempt to delete url={url} then retry...'.format(url=delete_url)
delete_request = urllib2.Request(delete_url)
delete_request.add_header('Authorization', 'Basic ' + encoded_auth)
delete_request.get_method = lambda: 'DELETE'
try:
urllib2.urlopen(delete_request)
print 'Deleted...'
except HTTPError as ex:
# Maybe it didn't exist. Try again anyway.
print 'Delete {url} got {ex}. Try again anyway.'.format(url=delete_url, ex=ex)
print 'Retrying {url}'.format(url=url)
result = urllib2.urlopen(put_request, data)
print 'SUCCESS'
elif put_error.code != 400:
raise
else:
# Try creating the package and retrying.
pkg_url = os.path.join('https://api.bintray.com/packages',
subject, repo)
print 'Creating an entry for {package} with {pkg_url}...'.format(
package=package, pkg_url=pkg_url)
# All the packages are from spinnaker so we'll hardcode it.
# Note spinnaker-monitoring is a github repo with two packages.
# Neither is "spinnaker-monitoring"; that's only the github repo.
gitname = (package.replace('spinnaker-', '')
if not package.startswith('spinnaker-monitoring')
else 'spinnaker-monitoring')
pkg_data = """{{
"name": "{package}",
"licenses": ["Apache-2.0"],
"vcs_url": "https://github.com/spinnaker/{gitname}.git",
"website_url": "http://spinnaker.io",
"github_repo": "spinnaker/{gitname}",
"public_download_numbers": false,
"public_stats": false
}}'""".format(package=package, gitname=gitname)
pkg_request = urllib2.Request(pkg_url)
pkg_request.add_header('Authorization', 'Basic ' + encoded_auth)
pkg_request.add_header('Content-Type', 'application/json')
pkg_request.get_method = lambda: 'POST'
pkg_result = urllib2.urlopen(pkg_request, pkg_data)
pkg_code = pkg_result.getcode()
if pkg_code >= 200 and pkg_code < 300:
result = urllib2.urlopen(put_request, data)
code = result.getcode()
if code < 200 or code >= 300:
raise ValueError('{code}: Could not add version to {url}\n{msg}'
.format(code=code, url=url, msg=result.read()))
print 'Wrote {source} to {url}'.format(source=source, url=url)
def publish_install_script(self, source):
gradle_root = self.determine_gradle_root('spinnaker')
version = determine_package_version(gradle_root)
self.publish_to_bintray(source, package='spinnaker', version=version,
path='InstallSpinnaker.sh')
def publish_file(self, source, package, version):
"""Write a file to the bintray repository.
Args:
source [string]: The path to the source to copy must be local.
"""
path = os.path.basename(source)
debian_tags = ';'.join(['deb_component=spinnaker',
'deb_distribution=trusty,utopic,vivid,wily',
'deb_architecture=all'])
self.publish_to_bintray(source, package=package, version=version,
path=path, debian_tags=debian_tags)
def start_copy_debian_target(self, name):
"""Copies the debian package for the specified subsystem.
Args:
name [string]: The name of the subsystem repository.
"""
pids = []
gradle_root = self.determine_gradle_root(name)
version = determine_package_version(gradle_root)
if version is None:
return []
for root in determine_modules_with_debians(gradle_root):
deb_dir = '{root}/build/distributions'.format(root=root)
non_spinnaker_name = '{name}_{version}_all.deb'.format(
name=name, version=version)
if os.path.exists(os.path.join(deb_dir,
'spinnaker-' + non_spinnaker_name)):
deb_file = 'spinnaker-' + non_spinnaker_name
elif os.path.exists(os.path.join(deb_dir, non_spinnaker_name)):
deb_file = non_spinnaker_name
else:
module_name = os.path.basename(
os.path.dirname(os.path.dirname(deb_dir)))
deb_file = '{module_name}_{version}_all.deb'.format(
module_name=module_name, version=version)
if not os.path.exists(os.path.join(deb_dir, deb_file)):
error = ('.deb for name={name} version={version} is not in {dir}\n'
.format(name=name, version=version, dir=deb_dir))
raise AssertionError(error)
from_path = os.path.join(deb_dir, deb_file)
print 'Adding {path}'.format(path=from_path)
self.__package_list.append(from_path)
basename = os.path.basename(from_path)
module_name = basename[0:basename.find('_')]
if self.__options.bintray_repo:
self.publish_file(from_path, module_name, version)
return pids
def __do_build(self, subsys):
try:
self.start_deb_build(subsys).check_wait()
except Exception as ex:
self.__build_failures.append(subsys)
def __do_container_build(self, subsys):
try:
self.start_container_build(subsys).check_wait()
except Exception as ex:
print ex
self.__build_failures.append(subsys)
def build_container_images(self):
"""Build the Spinnaker packages as container images.
"""
subsystems = [comp for comp in SUBSYSTEM_LIST if comp != 'spinnaker']
subsystems.append('spinnaker-monitoring')
if self.__options.container_builder:
weighted_processes = self.__options.cpu_ratio * multiprocessing.cpu_count()
pool = multiprocessing.pool.ThreadPool(
processes=int(max(1, weighted_processes)))
pool.map(self.__do_container_build, subsystems)
if self.__build_failures:
if set(self.__build_failures).intersection(set(subsystems)):
raise RuntimeError('Builds failed for {0!r}'.format(
self.__build_failures))
else:
print 'Ignoring errors on optional subsystems {0!r}'.format(
self.__build_failures)
return
def build_packages(self):
"""Build all the Spinnaker packages."""
all_subsystems = []
all_subsystems.extend(SUBSYSTEM_LIST)
all_subsystems.extend(ADDITIONAL_SUBSYSTEMS)
if self.__options.build:
# Build in parallel, sizing the thread pool by --cpu_ratio times
# the available cores to keep load in check.
weighted_processes = self.__options.cpu_ratio * multiprocessing.cpu_count()
pool = multiprocessing.pool.ThreadPool(
processes=int(max(1, weighted_processes)))
pool.map(self.__do_build, all_subsystems)
if self.__build_failures:
if set(self.__build_failures).intersection(set(SUBSYSTEM_LIST)):
raise RuntimeError('Builds failed for {0!r}'.format(
self.__build_failures))
else:
print 'Ignoring errors on optional subsystems {0!r}'.format(
self.__build_failures)
if self.__options.nebula:
return
wait_on = set(all_subsystems).difference(set(self.__build_failures))
pool = multiprocessing.pool.ThreadPool(processes=len(wait_on))
print 'Copying packages...'
pool.map(self.__do_copy, wait_on)
return
def __do_copy(self, subsys):
print 'Starting to copy {0}...'.format(subsys)
pids = self.start_copy_debian_target(subsys)
for p in pids:
p.check_wait()
print 'Finished copying {0}.'.format(subsys)
@classmethod
def init_argument_parser(cls, parser):
refresh_source.Refresher.init_argument_parser(parser)
parser.add_argument('--build', default=True, action='store_true',
help='Build the sources.')
parser.add_argument(
'--cpu_ratio', type=float, default=1.25, # 125%
help='Number of concurrent threads as ratio of available cores.')
parser.add_argument('--nobuild', dest='build', action='store_false')
config_path = os.path.join(determine_project_root(), 'config')
parser.add_argument(
'--config_source', default=config_path,
help='Path to directory for release config file templates.')
parser.add_argument('--release_path', default='',
help='Specifies the path to the release to build.'
' The release name is assumed to be the basename.'
' The path can be a directory, GCS URI or S3 URI.')
parser.add_argument(
'--gcb_project', default='',
help='The google project id to publish containers to'
' if the container builder is gcb.')
parser.add_argument(
'--bintray_repo', default='',
help='Publish to this bintray repo.\n'
'This requires BINTRAY_USER and BINTRAY_KEY are set.')
parser.add_argument(
'--jar_repo', default='',
help='Publish produced jars to this repo.\n'
'This requires BINTRAY_USER and BINTRAY_KEY are set.')
parser.add_argument(
'--wipe_package_on_409', default=False, action='store_true',
help='Work around BinTray conflict errors by deleting the entire package'
' and retrying. Removes all prior versions so only intended for dev'
' repos.\n')
parser.add_argument(
'--nowipe_package_on_409', dest='wipe_package_on_409',
action='store_false')
parser.add_argument(
'--nebula', default=True, action='store_true',
help='Use nebula to build "candidate" target and upload to bintray.')
parser.add_argument(
'--nonebula', dest='nebula', action='store_false',
help='Explicitly "buildDeb" then curl upload them to bintray.')
parser.add_argument(
'--gcb_service_account', default='',
help='Google service account to invoke the gcp container builder with.')
def __verify_bintray(self):
if not os.environ.get('BINTRAY_KEY', None):
raise ValueError('BINTRAY_KEY environment variable not defined')
if not os.environ.get('BINTRAY_USER', None):
raise ValueError('BINTRAY_USER environment variable not defined')
@classmethod
def do_build(cls, options, build_number=None, container_builder=None):
if options.build and not (options.bintray_repo):
sys.stderr.write('ERROR: Missing a --bintray_repo\n')
return -1
builder = cls(options, build_number=build_number, container_builder=container_builder)
if options.pull_origin:
builder.refresher.pull_all_from_origin()
builder.build_packages()
if container_builder:
builder.build_container_images()
if options.build and options.bintray_repo:
fd, temp_path = tempfile.mkstemp()
with open(os.path.join(determine_project_root(), 'InstallSpinnaker.sh'),
'r') as f:
content = f.read()
match = re.search(
'REPOSITORY_URL="https://dl\.bintray\.com/(.+)"',
content)
content = ''.join([content[0:match.start(1)],
options.bintray_repo,
content[match.end(1):]])
os.write(fd, content)
os.close(fd)
try:
builder.publish_install_script(
os.path.join(determine_project_root(), temp_path))
finally:
os.remove(temp_path)
print '\nFINISHED writing release to {rep}'.format(
rep=options.bintray_repo)
@classmethod
def main(cls):
parser = argparse.ArgumentParser()
cls.init_argument_parser(parser)
options = parser.parse_args()
# builds debians only
cls.do_build(options)
if __name__ == '__main__':
sys.exit(Builder.main())
|
Roshan2017/spinnaker
|
dev/build_release.py
|
Python
|
apache-2.0
| 24,333
|
[
"ORCA"
] |
61d890cf3f43bd5803d9467025fac46561ce1ca5f134a001d65acb643d620b8a
|
"""extract_time_series.py is a thin wrapper around extract_time_series.ncl which interpolations to locations and heights.
Usage:
extract_time_series.py <file>... --loc=<file> --opt=<file> --height=<h>... [--mode=<mode>] [--out=<dir>] [-h | --help] [--ncl-code=<path>] [--dry-run]
Options:
--loc=<file> locations file containing latitude and longitude
--out=<dir> output directory for the time-series netcdf files
--opt=<file> location of an ncl file specifying which variables to extract
--mode=<mode> loop: loop over each input file separately, lump: lump all input files together [default: loop]
--ncl-code=<path> location of extract_time_series.ncl [default: ./ncl]
--dry-run print commands but don't execute
-h |--help show this message
Notes:
This requires the extract_time_series.ncl script.
Examples:
python extract_time_series.py wrfout_d01_2010-01-* --loc=<file> --opt=<file> --height=<h> --out=./tseries"""
import os
import sys
import docopt
import subprocess
import time
NCL_SCRIPT = 'extract_time_series.ncl'
def main():
""" Pass command line arguments to NCL script"""
args = docopt.docopt(__doc__, sys.argv[1:])
t0 = time.time()
if args['--out'] is None:
out_dir = '.'
else:
out_dir = args['--out']
if args['--ncl-code'] is None:
fullpath = os.path.realpath(sys.argv[0])
p,f = os.path.split(fullpath)
ncl_code_dir = '%s/ncl'% p
else:
ncl_code_dir = args['--ncl-code']
cmd_files = args['<file>']
# Add nc extension if needed
nc_files = [ f if f.endswith('.nc') else f+'.nc' for f in cmd_files]
# Create height arrays
hgts = args['--height']
hgts = '(/%s/)' % ','.join(hgts)
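# The join above turns repeated --height flags into an NCL array literal,
# e.g. --height=80 --height=100 (illustrative values) becomes (/80,100/).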
mode = args['--mode']
dry_run = args['--dry-run']
loc = args['--loc']
opt = args['--opt']
print '\n*****************************************************'
print 'extract_time_series.py'
print args
if mode=='loop':
# This will loop over each file separately
for f in sorted(nc_files):
path,name = os.path.split(f)
out_file = out_dir+'/'+name.replace('wrfout', 'tseries')
if os.path.exists(out_file):
os.remove(out_file)
# Create NCL file array
in_file = f
#cmd = """FCST_FILE=%s NCL_OUT_FILE=%s LOCATIONS_FILE=%s NCL_OPT_FILE=%s ncl %s/%s 'extract_heights=%s' """ %(in_file, out_file, loc,opt, ncl_code_dir, NCL_SCRIPT, hgts)
cmd = """NCL_OPT_FILE=%s ncl 'in_file="%s"' 'out_file="%s"' 'extract_heights=%s' 'loc_file="%s"' %s/%s""" % (opt,in_file,out_file, hgts, loc, ncl_code_dir, NCL_SCRIPT)
print cmd
# We could either aggregate all files together or loop over files
if not dry_run:
subprocess.call(cmd, shell=True)
elif mode=='lump':
f = nc_files[0]
path,name = os.path.split(f)
out_file = out_dir+'/'+name.replace('wrfout', 'tseries')
if os.path.exists(out_file):
os.remove(out_file)
# Create NCL file array
files = '","'.join(sorted(nc_files))
in_file = '(/"%s"/)' % files
cmd = """NCL_OPT_FILE=%s ncl 'in_file=%s' 'out_file="%s"' 'extract_heights=%s' 'loc_file="%s"' %s/%s""" % (opt,in_file,out_file, hgts, loc, ncl_code_dir, NCL_SCRIPT)
print cmd
if not dry_run:
subprocess.call(cmd, shell=True)
te = time.time() - t0
print 'elapsed time: %0.1f ' % te
if __name__ == '__main__':
main()
|
samwisehawkins/wrftools
|
util/extract_time_series.py
|
Python
|
mit
| 3,657
|
[
"NetCDF"
] |
572e61dcc8e44228e56a1b9c91db0ca27ef41ead66e51861cb9fa033c42a96fc
|
# coding=utf-8
# Copyright 2018 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Waste your hard earned hypecoins here."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
from functools import wraps
import itertools
import random
import re
import threading
from absl import logging
from hypebot.core import schedule_lib
from hypebot.core import util_lib
from hypebot.plugins import playing_cards_lib
from hypebot.plugins import vegas_game_lib
from hypebot.protos import bet_pb2
from hypebot.protos import user_pb2
from typing import Dict, List, Optional, Text
# Double deck to allow people to count cards.
_NUM_DECKS = 2
# Maps card values to all of their potential points.
_CARD_POINTS = {
playing_cards_lib.ACE: [1, 11],
2: [2],
3: [3],
4: [4],
5: [5],
6: [6],
7: [7],
8: [8],
9: [9],
10: [10],
playing_cards_lib.JACK: [10],
playing_cards_lib.QUEEN: [10],
playing_cards_lib.KING: [10],
}
class Hand(object):
"""Collection of cards with blackjack game state."""
def __init__(self, bet, *cards):
self.bet = bet # type: bet_pb2.Bet
self.cards = list(cards) # type: List[playing_cards_lib.Card]
self.stand = False
def IsActive(self):
return not (self.IsBusted() or self.IsHypeJack() or self.stand)
def IsBusted(self):
return self.Score() > 21
def IsHypeJack(self):
return self.Score() == 21 and len(self.cards) == 2
def Score(self):
"""Computes the best possible score for the hand."""
points = _CARD_POINTS[self.cards[0].value]
for card in self.cards[1:]:
points = [
pts[0] + pts[1]
for pts in itertools.product(points, _CARD_POINTS[card.value])
]
non_bust = [p for p in points if p <= 21]
if non_bust:
score = max(non_bust)
if score == 21:
self.stand = True
return score
return min(points)
def __unicode__(self):
status_str = ''
if self.IsBusted():
status_str = '✕'
elif self.IsHypeJack():
status_str = '✪'
elif self.stand:
status_str = '✋'
return '[%s]%s' % (', '.join(map(unicode, self.cards)), status_str)
def HandFromMatch(fn):
"""Wrapper that calls the function with the correct hand.
Determines what hand was desired based on the following order:
1) Hand passed directly as hand kwarg.
2) Corresponding hand based on number specified in match kwarg.
Args:
fn: Function to wrap.
Returns:
Wrapped function.
"""
@wraps(fn)
def Wrapper(self, user: user_pb2.User, *args, **kwargs):
"""Internal wrapper."""
# pylint: disable=protected-access
with self._lock:
if user.user_id not in self._peeps:
self._msg_fn(
None, '%s: You are not playing in this round.' % user.display_name)
return
if 'hand' in kwargs:
return fn(self, user, *args, **kwargs)
# Default to first hand if none specified.
try:
hand_id = int(kwargs['match'].groups()[0])
except Exception: # pylint: disable=broad-except
hand_id = 0
try:
hand = self._peeps[user.user_id][hand_id]
except KeyError:
self._msg_fn(
None, '%s: Please specify a valid hand: 0 through %d' %
(user.display_name, len(self._peeps[user.user_id]) - 1))
return
if not hand.IsActive():
self._msg_fn(
None,
'%s: Hand %s is already complete.' % (user.display_name, hand_id))
return
kwargs['hand'] = hand
return fn(self, user, *args, **kwargs)
# pylint: enable=protected-access
return Wrapper
class Game(vegas_game_lib.GameBase):
"""Blackjack style game."""
# Seconds after first bet until round starts
ROUND_DELAY = 5
# Seconds that users have to complete their hands before they are auto-stood.
# Prevents a user from betting and walking away.
MAX_ROUND_LENGTH = 60
def __init__(self, channel, core, msg_fn):
# Used for thread safe access to class data.
self._lock = threading.RLock()
# Condition variable used to force end the game after a certain amount of
# time has passed.
self._game_ender = threading.Condition(lock=self._lock)
self.channel = channel
self._core = core
self._msg_fn = msg_fn
self._pending_start = False
self._active_round = False
self._scheduler = schedule_lib.HypeScheduler()
# Maps users to their hands for the active round.
self._peeps = {} # type: Dict[Text, List[Hand]]
self._dealer_hand = None # type: Hand
self._shoe = []
# ============================================================================
# GameBase abstract signature.
# ============================================================================
@property
def name(self):
return self.channel.name
# Do not take any bets from random channels. We directly place bets ourselves.
def TakeBet(self, bet):
return False
def FormatBet(self, bet):
return u'%s %s %s in %s' % (util_lib.FormatHypecoins(
bet.amount), bet_pb2.Bet.Direction.Name(
bet.direction).lower(), bet.target, self.name)
def SettleBets(self, pool, msg_fn, *args, **kwargs):
with self._lock:
winners = defaultdict(int)
users_by_id = {}
for user_id, user_bets in pool.items():
if user_id not in self._peeps:
# This means the game wasn't finished. Either user timed out or prior
# crash. Hypebot steals the bet either way.
continue
users_by_id[user_id] = user_bets[0].user
for bet in user_bets:
hand_id = int(bet.target.split('-')[-1])
hand = self._peeps[user_id][hand_id]
result_str = 'lost'
if hand.IsBusted():
result_str = 'busted'
elif hand.IsHypeJack():
if self._dealer_hand.IsHypeJack():
result_str = 'pushed'
winners[user_id] += bet.amount
else:
result_str = 'hypejack!'
winners[user_id] += bet.amount * 5 // 2
elif (self._dealer_hand.IsBusted() or
hand.Score() > self._dealer_hand.Score()):
winners[user_id] += bet.amount * 2
result_str = 'won'
elif hand.Score() == self._dealer_hand.Score():
winners[user_id] += bet.amount
result_str = 'pushed'
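# Payout sketch for a 100-coin bet: a hypejack returns 250 (5:2), a win
# returns 200, a push returns the 100 stake, and a loss or bust returns 0.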
self._msg_fn(
None,
'%s: %s %s' % (bet.user.display_name, unicode(hand), result_str))
return ({
users_by_id[user_id]: amount for user_id, amount in winners.items()
}, {}, [])
# ============================================================================
# HypeJack logic.
# ============================================================================
def HandleMessage(self, user: user_pb2.User, msg: Text):
with self._lock:
hand_regex = r' ?([0-9]*)'
bet_match = re.match(r'^b(?:et)? ([0-9]+)', msg)
double_match = re.match(r'^d(?:ouble)?%s' % hand_regex, msg)
hit_match = re.match(r'^h(?:it)?%s' % hand_regex, msg)
stand_match = re.match(r'^st(?:and)?%s' % hand_regex, msg)
split_match = re.match(r'^sp(?:lit)?%s' % hand_regex, msg)
help_match = re.match(r'^h[ae]lp', msg)
if bet_match:
self.Bet(user, bet_match)
elif help_match:
# Help before hit since they will both match `help`.
self.Help(user)
elif double_match:
self.Double(user, match=double_match)
elif hit_match:
self.Hit(user, match=hit_match)
elif stand_match:
self.Stand(user, match=stand_match)
elif split_match:
self.Split(user, match=split_match)
self._PossiblyEndRound()
# ============================================================================
# User commands.
# ============================================================================
def Bet(self, user: user_pb2.User, match):
with self._lock:
if self._active_round:
self._msg_fn(None, '%s: Round is currently active.' % user.display_name)
return
amount = self._core.bank.ParseAmount(user,
match.groups()[0], self._msg_fn)
bet = bet_pb2.Bet(
user=user,
amount=amount,
resolver=self._core.name.lower(),
direction=bet_pb2.Bet.FOR,
target='hand-0')
if not self._core.bets.PlaceBet(self, bet, self._msg_fn):
return
self._msg_fn(None, '%s joined the round.' % user.display_name)
if not self._pending_start:
self._pending_start = True
self._msg_fn(None, 'Round starting soon, type "bet [amount]" to join.')
self._scheduler.InSeconds(self.ROUND_DELAY, self.PlayRound)
@HandFromMatch
def Double(self,
user: user_pb2.User,
hand: Optional[Hand] = None,
match=None):
if not hand:
return
with self._lock:
logging.info('Prior Bet: %s', hand.bet)
hand.bet.amount *= 2
if not self._core.bets.PlaceBet(self, hand.bet, self._msg_fn):
self._msg_fn(None,
'%s: Not enough hypecoins to double.' % user.display_name)
        hand.bet.amount //= 2
return
self.Hit(user, hand=hand)
self.Stand(user, hand=hand)
self._DisplayUser(user)
def Help(self, user: user_pb2.User):
lines = """HypeJack bears a strong resemblence to a popular casino game.
Commands:
* bet [amount]: signal intent to play in the round.
* hit [hand_id]: request a card for hand_id.
* stand [hand_id]: wait for dealer and compare hands.
* split [hand_id]: split a hand of same value cards into two hands.
* double [hand_id]: double your bet, take a single hit, and stand.
""".split('\n')
self._msg_fn(user, lines)
@HandFromMatch
def Hit(self,
user: user_pb2.User,
hand: Optional[Hand] = None,
match=None):
if not hand:
return
with self._lock:
hand.cards.append(self._shoe.pop())
self._DisplayUser(user)
@HandFromMatch
def Stand(self,
user: user_pb2.User,
hand: Optional[Hand] = None,
match=None):
if not hand:
return
with self._lock:
hand.stand = True
self._DisplayUser(user)
@HandFromMatch
def Split(self,
user: user_pb2.User,
hand: Optional[Hand] = None,
match=None):
if not hand:
return
with self._lock:
if (len(hand.cards) != 2 or _CARD_POINTS[hand.cards[0].value] !=
_CARD_POINTS[hand.cards[1].value]):
self._msg_fn(
None, '%s: Can only split 2 equal value cards.' % user.display_name)
return
new_bet = bet_pb2.Bet()
new_bet.CopyFrom(hand.bet)
new_bet.target = 'hand-%d' % len(self._peeps[user.user_id])
if not self._core.bets.PlaceBet(self, new_bet, self._msg_fn):
self._msg_fn(None,
'%s: Not enough hypecoins to split.' % user.display_name)
return
new_hand = Hand(new_bet, hand.cards.pop())
self._peeps[user.user_id].append(new_hand)
self.Hit(user, hand=hand)
self.Hit(user, hand=new_hand)
self._DisplayUser(user)
# ============================================================================
# Game logic.
# ============================================================================
def PlayRound(self):
"""Plays one round of HypeJack with all active players.
Should be called in a separate thread since it will sleep until the game
timeout unless woken by all peeps completing their hands.
"""
with self._lock:
if self._active_round:
logging.error('HypeJack game already active.')
return
bets = self._core.bets.LookupBets(
self.name, resolver=self._core.name.lower())
if not bets:
logging.error('Attempted to start HypeJack with no players.')
return
self._pending_start = False
# Shuffle the deck when it gets low. We assume a reasonable number of
# cards needed per player, but with lots of splits / low cards we may
# still run out of cards to play the hand.
if len(self._shoe) < (len(self._peeps) + 1) * 7:
self._ShuffleCards()
# Deal cards to plebs.
for user_id, user_bets in bets.items():
hand = Hand(user_bets[0], self._shoe.pop(), self._shoe.pop())
self._peeps[user_id] = [hand]
self._DisplayUser(user_bets[0].user)
# Deal cards to hypebot.
self._dealer_hand = Hand(None, self._shoe.pop(), self._shoe.pop())
# self._dealer_hand = Hand(playing_cards_lib.Card('Hearts', 8),
# playing_cards_lib.Card('Spades', 8))
self._msg_fn(None, 'Dealer: [%s, %s]' % (self._dealer_hand.cards[0], '🂠'))
self._active_round = True
# Short-circuit game play if the dealer has a hypejack or if all peeps
# have hypejacks.
if not self._dealer_hand.IsHypeJack() and any(
[self._IsActive(user_id) for user_id in self._peeps.keys()]):
# Force the round to end after some time if some peep ran away. Waiting
# on a condition releases the lock while waiting, then reacquires it
# automatically. Will shortcircuit if notified when all peeps have
# finished their hands.
self._game_ender.wait(timeout=self.MAX_ROUND_LENGTH)
# Complete dealer hand.
self._msg_fn(None, 'Dealer: %s' % self._dealer_hand)
while self._dealer_hand.Score() < 17:
self._dealer_hand.cards.append(self._shoe.pop())
self._msg_fn(None, 'Dealer: %s' % self._dealer_hand)
self._core.bets.SettleBets(self, self._core.name.lower(), self._msg_fn)
# Reset game state.
self._peeps = {}
self._active_round = False
def _ShuffleCards(self):
with self._lock:
self._msg_fn(None, 'Shuffling cards.')
self._shoe = []
for _ in range(_NUM_DECKS):
self._shoe.extend(playing_cards_lib.BuildDeck())
random.shuffle(self._shoe)
def _DisplayUser(self, user: user_pb2.User):
with self._lock:
      if user.user_id in self._peeps and len(self._peeps[user.user_id]):
hands = self._peeps[user.user_id]
self._msg_fn(
None, '%s: %s' % (user.display_name, ', '.join([
'%s:%s' % (i, unicode(hand)) for i, hand in enumerate(hands)
])))
def _IsActive(self, user_id: Text):
"""Check if user has any active hands."""
with self._lock:
return (user_id in self._peeps and
any([hand.IsActive() for hand in self._peeps[user_id]]))
def _PossiblyEndRound(self):
"""End round if no users are active."""
with self._lock:
if all([not self._IsActive(user_id) for user_id in self._peeps.keys()]):
self._game_ender.notify()
|
google/hypebot
|
hypebot/plugins/hypejack_lib.py
|
Python
|
apache-2.0
| 15,524
|
[
"CASINO"
] |
06e87332f5e59fda469dfb3ae82b05cd07badc01c313e3c20e7faf9dba4f15ea
|
from __future__ import with_statement
import os
import re
import platform
import time
import fnmatch
import tempfile
from os import environ
from sos.utilities import (ImporterHelper,
import_module,
shell_out)
from sos.plugins import IndependentPlugin, ExperimentalPlugin
from sos import _sos as _
from textwrap import fill
from six import print_
from six.moves import input
def import_policy(name):
policy_fqname = "sos.policies.%s" % name
try:
return import_module(policy_fqname, Policy)
except ImportError:
return None
def load(cache={}, sysroot=None):
if 'policy' in cache:
return cache.get('policy')
import sos.policies
helper = ImporterHelper(sos.policies)
for module in helper.get_modules():
for policy in import_policy(module):
if policy.check():
cache['policy'] = policy(sysroot=sysroot)
if 'policy' not in cache:
cache['policy'] = GenericPolicy()
return cache['policy']
class PackageManager(object):
"""Encapsulates a package manager. If you provide a query_command to the
constructor it should print each package on the system in the following
format::
package name|package.version
You may also subclass this class and provide a get_pkg_list method to
build the list of packages and versions.
"""
query_command = None
chroot = None
def __init__(self, query_command=None, chroot=None):
self.packages = {}
if query_command:
self.query_command = query_command
if chroot:
self.chroot = chroot
def all_pkgs_by_name(self, name):
"""
Return a list of packages that match name.
"""
return fnmatch.filter(self.all_pkgs().keys(), name)
def all_pkgs_by_name_regex(self, regex_name, flags=0):
"""
Return a list of packages that match regex_name.
"""
reg = re.compile(regex_name, flags)
return [pkg for pkg in self.all_pkgs().keys() if reg.match(pkg)]
def pkg_by_name(self, name):
"""
Return a single package that matches name.
"""
        pkgmatches = self.all_pkgs_by_name(name)
        if pkgmatches:
            return pkgmatches[-1]
        return None
def get_pkg_list(self):
"""Returns a dictionary of packages in the following
format::
{'package_name': {'name': 'package_name',
'version': 'major.minor.version'}}
"""
if self.query_command:
cmd = self.query_command
pkg_list = shell_out(
cmd, timeout=0, chroot=self.chroot
).splitlines()
for pkg in pkg_list:
if '|' not in pkg:
continue
name, version = pkg.split("|")
self.packages[name] = {
'name': name,
'version': version.split(".")
}
return self.packages
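    # Illustrative sketch (not part of the original module): a dpkg-based
    # manager could be built purely from query_command, assuming dpkg-query
    # is available on the system:
    #   PackageManager(query_command="dpkg-query -W -f='${Package}|${Version}\n'")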
def all_pkgs(self):
"""
Return a list of all packages.
"""
if not self.packages:
self.packages = self.get_pkg_list()
return self.packages
def pkg_nvra(self, pkg):
fields = pkg.split("-")
version, release, arch = fields[-3:]
name = "-".join(fields[:-3])
return (name, version, release, arch)
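    # Illustrative example, following the dash-splitting logic above:
    #   pkg_nvra("foo-bar-1.0-2-x86_64") == ("foo-bar", "1.0", "2", "x86_64")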
class Policy(object):
msg = _("""\
This command will collect system configuration and diagnostic information \
from this %(distro)s system. An archive containing the collected information \
will be generated in %(tmpdir)s.
For more information on %(vendor)s visit:
%(vendor_url)s
The generated archive may contain data considered sensitive and its content \
should be reviewed by the originating organization before being passed to \
any third party.
No changes will be made to system configuration.
%(vendor_text)s
""")
distro = "Unknown"
vendor = "Unknown"
vendor_url = "http://www.example.com/"
vendor_text = ""
PATH = ""
_in_container = False
_host_sysroot = '/'
def __init__(self, sysroot=None):
"""Subclasses that choose to override this initializer should call
super() to ensure that they get the required platform bits attached.
super(SubClass, self).__init__(). Policies that require runtime
tests to construct PATH must call self.set_exec_path() after
modifying PATH in their own initializer."""
self._parse_uname()
self.report_name = self.hostname
self.case_id = None
self.package_manager = PackageManager()
self._valid_subclasses = []
self.set_exec_path()
self._host_sysroot = sysroot
def get_valid_subclasses(self):
return [IndependentPlugin] + self._valid_subclasses
def set_valid_subclasses(self, subclasses):
self._valid_subclasses = subclasses
def del_valid_subclasses(self):
del self._valid_subclasses
valid_subclasses = property(get_valid_subclasses,
set_valid_subclasses,
del_valid_subclasses,
"list of subclasses that this policy can "
"process")
def check(self):
"""
This function is responsible for determining if the underlying system
is supported by this policy.
"""
return False
def in_container(self):
""" Returns True if sos is running inside a container environment.
"""
return self._in_container
def host_sysroot(self):
return self._host_sysroot
def dist_version(self):
"""
Return the OS version
"""
pass
def get_preferred_archive(self):
"""
        Return the class object of the preferred archive format for this
platform
"""
from sos.archive import TarFileArchive
return TarFileArchive
def get_archive_name(self):
"""
This function should return the filename of the archive without the
extension.
"""
if self.case_id:
self.report_name += "." + self.case_id
return "sosreport-%s-%s" % (self.report_name,
time.strftime("%Y%m%d%H%M%S"))
def get_tmp_dir(self, opt_tmp_dir):
if not opt_tmp_dir:
return tempfile.gettempdir()
return opt_tmp_dir
def match_plugin(self, plugin_classes):
if len(plugin_classes) > 1:
for p in plugin_classes:
# Give preference to the first listed tagging class
# so that e.g. UbuntuPlugin is chosen over DebianPlugin
# on an Ubuntu installation.
if issubclass(p, self.valid_subclasses[0]):
return p
return plugin_classes[0]
def validate_plugin(self, plugin_class, experimental=False):
"""
Verifies that the plugin_class should execute under this policy
"""
valid_subclasses = [IndependentPlugin] + self.valid_subclasses
if experimental:
valid_subclasses += [ExperimentalPlugin]
return any(issubclass(plugin_class, class_) for
class_ in valid_subclasses)
def pre_work(self):
"""
This function is called prior to collection.
"""
pass
def post_work(self):
"""
This function is called after the sosreport has been generated.
"""
pass
def pkg_by_name(self, pkg):
return self.package_manager.pkg_by_name(pkg)
def _parse_uname(self):
(system, node, release,
version, machine, processor) = platform.uname()
self.system = system
self.hostname = node
self.release = release
self.smp = version.split()[1] == "SMP"
self.machine = machine
def set_commons(self, commons):
self.commons = commons
def _set_PATH(self, path):
environ['PATH'] = path
def set_exec_path(self):
self._set_PATH(self.PATH)
def is_root(self):
"""This method should return true if the user calling the script is
considered to be a superuser"""
return (os.getuid() == 0)
def get_preferred_hash_name(self):
"""Returns the string name of the hashlib-supported checksum algorithm
to use"""
return "md5"
def display_results(self, archive, directory, checksum):
# Display results is called from the tail of SoSReport.final_work()
#
# Logging is already shutdown and all terminal output must use the
# print() call.
# make sure a report exists
if not archive and not directory:
return False
self._print()
if archive:
self._print(_("Your sosreport has been generated and saved "
"in:\n %s") % archive)
else:
self._print(_("sosreport build tree is located at : %s" %
directory))
self._print()
if checksum:
self._print(_("The checksum is: ") + checksum)
self._print()
self._print(_("Please send this file to your support "
"representative."))
self._print()
def _print(self, msg=None):
"""A wrapper around print that only prints if we are not running in
quiet mode"""
if not self.commons['cmdlineopts'].quiet:
if msg:
print_(msg)
else:
print_()
def get_msg(self):
"""This method is used to prepare the preamble text to display to
the user in non-batch mode. If your policy sets self.distro that
text will be substituted accordingly. You can also override this
method to do something more complicated."""
width = 72
_msg = self.msg % {'distro': self.distro, 'vendor': self.vendor,
'vendor_url': self.vendor_url,
'vendor_text': self.vendor_text,
'tmpdir': self.commons['tmpdir']}
_fmt = ""
for line in _msg.splitlines():
_fmt = _fmt + fill(line, width, replace_whitespace=False) + '\n'
return _fmt
class GenericPolicy(Policy):
"""This Policy will be returned if no other policy can be loaded. This
should allow for IndependentPlugins to be executed on any system"""
def get_msg(self):
return self.msg % {'distro': self.system}
class LinuxPolicy(Policy):
"""This policy is meant to be an abc class that provides common
implementations used in Linux distros"""
distro = "Linux"
vendor = "None"
PATH = "/bin:/sbin:/usr/bin:/usr/sbin"
_preferred_hash_name = None
def __init__(self, sysroot=None):
super(LinuxPolicy, self).__init__(sysroot=sysroot)
def get_preferred_hash_name(self):
if self._preferred_hash_name:
return self._preferred_hash_name
checksum = "md5"
try:
fp = open("/proc/sys/crypto/fips_enabled", "r")
        except IOError:
self._preferred_hash_name = checksum
return checksum
fips_enabled = fp.read()
if fips_enabled.find("1") >= 0:
checksum = "sha256"
fp.close()
self._preferred_hash_name = checksum
return checksum
def default_runlevel(self):
try:
with open("/etc/inittab") as fp:
pattern = r"id:(\d{1}):initdefault:"
text = fp.read()
return int(re.findall(pattern, text)[0])
        except Exception:
return 3
def kernel_version(self):
return self.release
def host_name(self):
return self.hostname
def is_kernel_smp(self):
return self.smp
def get_arch(self):
return self.machine
def get_local_name(self):
"""Returns the name usd in the pre_work step"""
return self.host_name()
def sanitize_report_name(self, report_name):
return re.sub(r"[^-a-zA-Z.0-9]", "", report_name)
def sanitize_case_id(self, case_id):
return re.sub(r"[^-a-z,A-Z.0-9]", "", case_id)
def pre_work(self):
# this method will be called before the gathering begins
cmdline_opts = self.commons['cmdlineopts']
customer_name = cmdline_opts.customer_name
localname = customer_name if customer_name else self.get_local_name()
caseid = cmdline_opts.case_id if cmdline_opts.case_id else ""
if not cmdline_opts.batch and not \
cmdline_opts.quiet:
try:
self.report_name = input(_("Please enter your first initial "
"and last name [%s]: ") % localname)
self.case_id = input(_("Please enter the case id "
"that you are generating this "
"report for [%s]: ") % caseid)
self._print()
except:
self._print()
self.report_name = localname
if len(self.report_name) == 0:
self.report_name = localname
if customer_name:
self.report_name = customer_name
if cmdline_opts.case_id:
self.case_id = cmdline_opts.case_id
self.report_name = self.sanitize_report_name(self.report_name)
if self.case_id:
self.case_id = self.sanitize_case_id(self.case_id)
if (self.report_name == ""):
self.report_name = "default"
return
# vim: set et ts=4 sw=4 :
|
cnewcome/sos
|
sos/policies/__init__.py
|
Python
|
gpl-2.0
| 13,850
|
[
"VisIt"
] |
f5eb2bffde239b766b88522c45a6b6bdf931e5ace922c50c5bee17406d06a7da
|
"""
**hep_ml.reweight** contains reweighting algorithms.
Reweighting is procedure of finding such weights for original distribution,
that make distribution of one or several variables identical in original distribution and target distribution.
Remark: if each variable has identical distribution in two samples,
this doesn't imply that multidimensional distributions are equal (almost surely they aren't).
Aim of reweighters is to get identical multidimensional distributions.
Algorithms are implemented as estimators, fitting and reweighting stages are split.
Fitted reweighter can be applied many times to different data, pickled and so on.
Examples
________
The most common use case is reweighting of Monte-Carlo simulations results to sPlotted real data.
(original weights are all equal to 1 and could be skipped, but left here for example)
>>> from hep_ml.reweight import BinsReweighter, GBReweighter
>>> original_weights = numpy.ones(len(MC_data))
>>> reweighter = BinsReweighter(n_bins=100, n_neighs=3)
>>> reweighter.fit(original=MC_data, target=RealData,
...                original_weight=original_weights, target_weight=sWeights)
>>> MC_weights = reweighter.predict_weights(MC_data, original_weight=original_weights)
The same example for `GBReweighter`:
>>> reweighter = GBReweighter(max_depth=2, gb_args={'subsample': 0.5})
>>> reweighter.fit(original=MC_data, target=RealData, target_weight=sWeights)
>>> MC_weights = reweighter.predict_weights(MC_data)
"""
from __future__ import division, print_function, absolute_import
from sklearn.base import BaseEstimator
from scipy.ndimage import gaussian_filter
from hep_ml.commonutils import check_sample_weight, weighted_quantile
from hep_ml import gradientboosting as gb
from hep_ml import losses
from warnings import warn
import numpy
__author__ = 'Alex Rogozhnikov'
__all__ = ['BinsReweighter', 'GBReweighter']
warn("Module hep_ml.reweight is unstable, it's API may be changed in near future.")
def bincount_nd(x, weights, shape):
"""
Does the same thing as numpy.bincount, but allows binning in several integer variables.
:param x: numpy.array of shape [n_samples, n_features] with non-negative integers
:param weights: weights of samples, array of shape [n_samples]
    :param shape: shape of the result; must be greater than the maximal value along each axis
:return: weighted number of event in each bin, of shape=shape
"""
assert len(weights) == len(x), 'length of weight is different: {} {}'.format(len(x), len(weights))
assert x.shape[1] == len(shape), 'wrong length of shape: {} {}'.format(x.shape[1], len(shape))
maximals = numpy.max(x, axis=0)
assert numpy.all(maximals < shape), 'smaller shape: {} {}'.format(maximals, shape)
result = numpy.zeros(shape, dtype=float)
numpy.add.at(result, tuple(x.T), weights)
return result
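# Illustrative doctest-style example for bincount_nd (not part of the
# original module): two samples fall into bin (0, 1) and one into bin (2, 0).
# >>> x = numpy.array([[0, 1], [0, 1], [2, 0]])
# >>> bincount_nd(x, weights=numpy.ones(3), shape=(3, 2))
# array([[ 0.,  2.],
#        [ 0.,  0.],
#        [ 1.,  0.]])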
class ReweighterMixin(object):
"""Supplementary class which shows the interface of reweighter.
Reweighters should be derived from this class."""
n_features_ = None
def _normalize_input(self, data, weights):
""" Normalize input of reweighter
:param data: array like of shape [n_samples] or [n_samples, n_features]
:param weights: array-like of shape [n_samples] or None
:return: tuple with
data - numpy.array of shape [n_samples, n_features]
weights - numpy.array of shape [n_samples] with mean = 1.
"""
weights = check_sample_weight(data, sample_weight=weights, normalize=True)
data = numpy.array(data)
if len(data.shape) == 1:
data = data[:, numpy.newaxis]
if self.n_features_ is None:
self.n_features_ = data.shape[1]
assert self.n_features_ == data.shape[1], \
'number of features is wrong: {} {}'.format(self.n_features_, data.shape[1])
return data, weights
def fit(self, original, target, original_weight, target_weight):
        raise NotImplementedError('To be overridden in descendants')
def predict_weights(self, original, original_weight=None):
        raise NotImplementedError('To be overridden in descendants')
class BinsReweighter(BaseEstimator, ReweighterMixin):
def __init__(self, n_bins=200, n_neighs=3.):
"""
Use bins for reweighting. Bins' edges are computed using quantiles along each axis
(which is better than bins of even size).
This method works fine for 1d/2d histograms,
while being quite unstable or inaccurate for higher dimensions.
:param int n_bins: how many bins to use for each input variable.
        :param int n_neighs: size of gaussian filter (in bins).
            This parameter is responsible for the tradeoff between stability of the rule and accuracy of predictions:
            with an increase of n_neighs the rule becomes smoother and more stable, but less accurate.
"""
self.n_percentiles = n_bins
self.n_neighs = n_neighs
# if number of events in bins is less than this value, number of events is clipped.
self.min_in_the_bin = 1.
def compute_bin_indices(self, data):
"""
Compute id of bin along each axis.
:param data: data, array-like of shape [n_samples, n_features]
with the same order of features as in training
:return: numpy.array of shape [n_samples, n_features] with integers, each from [0, n_bins - 1]
"""
bin_indices = []
for axis, axis_edges in enumerate(self.edges):
bin_indices.append(numpy.searchsorted(axis_edges, data[:, axis]))
return numpy.array(bin_indices).T
def fit(self, original, target, original_weight=None, target_weight=None):
"""
Prepare reweighting formula by computing histograms.
:param original: values from original distribution, array-like of shape [n_samples, n_features]
:param target: values from target distribution, array-like of shape [n_samples, n_features]
:param original_weight: weights for samples of original distributions
:param target_weight: weights for samples of original distributions
:return: self
"""
self.n_features_ = None
original, original_weight = self._normalize_input(original, original_weight)
target, target_weight = self._normalize_input(target, target_weight)
target_perc = numpy.linspace(0, 1, self.n_percentiles + 1)[1:-1]
self.edges = []
for axis in range(self.n_features_):
self.edges.append(weighted_quantile(target[:, axis], quantiles=target_perc, sample_weight=target_weight))
bins_weights = []
for data, weights in [(original, original_weight), (target, target_weight)]:
bin_indices = self.compute_bin_indices(data)
bin_w = bincount_nd(bin_indices, weights=weights, shape=[self.n_percentiles] * self.n_features_)
smeared_weights = gaussian_filter(bin_w, sigma=self.n_neighs, truncate=2.5)
bins_weights.append(smeared_weights.clip(self.min_in_the_bin))
bin_orig_weights, bin_targ_weights = bins_weights
self.transition = bin_targ_weights / bin_orig_weights
return self
def predict_weights(self, original, original_weight=None):
"""
Returns corrected weights. Result is computed as original_weight * reweighter_multipliers.
:param original: values from original distribution of shape [n_samples, n_features]
:param original_weight: weights of samples before reweighting.
:return: numpy.array of shape [n_samples] with new weights.
"""
original, original_weight = self._normalize_input(original, original_weight)
bin_indices = self.compute_bin_indices(original)
results = self.transition[tuple(bin_indices.T)] * original_weight
return results
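# Minimal 1d usage sketch for BinsReweighter (illustrative, mirroring the
# module docstring above):
# >>> original = numpy.random.normal(0.5, 1, size=10000)
# >>> target = numpy.random.normal(0.0, 1, size=10000)
# >>> reweighter = BinsReweighter(n_bins=50, n_neighs=3.).fit(original, target)
# >>> new_weights = reweighter.predict_weights(original)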
class GBReweighter(BaseEstimator, ReweighterMixin):
def __init__(self,
n_estimators=40,
learning_rate=0.2,
max_depth=3,
min_samples_leaf=200,
gb_args=None):
"""
Gradient Boosted Reweighter - a reweighter algorithm based on ensemble of regression trees.
Parameters have the same role, as in gradient boosting.
Special loss function is used, trees are trained to maximize symmetrized binned chi-squared statistics.
        Training takes much more time than for bin-based versions, but `GBReweighter` is capable
        of working in high dimensions while keeping the reweighting rule reliable and precise
(and even smooth if many trees are used).
:param n_estimators: number of trees
:param learning_rate: float from [0, 1]. Lesser learning rate requires more trees,
but makes reweighting rule more stable.
:param max_depth: maximal depth of trees
        :param min_samples_leaf: minimal number of events in the leaf;
            larger values give a smoother, more stable reweighting rule.
:param gb_args: other parameters passed to gradient boosting.
See :class:`hep_ml.gradientboosting.UGradientBoostingClassifier`
"""
self.learning_rate = learning_rate
self.n_estimators = n_estimators
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.gb_args = gb_args
def fit(self, original, target, original_weight=None, target_weight=None):
"""
Prepare reweighting formula by training sequence of trees.
:param original: values from original distribution, array-like of shape [n_samples, n_features]
:param target: values from target distribution, array-like of shape [n_samples, n_features]
:param original_weight: weights for samples of original distributions
:param target_weight: weights for samples of original distributions
:return: self
"""
self.n_features_ = None
if self.gb_args is None:
self.gb_args = {}
original, original_weight = self._normalize_input(original, original_weight)
target, target_weight = self._normalize_input(target, target_weight)
self.gb = gb.UGradientBoostingClassifier(loss=losses.ReweightLossFunction(),
n_estimators=self.n_estimators,
max_depth=self.max_depth,
min_samples_leaf=self.min_samples_leaf,
learning_rate=self.learning_rate,
**self.gb_args)
data = numpy.vstack([original, target])
target = numpy.array([1] * len(original) + [0] * len(target))
weights = numpy.hstack([original_weight, target_weight])
self.gb.fit(data, target, sample_weight=weights)
return self
def predict_weights(self, original, original_weight=None):
"""
Returns corrected weights. Result is computed as original_weight * reweighter_multipliers.
:param original: values from original distribution of shape [n_samples, n_features]
:param original_weight: weights of samples before reweighting.
:return: numpy.array of shape [n_samples] with new weights.
"""
original, original_weight = self._normalize_input(original, original_weight)
multipliers = numpy.exp(self.gb.decision_function(original))
return multipliers * original_weight
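# Note: the multipliers returned above are exp(decision_function) and hence
# strictly positive. A quick quality check (illustrative; `original` and
# `new_weights` as in the module docstring) is to compare weighted histograms
# against the target:
# >>> numpy.histogram(original, bins=50, weights=new_weights)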
|
anaderi/hep_ml
|
hep_ml/reweight.py
|
Python
|
apache-2.0
| 11,419
|
[
"Gaussian"
] |
2e7ca8ec167ae1a47491f68a06419494cb600a278c6264ad91cca2f20d1da3df
|
# Tear.py
# Aaron Taylor
# Moose Abumeeiz
#
# The tear can be shot from an enemy or the main character.
# It will hurt anything of the opposite type (enemies and good guys)
#
from pygame import *
from const import *
from random import randint
from Animation import *
class Tear:
"""Main tear class"""
def __init__(self, xyv, xy, ixy, speed, damage, shotRange, friendly, textures, sounds):
self.xVel, self.yVel = xyv # X, Y velocity
# Stats
self.speed = int(speed*2)+4
self.damage = damage+3
self.friendly = friendly
self.range = (shotRange*20)+200
self.distance = 0
# sounds
self.sounds = sounds
self.x = xy[0]
self.y = xy[1]
# Inherited x and y velocity
self.iXVel = ixy[0]
self.iYVel = ixy[1]
  self.poped = False
  self.frameIndex = 0  # manual frame counter used by step()
  # The 128x128 pop frames sit in a 4-column sprite sheet: (i % 4) selects
  # the column, (i // 4) the row.
  self.frames = [textures[1].subsurface(Rect((i % 4) * 128, (i // 4) * 128, 128, 128)) for i in range(12)]
  self.popping = Animation(self.frames, 0.24)
self.ox = self.x
self.oy = self.y
offX = 0
offY = 0
if damage > 7:
offX = -7
offY = 1
if not friendly:
offY += 2
# Play random shoot sound
sounds[randint(0,1)].play()
# Texture setup
self.texture = textures[0].subsurface(Rect((self.damage+offX)*64, offY*64, 64, 64))
self.width = self.texture.get_width()
self.height = self.texture.get_height()
def step(self):
self.texture = self.frames[self.frameIndex]
self.frameIndex += 1
def pop(self, collision):
self.poped = True
if collision:
self.sounds[2].play() # Play collison pop
else:
self.sounds[1].play() # Play normal pop
return True
def render(self, surface, time, bounds, obsticals):
if self.poped:
# Return popping tear
frame = self.popping.render(time)
if self.popping.looped:
return False
surface.blit(frame, (self.x-self.popping.width//2, self.y-self.popping.height//2))
return True
if abs(self.x-self.ox) < self.range and abs(self.y-self.oy) < self.range:
dx = 0
dy = 0
dx += self.xVel * self.speed
dy += self.yVel * self.speed
# Add inherited X and Y velocity
dx += self.iXVel
dy += self.iYVel
inBoundsX = bounds.collidepoint(self.x+dx, self.y)
   inBoundsY = bounds.collidepoint(self.x, self.y+dy)
rockColX = False
rockColY = False
for ob in obsticals:
# Collide with ob
try:
if ob.destroyed:
continue
except:
pass
    # Collide with object
rcx = ob.bounds.collidepoint(self.x+self.speed, self.y)
rcy = ob.bounds.collidepoint(self.x, self.y+self.speed)
if rcx or rcy:
try:
ob.hurt(1)
except:
pass
if not ob.collideable:
rockColX = rockColY = False
return self.pop(True)
if not inBoundsX or not inBoundsY:
# Ensure tear is within level bounds
return self.pop(True)
# Add to x and y
self.x += dx
self.y += dy
surface.blit(self.texture, (self.x-self.width//2, self.y-self.height//2))
return True
return self.pop(False)
|
ExPHAT/binding-of-isaac
|
Tear.py
|
Python
|
mit
| 2,931
|
[
"MOOSE"
] |
d34f606c3f0457d31394cee6e4b5bb11e302635190ba581b922d14595d41e8b0
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
from __future__ import with_statement
import logging
import threading
import time
from concurrent.futures._compat import reraise
try:
from collections import namedtuple
except ImportError:
from concurrent.futures._compat import namedtuple
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
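# Because every caller acquires the per-future condition locks in the same
# global order (sorted by id), two threads waiting on overlapping sets of
# futures cannot deadlock. Illustrative sketch:
# >>> with _AcquireFutures([f1, f2]):
# ...     pass  # both conditions held; released in __exit__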
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
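# Typical usage sketch (illustrative; `work` and `items` are placeholders,
# ThreadPoolExecutor is provided by concurrent.futures):
# >>> with ThreadPoolExecutor(max_workers=4) as executor:
# ...     futures = [executor.submit(work, item) for item in items]
# ...     for future in as_completed(futures, timeout=60):
# ...         future.result()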
DoneAndNotDoneFutures = namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
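# Illustrative sketch: stop at the first failure, then cancel the remainder.
# >>> done, not_done = wait(futures, return_when=FIRST_EXCEPTION)
# >>> for f in not_done:
# ...     f.cancel()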
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._traceback = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
reraise(self._exception, self._traceback)
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
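    # Illustrative sketch: callbacks run in the thread that completes the
    # future (or immediately in the caller if it is already done), so keep
    # them short; exceptions they raise are logged by _invoke_callbacks.
    # >>> future.add_done_callback(lambda f: LOGGER.info('finished: %r', f))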
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception_info(self, timeout=None):
"""Return a tuple of (exception, traceback) raised by the call that the
future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
        Returns:
            A tuple of (exception, traceback) raised by the call that the
            future represents, or (None, None) if the call completed without
            raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
return self.exception_info(timeout)[0]
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
True) then any threads waiting on the future completing (though calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception_info(self, exception, traceback):
"""Sets the result of the future as being the given exception
and traceback.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._traceback = traceback
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
self.set_exception_info(exception, None)
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
            An iterator equivalent to: map(fn, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
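    # Illustrative sketch: results are yielded in submission order even if
    # the underlying calls complete out of order.
    # >>> list(executor.map(pow, [2, 3], [10, 2]))
    # [1024, 9]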
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
|
LockScreen/Backend
|
venv/lib/python2.7/site-packages/concurrent/futures/_base.py
|
Python
|
mit
| 20,830
|
[
"Brian"
] |
086d538016cdf23c03514a4ede24a5d85df6dcab9604e3426c91793fe71b5689
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Optional
import urllib
from xml.etree.ElementTree import Element
from kivy.logger import Logger
from kivy.event import EventDispatcher
from kivy.network.urlrequest import UrlRequest
from ORCA.ui.ProgressBar import cProgressBar
from ORCA.utils.Zip import cZipFile
from ORCA.utils.LogError import LogError
from ORCA.utils.wait.StartWait import StartWait
from ORCA.utils.wait.StopWait import StopWait
from ORCA.vars.Access import SetVar
from ORCA.utils.TypeConvert import ToInt
from ORCA.utils.FileName import cFileName
from ORCA.utils.XML import LoadXMLFile
from ORCA.utils.Path import cPath
import ORCA.Globals as Globals
from ORCA.download.DownloadObject import cDownLoadObject
from ORCA.download.RegisterDownload import RegisterDownLoad
__all__ = ['cLoadOnlineResource']
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ORCA.download.Repository import cRepository
else:
from typing import TypeVar
cRepository = TypeVar("cRepository")
# class to load a web based resource by url (not ftp)
# noinspection PyUnusedLocal
class cLoadOnlineResource(EventDispatcher):
""" Class for loading a single online resource """
def __init__(self, *args, **kwargs):
self.oRepository:cRepository = kwargs["oRepository"]
kwargs.pop("oRepository")
super(cLoadOnlineResource, self).__init__(*args, **kwargs)
# noinspection PyUnresolvedReferences
self.register_event_type('on_download_finished')
self.bIsLoading:bool = False
self.oRef:Optional[cDownLoadObject] = None
self.uTarget:str = ""
self.oFnDest:Optional[cFileName] = None
self.uType:str = ""
self.uName:str = ""
self.uVersion:str = ""
self.oWeb = None
self.oProgressBar:Optional[cProgressBar] = None
self.bFinished:bool = False
self.bOnError:bool = False
self.bIsLoading:bool = True
self.bFirstMessage:bool = True
self.uUrl:str = ''
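    # Consumers subscribe via self.bind(on_download_finished=...); judging
    # from the code paths below, the event is dispatched exactly once per
    # LoadSingleFile call, whether the download succeeds, fails or is
    # cancelled by the user.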
def LoadSingleFile(self,*,uRef:str,oProgressBar:cProgressBar) -> bool:
"""
Loads a single resource by web
dispatches on_download_finished when finished
"""
self.oRef = cDownLoadObject()
self.oRef.FromString(uPars=uRef)
uUrl = self.oRef.dPars["Url"]
self.uTarget = self.oRef.dPars["Target"]
self.uType = self.oRef.dPars["Type"]
self.uName = self.oRef.dPars["Name"]
self.uVersion = self.oRef.dPars["Version"]
self.oFnDest = cFileName('').ImportFullPath(uFnFullName=self.oRef.dPars["Dest"])
self.oWeb = None
self.oProgressBar = oProgressBar
self.bFinished = False
self.bOnError = False
self.bIsLoading = True
self.bFirstMessage = True
StartWait()
try:
Logger.debug("LoadOnlineResource: Downloading [%s] to [%s]" % (uUrl,self.oFnDest.string))
# noinspection PyUnresolvedReferences
self.uUrl = urllib.parse.quote(uUrl, safe="%/:=&?~#+!$,;'@()*[]")
#self.uUrl = urllib.quote(uUrl,safe="%:=&?~#+!$,;'@()*[]")
#todo: fix ocaremote ssl problem and remove verify=false
self.oWeb = UrlRequest(self.uUrl, verify=False, on_success=self.OnSuccess,on_failure=self.OnFailure,on_error=self.OnError,on_progress=self.OnProgress, decode=False,file_path=self.oFnDest.string, debug=False)
return True
except Exception as e:
LogError(uMsg=u'can\'t load online resource LSF: %s to %s' % (self.uUrl,self.oFnDest.string),oException=e)
self.bFinished = True
self.bOnError = True
self.bIsLoading = False
StopWait()
SetVar(uVarName = "DOWNLOADERROR", oVarValue = "1")
# noinspection PyUnresolvedReferences
self.dispatch('on_download_finished')
return False
def OnError(self,request,result) -> None:
""" Handle an FTP error event """
self.OnFailure(request,result)
def OnFailure(self,request,result) -> None:
""" Handle an FTP Failure event """
self.bFinished = True
self.bOnError = True
self.oFnDest.Delete()
LogError(uMsg=u'can\'t load online resource OnFailure: %s to %s [%s]' % (self.uUrl,self.oFnDest.string,result))
self.OnSuccess(None,None)
self.bIsLoading=False
SetVar(uVarName = "DOWNLOADERROR", oVarValue = "1")
StopWait()
return
def OnProgress(self,request,iCurrentSize:int,iTotalSize:int) -> None:
""" Updates the progressbar """
if not self.oProgressBar is None:
if self.bFirstMessage:
self.bFirstMessage=False
self.oProgressBar.SetMax(iTotalSize)
if self.oProgressBar.bCancel:
self.bFinished = True
self.bOnError = True
self.oFnDest.Delete()
self.OnSuccess(None,None)
return
self.oProgressBar.Update(iCurrentSize)
def OnSuccess(self,request,result) -> None:
""" Handles when the download of a single file finishes """
self.bIsLoading=False
# noinspection PyUnresolvedReferences
self.dispatch('on_download_finished')
self.bFinished = True
if not self.oFnDest.Exists():
Logger.error("Target File not found:" + self.oFnDest.string)
self.bOnError = True
if not self.bOnError:
self.Finish()
StopWait()
def on_download_finished(self) -> None:
""" blank function for dispatcher """
pass
def Finish(self) -> bool:
""" Finish loading """
oET_Root:Element
if self.oRef.dPars["Finalize"]=="REPOSITORY XML":
try:
oET_Root = LoadXMLFile(oFile=self.oFnDest, bNoCache=True)
if not oET_Root is None:
self.oRepository.ParseFromXMLNode(oXMLNode=oET_Root)
except Exception as e:
LogError(uMsg=u'can\'t parse repository:'+self.uUrl,oException=e)
return True
if self.oRef.dPars["Finalize"]=="FILE ZIP":
try:
if not self.bOnError:
                if self.uTarget.startswith('.') or '..' in self.uTarget:
                    LogError(uMsg='All destination paths must be inside the ORCA directory, absolute paths are not allowed!:'+self.uTarget)
else:
oZipFile:cZipFile = cZipFile('').ImportFullPath(uFnFullName=self.oFnDest.string)
if oZipFile.IsZipFile():
if not Globals.bProtected:
oZipFile.Unzip(cPath('$var(APPLICATIONPATH)/'+self.uTarget))
else:
LogError(uMsg="Protected: Nothing will be unzipped")
else:
if oZipFile.Exists():
Logger.error("Failed to unzip:"+oZipFile.string)
else:
Logger.error("Failed to download zip:" + oZipFile.string)
#todo: handle unzipped files
oZipFile.Delete()
Logger.debug('LoadOnlineResource: Finished download Resource [%s][%s]' % (self.uType,self.uName))
RegisterDownLoad(uType=self.uType,uName=self.uName,iVersion=ToInt(self.uVersion))
except Exception as e:
LogError(uMsg=u'can\'t unpack resources:'+self.uUrl,oException=e)
|
thica/ORCA-Remote
|
src/ORCA/download/LoadOnlineResource.py
|
Python
|
gpl-3.0
| 9,233
|
[
"ORCA"
] |
e8d59f8fc3333a114783b75b2f4bd498661864053d9a68b4d1d546a54736e5e7
|
import astropy.io.fits as pf
import os
import numpy as np
from copy import deepcopy
from itertools import chain
import unittest
import healpy as hp
import warnings
# disable new order warnings in tests
warnings.filterwarnings("ignore")
class TestSphtFunc(unittest.TestCase):
def setUp(self):
self.lmax = 64
self.path = os.path.dirname(os.path.realpath(__file__))
self.map1 = [
hp.ma(m)
for m in hp.read_map(
os.path.join(
self.path, "data", "wmap_band_iqumap_r9_7yr_W_v4_udgraded32.fits"
),
(0, 1, 2),
)
]
self.map2 = [
hp.ma(m)
for m in hp.read_map(
os.path.join(
self.path, "data", "wmap_band_iqumap_r9_7yr_V_v4_udgraded32.fits"
),
(0, 1, 2),
)
]
self.mask = hp.read_map(
os.path.join(
self.path,
"data",
"wmap_temperature_analysis_mask_r9_7yr_v4_udgraded32.fits",
)
        ).astype(bool)
for m in chain(self.map1, self.map2):
m.mask = np.logical_not(self.mask)
self.cla = hp.read_cl(
os.path.join(
self.path,
"data",
"cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_II_lmax64_rmmono_3iter.fits",
)
)
self.cl_fortran_nomask = hp.read_cl(
os.path.join(
self.path,
"data",
"cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_II_lmax64_rmmono_3iter_nomask.fits",
)
)
with pf.open(
os.path.join(
self.path,
"data",
"cl_wmap_band_iqumap_r9_7yr_W_v4_udgraded32_IQU_lmax64_rmmono_3iter.fits",
)
) as cls_file:
# fix for pyfits to read the file with duplicate column names
for i in range(2, 6):
cls_file[1].header["TTYPE%d" % i] += "-%d" % i
cls = cls_file[1].data
# order of HEALPIX is TB, EB while in healpy is EB, TB
self.cliqu = [np.array(cls.field(i)) for i in (0, 1, 2, 3, 5, 4)]
nside = 32
lmax = 64
fwhm_deg = 7.0
seed = 12345
np.random.seed(seed)
self.mapiqu = hp.synfast(
self.cliqu,
nside,
lmax=lmax,
pixwin=False,
fwhm=np.radians(fwhm_deg),
new=False,
)
def test_anafast(self):
cl = hp.anafast(hp.remove_monopole(self.map1[0].filled()), lmax=self.lmax)
self.assertEqual(len(cl), 65)
np.testing.assert_array_almost_equal(cl, self.cla, decimal=8)
def test_anafast_nomask(self):
cl = hp.anafast(hp.remove_monopole(self.map1[0].data), lmax=self.lmax)
self.assertEqual(len(cl), 65)
np.testing.assert_array_almost_equal(cl, self.cl_fortran_nomask, decimal=8)
def test_anafast_iqu(self):
self.map1[0] = hp.remove_monopole(self.map1[0])
cl = hp.anafast(self.map1, lmax=self.lmax)
self.assertEqual(len(cl[0]), 65)
self.assertEqual(len(cl), 6)
for i in range(6):
np.testing.assert_array_almost_equal(cl[i], self.cliqu[i], decimal=8)
def test_anafast_xspectra(self):
cl = hp.anafast(
hp.remove_monopole(self.map1[0]),
hp.remove_monopole(self.map2[0]),
lmax=self.lmax,
)
self.assertEqual(len(cl), self.lmax + 1)
clx = hp.read_cl(
os.path.join(
self.path,
"data",
"cl_wmap_band_iqumap_r9_7yr_WVxspec_v4_udgraded32_II_lmax64_rmmono_3iter.fits",
)
)
np.testing.assert_array_almost_equal(cl, clx, decimal=8)
def test_synfast(self):
nside = 32
lmax = 64
fwhm_deg = 7.0
seed = 12345
np.random.seed(seed)
map_pregen = hp.read_map(
os.path.join(self.path, "data", "map_synfast_seed%d.fits" % seed), (0, 1, 2)
)
sim_map = hp.synfast(
self.cliqu,
nside,
lmax=lmax,
pixwin=False,
fwhm=np.radians(fwhm_deg),
new=False,
pol=True,
)
np.testing.assert_array_almost_equal(sim_map, map_pregen, decimal=8)
def test_smoothing_notmasked(self):
smoothed = hp.smoothing(
[m.data for m in self.map1], fwhm=np.radians(10), lmax=self.lmax
)
smoothed_f90 = hp.read_map(
os.path.join(
self.path,
"data",
"wmap_band_iqumap_r9_7yr_W_v4_udgraded32_smoothed10deg_fortran.fits",
),
(0, 1, 2),
np.float64,
)
np.testing.assert_array_almost_equal(smoothed, smoothed_f90, decimal=6)
def test_smoothing_masked(self):
smoothed = hp.smoothing(self.map1, fwhm=np.radians(10), lmax=self.lmax)
smoothed_f90 = hp.ma(
hp.read_map(
os.path.join(
self.path,
"data",
"wmap_band_iqumap_r9_7yr_W_v4_udgraded32_masked_smoothed10deg_fortran.fits",
),
(0, 1, 2),
np.float64,
)
)
# fortran does not restore the mask
smoothed_f90.mask = smoothed.mask
np.testing.assert_array_almost_equal(
smoothed.filled(), smoothed_f90.filled(), decimal=6
)
def test_gauss_beam(self):
with pf.open(
os.path.join(self.path, "data", "gaussbeam_10arcmin_lmax512_pol.fits")
) as f:
idl_gauss_beam = np.array(f[0].data).T
gauss_beam = hp.gauss_beam(np.radians(10.0 / 60.0), lmax=512, pol=True)
np.testing.assert_allclose(idl_gauss_beam, gauss_beam)
def test_alm2cl(self):
nside = 32
lmax = 64
lmax_out = 100
seed = 12345
np.random.seed(seed)
# Input power spectrum and alm
alm_syn = hp.synalm(self.cla, lmax=lmax)
cl_out = hp.alm2cl(alm_syn, lmax_out=lmax_out - 1)
np.testing.assert_array_almost_equal(cl_out, self.cla[:lmax_out], decimal=4)
def test_map2alm(self):
nside = 32
lmax = 64
fwhm_deg = 7.0
seed = 12345
np.random.seed(seed)
orig = hp.synfast(
self.cla,
nside,
lmax=lmax,
pixwin=False,
fwhm=np.radians(fwhm_deg),
new=False,
)
tmp = np.empty(orig.size * 2)
tmp[::2] = orig
maps = [orig, orig.astype(np.float32), tmp[::2]]
for use_weights in [False, True]:
for input in maps:
alm = hp.map2alm(input, iter=10, use_weights=use_weights)
output = hp.alm2map(alm, nside)
np.testing.assert_allclose(input, output, atol=1e-4)
def test_map2alm_pol(self):
tmp = [np.empty(o.size * 2) for o in self.mapiqu]
for t, o in zip(tmp, self.mapiqu):
t[::2] = o
maps = [
self.mapiqu,
[o.astype(np.float32) for o in self.mapiqu],
[t[::2] for t in tmp],
]
for use_weights in [False, True]:
for input in maps:
alm = hp.map2alm(input, iter=10, use_weights=use_weights)
output = hp.alm2map(alm, 32)
for i, o in zip(input, output):
np.testing.assert_allclose(i, o, atol=1e-4)
def test_map2alm_pol_gal_cut(self):
tmp = [np.empty(o.size * 2) for o in self.mapiqu]
for t, o in zip(tmp, self.mapiqu):
t[::2] = o
maps = [
self.mapiqu,
[o.astype(np.float32) for o in self.mapiqu],
[t[::2] for t in tmp],
]
for use_weights in [False, True]:
for input in maps:
gal_cut = 30
nside = hp.get_nside(input)
npix = hp.nside2npix(nside)
gal_mask = (
np.abs(hp.pix2ang(nside, np.arange(npix), lonlat=True)[1]) < gal_cut
)
alm = hp.map2alm(
input, iter=10, use_weights=use_weights, gal_cut=gal_cut
)
output = hp.alm2map(alm, 32)
for i, o in zip(input, output):
                # A loose tolerance is needed here because of leakage at the
                # mask boundary
i[gal_mask] = 0
np.testing.assert_allclose(i, o, atol=1e-2)
def test_rotate_alm(self):
almigc = hp.map2alm(self.mapiqu)
alms = [almigc[0], almigc[0:2], almigc, np.vstack(almigc)]
for i in alms:
o = deepcopy(i)
hp.rotate_alm(o, 0.1, 0.2, 0.3)
hp.rotate_alm(o, -0.3, -0.2, -0.1)
# FIXME: rtol=1e-6 works here, except on Debian with Python 3.4.
np.testing.assert_allclose(i, o, rtol=1e-5)
def test_rotate_alm_rotmatrix(self):
"""rotate_alm also support rotation matrix instead of angles"""
lmax = 32
nalm = hp.Alm.getsize(lmax)
        alm = np.zeros([3, nalm], dtype=np.complex128)
alm[0, 1] = 1
alm[1, 2] = 1
alm_rotated_angles = alm.copy()
angles = hp.rotator.coordsys2euler_zyz(coord=["G", "E"])
hp.rotate_alm(alm_rotated_angles, *angles)
gal2ecl = hp.Rotator(coord=["G", "E"])
hp.rotate_alm(alm, matrix=gal2ecl.mat)
np.testing.assert_allclose(alm_rotated_angles, alm)
def test_rotate_alm2(self):
# Test rotate_alm against the Fortran library
lmax = 64
nalm = hp.Alm.getsize(lmax)
        alm = np.zeros([3, nalm], dtype=np.complex128)
for i in range(3):
for ell in range(lmax + 1):
for m in range(ell):
ind = hp.Alm.getidx(lmax, ell, m)
alm[i, ind] = (i + 1) * 10 + ell + 1j * m
psi = np.pi / 3.0
theta = 0.5
phi = 0.01
hp.rotate_alm(alm, psi, theta, phi)
ref_0_0_0 = 0.00000000000 + 0.00000000000j
ref_0_21_0 = -64.0056622444 + 0.00000000000j
ref_0_21_21 = -3.19617408364 + 2.00219590117j
ref_0_42_0 = 87.8201360825 + 0.00000000000j
ref_0_42_21 = -6.57242309702 + 50.1128079361j
ref_0_42_42 = 0.792592362074 - 0.928452597766j
ref_0_63_0 = -49.6732554742 + 0.00000000000j
ref_0_63_21 = -51.2812623888 - 61.6289129316j
ref_0_63_42 = -9.32823219430 + 79.0787993482j
ref_0_63_63 = -0.157204566965 + 0.324692958700j
ref_1_0_0 = 0.00000000000 + 0.00000000000j
ref_1_21_0 = -85.5520809077 + 0.00000000000j
ref_1_21_21 = -3.57384285749 + 2.93255811219j
ref_1_42_0 = 107.541172254 + 0.00000000000j
ref_1_42_21 = -2.77944941833 + 57.1015322415j
ref_1_42_42 = 0.794212854046 - 1.10982745343j
ref_1_63_0 = -60.7153303746 + 0.00000000000j
ref_1_63_21 = -61.0915123767 - 65.9943878923j
ref_1_63_42 = -4.86354653261 + 86.5277253196j
ref_1_63_63 = -0.147165377786 + 0.360474777237j
ref = np.array(
[
ref_0_0_0,
ref_0_21_0,
ref_0_21_21,
ref_0_42_0,
ref_0_42_21,
ref_0_42_42,
ref_0_63_0,
ref_0_63_21,
ref_0_63_42,
ref_0_63_63,
ref_1_0_0,
ref_1_21_0,
ref_1_21_21,
ref_1_42_0,
ref_1_42_21,
ref_1_42_42,
ref_1_63_0,
ref_1_63_21,
ref_1_63_42,
ref_1_63_63,
]
)
mine = []
for i in [0, 1]:
for ell in range(0, lmax + 1, 21):
for m in range(0, ell + 1, 21):
ind = hp.Alm.getidx(lmax, ell, m)
mine.append(alm[i, ind])
        mine = np.array(mine)
np.testing.assert_allclose(ref, mine, rtol=1e-10)
def test_accept_ma_allows_only_keywords(self):
""" Test whether 'smoothing' wrapped with accept_ma works with only
keyword arguments. """
ma = np.ones(12 * 16 ** 2)
try:
hp.smoothing(map_in=ma)
except IndexError:
self.fail()
def test_beam2bl(self):
""" Test beam2bl against analytical transform of Gaussian beam. """
theta = np.linspace(0, np.radians(1.0), 1000)
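        # FWHM -> sigma for a Gaussian beam: sigma = FWHM / sqrt(8 ln 2)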
sigma = np.radians(10.0 / 60.0) / np.sqrt(8.0 * np.log(2.0))
gaussian_beam = np.exp(-0.5 * (theta / sigma) ** 2) / (2 * np.pi * sigma ** 2)
ell = np.arange(512 + 1.0)
gaussian_window = np.exp(-0.5 * ell * (ell + 1) * sigma ** 2)
bl = hp.beam2bl(gaussian_beam, theta, 512)
np.testing.assert_allclose(gaussian_window, bl, rtol=1e-4)
def test_bl2beam(self):
""" Test bl2beam against analytical transform of Gaussian beam. """
theta = np.linspace(0, np.radians(3.0), 1000)
sigma = np.radians(1.0) / np.sqrt(8.0 * np.log(2.0))
gaussian_beam = np.exp(-0.5 * (theta / sigma) ** 2) / (2 * np.pi * sigma ** 2)
ell = np.arange(2048 + 1.0)
gaussian_window = np.exp(-0.5 * ell * (ell + 1) * sigma ** 2)
beam = hp.bl2beam(gaussian_window, theta)
np.testing.assert_allclose(gaussian_beam, beam, rtol=1e-3)
def test_max_nside_check(self):
""" Test whether the max_nside_check correctly raises ValueErrors for nsides
that are too large."""
# Test an nside that is too large
with self.assertRaises(ValueError):
hp.check_max_nside(16384)
# Test an nside that is valid
# hp.check_max_nside will return 0 if no exceptions are raised
self.assertEqual(hp.check_max_nside(1024), 0)
def test_pixwin_base(self):
# Base case
nsides = [2 ** p for p in np.arange(1, 14)]
[hp.pixwin(nside) for nside in nsides]
# Test invalid nside
with self.assertRaises(ValueError):
hp.pixwin(15)
def test_pixwin_pol(self):
pixwin = hp.pixwin(128, pol=True)
self.assertEqual(len(pixwin), 2)
def test_pixwin_lmax(self):
nside = 128
pixwin = hp.pixwin(nside, lmax=None)
self.assertEqual(len(pixwin), 3 * nside)
lmax = 200
pixwin = hp.pixwin(nside, lmax=lmax)
self.assertEqual(len(pixwin) - 1, lmax)
def test_getlm_overflow(self):
        # test that an out-of-range index raises an error
with self.assertRaises(AssertionError):
hp.Alm.getlm(500, 125751)
if __name__ == "__main__":
unittest.main()
|
cjcopi/healpy
|
healpy/test/test_sphtfunc.py
|
Python
|
gpl-2.0
| 14,940
|
[
"Gaussian"
] |
be3434b71f88cedd90a94ef0c379bb98c058ac61c903440a988d20f23bc6f0da
|
'''Nest-specific implementation of the grid cell model.
.. currentmodule:: grid_cell_model.models.gc_net_nest
Classes
-------
.. autosummary::
NestGridCellNetwork
BasicGridCellNetwork
ConstantVelocityNetwork
PosInputs
ConstPosInputs
'''
from __future__ import absolute_import, print_function
import logging
import collections
import numpy as np
from scipy.io import loadmat
import nest
from . import gc_neurons
from .gc_net import GridCellNetwork
from .place_input import PlaceCellInput
from .place_cells import UniformBoxPlaceCells
from simtools.storage import DataStorage
logger = logging.getLogger(__name__)
gcnLogger = logging.getLogger('{0}.{1}'.format(__name__,
"NestGridCellNetwork"))
nest.Install('gridcellsmodule')
class PosInputs(object):
'''Data representing animal position input.'''
def __init__(self, pos_x, pos_y, pos_dt):
self.pos_x = pos_x
self.pos_y = pos_y
self.pos_dt = pos_dt
def __str__(self):
res = ("PosInputs:\n pos_x: {0}\n pos_y: {1}\n "
"pos_dt: {2}".format(self.pos_x, self.pos_y, self.pos_dt))
return res
class ConstPosInputs(PosInputs):
'''Data representing constant position of the animal.'''
def __init__(self, pos_x, pos_y):
        # dt is irrelevant here (say 1e3); a constant position never advances
super(ConstPosInputs, self).__init__([float(pos_x)], [float(pos_y)],
1e3)
def __str__(self):
res = ('ConstPosInputs:\n pos_x: {0}\n pos_y: {1}\n '
'pos_dt: {2}'.format(self.pos_x, self.pos_y, self.pos_dt))
return res
class NestGridCellNetwork(GridCellNetwork):
'''Grid cell network implemented in NEST simulator.'''
def __init__(self, neuronOpts, simulationOpts):
GridCellNetwork.__init__(self, neuronOpts, simulationOpts)
self.velocityInputInitialized = False
self.spikeMon_e = None
self.spikeMon_i = None
self.stateMon_e = None
self.stateMon_i = None
# Extra monitors
self._extraSpikeMons = {} # Extra spike monitors
self._extraStateMons = {} # Extra state monitors
self._ratVelocitiesLoaded = False
self._placeCellsLoaded = False
self._i_placeCellsLoaded = False
self.PC = []
self.PC_start = []
self.IPC = []
self.IPCHelper = None
self.NIPC = None
self._initNESTKernel()
self._constructNetwork()
self._initStates()
self._initCellularProperties()
    def uniformDistrib(self, mean, spread, N):
        '''Generate uniformly distributed neuron parameters.
        Parameters
        ----------
        mean : float
            Mean of the distribution.
        spread : float
            Width of the distribution.
        N : int
            Number of samples to generate.
        Returns
        -------
        An array of samples drawn uniformly from [mean - spread/2, mean]
        (note that the interval is not centred on the mean).
        '''
        return mean - spread / 2.0 * np.random.rand(N)
def _initStates(self):
'''Initialise states of E and I neurons randomly.'''
nest.SetStatus(self.E_pop, 'V_m', (self.no.EL_e +
(self.no.Vt_e - self.no.EL_e) *
np.random.rand(len(self.E_pop))))
nest.SetStatus(self.I_pop, 'V_m', (self.no.EL_i +
(self.no.Vt_i - self.no.EL_i) *
np.random.rand(len(self.I_pop))))
def _initCellularProperties(self):
'''Initialise the cellular properties of neurons in the network.'''
EL_e = self.uniformDistrib(self.no.EL_e, self.no.EL_e_spread,
len(self.E_pop))
taum_e = self.uniformDistrib(self.no.taum_e, self.no.taum_e_spread,
len(self.E_pop))
EL_i = self.uniformDistrib(self.no.EL_i, self.no.EL_i_spread,
len(self.I_pop))
taum_i = self.uniformDistrib(self.no.taum_i, self.no.taum_i_spread,
len(self.I_pop))
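        # C_m = tau_m * g_L, so spreading the membrane time constant spreads
        # the capacitance at a fixed leak conductance.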
nest.SetStatus(self.E_pop, 'E_L', EL_e)
nest.SetStatus(self.E_pop, 'C_m', taum_e * self.no.gL_e)
nest.SetStatus(self.I_pop, 'E_L', EL_i)
nest.SetStatus(self.I_pop, 'C_m', taum_i * self.no.gL_i)
# def _initClocks(self):
# for clk in self._clocks:
# clk.reinit()
def _initNESTKernel(self):
'''Initialise the NEST kernel.'''
gcnLogger.debug('Initializing NEST kernel: no. of threads: %d',
self.no.nthreads)
nest.ResetKernel()
nest.SetKernelStatus({"resolution": self.no.sim_dt,
"print_time": False})
nest.SetKernelStatus({"local_num_threads": self.no.nthreads})
# def reinit(self):
# self._initNESTKernel()
# self._initStates()
# self._initClocks()
def _constructNetwork(self):
'''Construct the E/I network'''
self.e_neuron_params = gc_neurons.getENeuronParams(self.no)
self.i_neuron_params = gc_neurons.getINeuronParams(self.no)
self.B_GABA = 1.0 # Must be here for compatibility with brian code
self.e_model_name = "iaf_gridcells"
self.i_model_name = "iaf_gridcells"
self.e_receptors = \
nest.GetDefaults(self.e_model_name)['receptor_types']
self.i_receptors = \
nest.GetDefaults(self.i_model_name)['receptor_types']
self.E_pop = nest.Create(self.e_model_name, self.net_Ne,
params=self.e_neuron_params)
self.I_pop = nest.Create(self.i_model_name, self.net_Ni,
params=self.i_neuron_params)
nest.CopyModel(
'static_synapse', 'I_AMPA_NMDA',
params={'receptor_type': self.i_receptors['AMPA_NMDA']})
nest.CopyModel(
'static_synapse', 'E_GABA_A',
params={'receptor_type': self.e_receptors['GABA_A']})
nest.CopyModel(
'static_synapse', 'PC_AMPA',
params={'receptor_type': self.e_receptors['AMPA']})
# Connect E-->I and I-->E
self._connect_network()
def simulate(self, time, printTime=True):
'''Run the simulation'''
self.endConstruction()
self.beginSimulation()
nest.SetKernelStatus({"print_time": bool(printTime)})
if not self.velocityInputInitialized:
velMsg = ("Velocity input has not been initialized. Make sure "
"this is the desired behavior. If you have set the "
"'velON' parameter to 1, then this message probably "
"indicates a bug in the simulation code.")
gcnLogger.warn(velMsg)
nest.Simulate(time)
def getSpikeDetector(self, type, N_ids=None):
'''
Get a spike detector that records from neurons given N_ids and from the
population type given by type
'''
if type == "E":
if self.spikeMon_e is not None:
return self.spikeMon_e
else:
if N_ids is None:
N_ids = np.arange(len(self.E_pop))
src = list(np.array(self.E_pop)[N_ids])
self.spikeMon_e = nest.Create('spike_detector')
nest.SetStatus(self.spikeMon_e, {
"label" : "E spikes",
'withtime': True,
'withgid' : True})
nest.ConvergentConnect(src, self.spikeMon_e)
return self.spikeMon_e
elif type == "I":
if self.spikeMon_i is not None:
return self.spikeMon_i
else:
if N_ids is None:
N_ids = np.arange(len(self.I_pop))
src = list(np.array(self.I_pop)[N_ids])
self.spikeMon_i = nest.Create('spike_detector')
nest.SetStatus(self.spikeMon_i, {
"label" : "I spikes",
'withtime': True,
'withgid' : True})
# print src
nest.ConvergentConnect(src, self.spikeMon_i)
return self.spikeMon_i
else:
raise ValueError("Unsupported type of spike detector: " + type)
def getGenericSpikeDetector(self, gids, label):
'''
NEST specific function to get a spike detector that monitors a
population of neurons with global id set to gids.
Parameters
----------
gids : list
            A list of global ids of the neurons to monitor. One important
            limitation: the gids must be a list of increasing integers
            without gaps; otherwise the local translation of neuron numbers
            when saving the data will not work.
'''
mon = nest.Create('spike_detector')
nest.SetStatus(mon, {
"label" : label,
'withtime': True,
'withgid' : True})
nest.ConvergentConnect(gids, mon)
self._extraSpikeMons[label] = (mon, gids[0])
return mon
def getStateMonitor(self, type, N_ids, params):
'''
Return a state monitor for a given population (type) and relative
indexes of neurons (N_ids), with parameters given by params
'''
if len(N_ids) == 0:
raise ValueError("State monitor needs to record from at least one "
"neuron")
N = len(N_ids)
if type == "E":
if self.stateMon_e is None:
self.stateMon_e = nest.Create('multimeter', N, params=params)
nest.Connect(self.stateMon_e, self.E_pop[0] + np.array(N_ids))
return self.stateMon_e
elif type == "I":
if self.stateMon_i is None:
self.stateMon_i = nest.Create('multimeter', N, params=params)
nest.Connect(self.stateMon_i, self.I_pop[0] + np.array(N_ids))
return self.stateMon_i
def getGenericStateMonitor(self, gids, params, label):
        '''Create a user defined state monitor (multimeter).
        Parameters
        ----------
        gids : list
            Global IDs of the neurons.
        params : dict
            Parameters of the state monitors.
        Returns
        -------
        Global IDs of the monitors, for manipulation in the NEST space.
        '''
if len(gids) == 0:
logger.warn('Requested to create 0 state monitors. Ignoring...')
return []
mon = nest.Create('multimeter', len(gids), params=params)
nest.Connect(mon, gids)
self._extraStateMons[label] = (mon)
return mon
def _divergentConnectEE(self, pre, post, weights):
post_global = list(self.E_pop[0] + np.asanyarray(post))
nest.DivergentConnect([self.E_pop[0] + pre], post_global,
model='I_AMPA_NMDA', weight=list(weights),
delay=[self.no.delay] * len(weights))
def _divergentConnectEI(self, pre, post, weights):
post_global = list(self.I_pop[0] + np.array(post))
nest.DivergentConnect([self.E_pop[0] + pre], post_global,
model='I_AMPA_NMDA', weight=list(weights),
delay=[self.no.delay] * len(weights))
def _randomDivergentConnectEI(self, pre, post, n, weights):
'''Connect each neuron in ``pre`` (E population) to n randomly selected
neurons in ``post`` (I population), with weights specified in
``weights``. If weights is a float then all the weights are constant.
'''
if isinstance(weights, collections.Iterable):
delay = [self.no.delay] * len(weights)
else:
delay = self.no.delay
nest.RandomDivergentConnect(
(self.E_pop[0] + np.asanyarray(pre)).tolist(),
(self.I_pop[0] + np.asanyarray(post)).tolist(),
n,
weight=weights,
model='I_AMPA_NMDA',
delay=delay)
def _randomDivergentConnectIE(self, pre, post, n, weights):
'''Connect each neuron in ``pre`` (I population) to n randomly selected
neurons in ``post`` (E population), with weights specified in
``weights``. If weights is a float then all the weights are constant.
'''
if isinstance(weights, collections.Iterable):
delay = [self.no.delay] * len(weights)
else:
delay = self.no.delay
nest.RandomDivergentConnect(
(self.I_pop[0] + np.asanyarray(pre)).tolist(),
(self.E_pop[0] + np.asanyarray(post)).tolist(),
n,
weight=weights,
model='E_GABA_A',
delay=delay)
def _randomDivergentConnectII(self, pre, post, n, weights,
allow_autapses=False, allow_multapses=False):
'''Connect each neuron in ``pre`` (I population) to n randomly selected
neurons in ``post`` (I population), with weights specified in
``weights``. If weights is a float then all the weights are constant.
'''
if isinstance(weights, collections.Iterable):
delay = [self.no.delay] * len(weights)
else:
delay = self.no.delay
nest.RandomDivergentConnect(
(self.I_pop[0] + np.asanyarray(pre)).tolist(),
(self.I_pop[0] + np.asanyarray(post)).tolist(),
n,
weight=weights,
model='E_GABA_A',
delay=delay,
options={'allow_autapses': allow_autapses,
'allow_multapses': allow_multapses})
def _divergentConnectIE(self, pre, post, weights):
post_global = list(self.E_pop[0] + np.array(post))
nest.DivergentConnect([self.I_pop[0] + pre], post_global,
model='E_GABA_A', weight=list(weights),
delay=[self.no.delay] * len(weights))
def getConnMatrix(self, popType):
'''
Return all *input* connections to neuron with index post from the
specified popType.
Parameters
----------
popType : string, 'E' or 'I'
Type of the population. If popType == 'E', return connection
weights for AMPA connections only. The NMDA connections will be a
fraction of the AMPA connection strength specified by the
NMDA_amount parameter.
If popType == 'I' the connection weights returned will be for
GABA_A connections.
output : a 2D numpy array
An array containing the connections. The shape is (post,
pre)/(target, source).
'''
EStart = np.min(self.E_pop)
IStart = np.min(self.I_pop)
print("EStart: {0}".format(EStart))
print("IStart: {0}".format(IStart))
print("len(self.E_pop): {0}".format(len(self.E_pop)))
print("len(self.I_pop): {0}".format(len(self.I_pop)))
if popType == 'E':
W_IE = np.zeros((len(self.I_pop), len(self.E_pop)))
for e in xrange(len(self.E_pop)):
print("E neuron {0} --> I neurons".format(e))
conns = nest.FindConnections([self.E_pop[e]])
for i in xrange(len(conns)):
target = nest.GetStatus([conns[i]], 'target')
if target[0] in self.I_pop:
W_IE[target[0] - IStart, e] = \
nest.GetStatus([conns[i]], 'weight')[0]
return W_IE
elif popType == 'I':
W_EI = np.zeros((len(self.E_pop), len(self.I_pop)))
for i in xrange(len(self.I_pop)):
print("I neuron {0} --> E neurons".format(i))
conns = nest.FindConnections([self.I_pop[i]])
for e in xrange(len(conns)):
target = nest.GetStatus([conns[e]], 'target')
if target[0] in self.E_pop:
W_EI[target[0] - EStart, i] = \
nest.GetStatus([conns[e]], 'weight')[0]
return W_EI
else:
msg = 'popType must be either \'E\' or \'I\'. Got {0}'
raise ValueError(msg.format(popType))
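    # Hedged usage sketch (assumes a fully constructed network ``net``; the
    # variable names are illustrative only):
    #     W_IE = net.getConnMatrix('E')  # (n_I, n_E) E->I AMPA/NMDA weights
    #     W_EI = net.getConnMatrix('I')  # (n_E, n_I) I->E GABA_A weights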
###########################################################################
# External sources definitions
###########################################################################
def _loadRatVelocities(self):
'''
Load rat velocities (in this case positions only)
'''
if self._ratVelocitiesLoaded:
return
logger.info('Loading rat velocities')
self.ratData = loadmat(self.no.ratVelFName)
self.rat_dt = self.ratData['dt'][0][0] * 1e3 # units: ms
self.rat_pos_x = self.ratData['pos_x'].ravel()
self.rat_pos_y = self.ratData['pos_y'].ravel()
# Map velocities to currents: we use the slope of bump speed vs. rat
# speed and inter-peak grid field distance to remap
# Bump speed-current slope must be estimated
self.velC = self.Ne_x / self.no.gridSep / self.no.bumpCurrentSlope
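        # velC converts animal speed into a drive current: Ne_x / gridSep
        # gives neurons per unit distance, and bumpCurrentSlope gives bump
        # speed per unit current (interpretation inferred from the names).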
self._ratVelocitiesLoaded = True
gcnLogger.debug('velC: %f, bumpCurrentSlope: %f, gridSep: %f',
self.velC, self.no.bumpCurrentSlope, self.no.gridSep)
def setVelocityCurrentInput_e(self, prefDirs_mask=None):
'''
Set up movement simulation, based on preferred directions of neurons.
prefDirs_mask can be used to manipulate velocity input strength
for each neuron.
'''
logger.info("Setting up velocity input current.")
self._loadRatVelocities()
if prefDirs_mask is None:
self._prefDirs_mask_e = np.ndarray((len(self.E_pop), 2))
self._prefDirs_mask_e[:, :] = 1.0
else:
raise NotImplementedError()
# Load velocities into nest: they are all shared among all
# iaf_gridcells nodes so only one neuron needs setting the actual
# values
npos = int(self.no.time / self.rat_dt)
nest.SetStatus([self.E_pop[0]], {
"rat_pos_x" : self.rat_pos_x[0:npos].tolist(),
"rat_pos_y" : self.rat_pos_y[0:npos].tolist(),
"rat_pos_dt": self.rat_dt}) # s --> ms
nest.SetStatus(self.E_pop, "pref_dir_x", self.prefDirs_e[:, 0])
nest.SetStatus(self.E_pop, "pref_dir_y", self.prefDirs_e[:, 1])
nest.SetStatus(self.E_pop, "velC", self.velC)
self.velocityInputInitialized = True
def setConstantVelocityCurrent_e(self, vel, start_t=None, end_t=None):
'''
Set the model so that there is only a constant velocity current input.
'''
gcnLogger.info('Setting up constant velocity current '
'input: {0}'.format(vel))
if start_t is not None:
raise Exception("Const velocity start time cannot be overridden "
"in this model!")
start_t = self.no.theta_start_t
if end_t is None:
end_t = self.no.time
self.rat_dt = 20.0 # ms
nVel = int((end_t - start_t) / self.rat_dt)
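        # Integrate the constant velocity into a position trace; rat_dt is in
        # ms, hence the 1e-3 factor converting it to seconds.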
self.rat_pos_x = np.cumsum(np.array([vel[0]] * nVel)) * (self.rat_dt *
1e-3)
self.rat_pos_y = np.cumsum(np.array([vel[1]] * nVel)) * (self.rat_dt *
1e-3)
# Force these velocities, not the animal velocitites
self._ratVelocitiesLoaded = True
# Load velocities into nest: they are all shared among all
# iaf_gridcells nodes so only one neuron needs setting the actual
# values
nest.SetStatus([self.E_pop[0]], {
"rat_pos_x" : self.rat_pos_x.tolist(),
"rat_pos_y" : self.rat_pos_y.tolist(),
"rat_pos_dt": self.rat_dt}) # s --> ms
print(self.rat_pos_x)
print(self.rat_pos_y)
# Map velocities to currents: Here the mapping is 1:1, i.e. the
# velocity dictates the current
self.velC = 1.
nest.SetStatus(self.E_pop, "pref_dir_x", self.prefDirs_e[:, 0])
nest.SetStatus(self.E_pop, "pref_dir_y", self.prefDirs_e[:, 1])
nest.SetStatus(self.E_pop, "velC", self.velC)
self.setStartPlaceCells(PosInputs([0.], [.0], self.rat_dt))
self.velocityInputInitialized = True
def setStartPlaceCells(self, posIn):
'''Create and connect the initialisation place cells.'''
if len(self.PC_start) == 0:
gcnLogger.info("Setting up initialization place cells")
gcnLogger.debug("Init place cell positional input: {0}".format(
str(posIn)))
gcnLogger.debug("Init place cells: start: {0}, end: {1}".format(
0, self.no.theta_start_t))
self.PC_start, _, _ = self.createGenericPlaceCells(
self.no.N_place_cells,
self.no.pc_start_max_rate,
self.no.pc_start_conn_weight,
start=0.0,
end=self.no.theta_start_t,
posIn=posIn)
else:
gcnLogger.info('Initialization place cells already set. Skipping '
'the set up')
def setPlaceCells(self, start=None, end=None, posIn=None):
'''Place cells to initialize the bump.
It should be initialized onto the correct position, i.e. the bump must
be at the correct starting position, which matches the actual velocity
simulation place cell input.
'''
if posIn is None:
self._loadRatVelocities()
startPos = ConstPosInputs(self.rat_pos_x[0], self.rat_pos_y[0])
else:
startPos = ConstPosInputs(posIn.pos_x[0], posIn.pos_y[0])
self.setStartPlaceCells(startPos)
# Here the actual velocity place cells
gcnLogger.info("Setting up place cells. User defined positional "
"data: {0}".format('no' if posIn is None else 'yes'))
gcnLogger.debug("Place cell positional input: {0}".format(str(posIn)))
self.PC, _, _ = self.createGenericPlaceCells(self.no.N_place_cells,
self.no.pc_max_rate,
self.no.pc_conn_weight,
start, end, posIn)
def createGenericPlaceCells(self, N, maxRate, weight, start=None, end=None,
posIn=None):
'''
Generate place cells and connect them to grid cells. The wiring is
fixed, and there is no plasticity. This method can be used more than
once, to set up different populations of place cells.
'''
if start is None:
start = self.no.theta_start_t
if end is None:
end = self.no.time
if posIn is None:
self._loadRatVelocities()
posIn = PosInputs(self.rat_pos_x, self.rat_pos_y, self.rat_dt)
if N != 0:
gcnLogger.info('Setting up generic place cells')
NTotal = N * N
boxSize = [self.no.arenaSize, self.no.arenaSize]
PCHelper = UniformBoxPlaceCells(boxSize, (N, N), maxRate,
self.no.pc_field_std, random=False)
PC = nest.Create('place_cell_generator', NTotal,
params={'rate' : maxRate,
'field_size': self.no.pc_field_std,
'start' : start,
'stop' : end})
nest.SetStatus(PC, 'ctr_x', PCHelper.centers[:, 0])
nest.SetStatus(PC, 'ctr_y', PCHelper.centers[:, 1])
npos = int(self.no.time / posIn.pos_dt)
nest.SetStatus([PC[0]], params={
'rat_pos_x' : list(posIn.pos_x[0:npos]),
'rat_pos_y' : list(posIn.pos_y[0:npos]),
'rat_pos_dt': posIn.pos_dt})
# test_x = nest.GetStatus([PC[0]], 'rat_pos_x')
# test_y = nest.GetStatus([PC[0]], 'rat_pos_y')
# print test_x, test_y
# Connections
# Here we extract connections from the PlaceCellInput class that
# was originaly used as a current input generator for place cell
# resetting mechanism. The output of this class perfectly matches
# how divergent connections from a single place cell should be
# mapped onto the twisted torus grid cell sheet
            # Width of the divergent connections: the field spans half a grid
            # period, and the 3-sigma rule maps that width to 6 sigma.
            connStdDev = self.no.gridSep / 2. / 6.
pc_weight_threshold = 0.1
pc_input = PlaceCellInput(self.Ne_x, self.Ne_y, self.no.arenaSize,
self.no.gridSep, [.0, .0],
fieldSigma=connStdDev)
ctr_x = nest.GetStatus(PC, 'ctr_x')
ctr_y = nest.GetStatus(PC, 'ctr_y')
for pc_id in xrange(NTotal):
w = pc_input.getSheetInput(ctr_x[pc_id],
ctr_y[pc_id]).flatten()
gt_th = w > pc_weight_threshold
post = np.array(self.E_pop)[gt_th]
w = w[gt_th]
# print post, w
nest.DivergentConnect(
[PC[pc_id]],
list(post),
weight=list(w * weight),
delay=[self.no.delay] * len(w),
model='PC_AMPA')
            self._placeCellsLoaded = True
            return PC, PCHelper, NTotal
        else:
            gcnLogger.warn("Trying to set up place cells with N_place_cells "
                           "== 0")
            self._placeCellsLoaded = True
            return None, None, None
def setIPlaceCells(self):
self._createIPlaceCells(self.no.ipc_N,
int(self.no.ipc_nconn),
self.no.ipc_max_rate,
self.no.ipc_weight,
self.no.ipc_field_std)
def _createIPlaceCells(self, N, Nconn_pcs, maxRate, weight, field_std,
start=None, end=None, posIn=None):
'''
Generate place cells and connect them to I cells. The wiring is
fixed, and there is no plasticity. This method can be used more than
once, to set up different populations of place cells.
Here the widths of the place fields are the same as in the case of the
generic place cells.
Parameters
----------
Nconn_pcs : int
Number of place cells connected to each I neurons.
'''
if start is None:
start = self.no.theta_start_t
if end is None:
end = self.no.time
if posIn is None:
self._loadRatVelocities()
posIn = PosInputs(self.rat_pos_x, self.rat_pos_y, self.rat_dt)
NTotal = N * N
PC = None
PCHelper = None
if N != 0:
gcnLogger.info('Setting up place cells connected to I cells')
gcnLogger.info("N: %d, Nconn_pcs: %d, maxRate: %f, weight: %f, field_std: %f", N,
int(Nconn_pcs), maxRate, weight, field_std)
boxSize = [self.no.arenaSize, self.no.arenaSize]
PCHelper = UniformBoxPlaceCells(boxSize, (N, N), maxRate,
field_std, random=False)
PC = nest.Create('place_cell_generator', NTotal,
params={'rate' : maxRate,
'field_size': field_std,
'start' : start,
'stop' : end})
nest.SetStatus(PC, 'ctr_x', PCHelper.centers[:, 0])
nest.SetStatus(PC, 'ctr_y', PCHelper.centers[:, 1])
npos = int(self.no.time / posIn.pos_dt)
nest.SetStatus([PC[0]], params={
'rat_pos_x' : list(posIn.pos_x[0:npos]),
'rat_pos_y' : list(posIn.pos_y[0:npos]),
'rat_pos_dt': posIn.pos_dt})
# Connections
# I-PCs are connected with a constant connection weight to I cells
for i_idx in self.I_pop:
pc_idx = np.random.choice(PC, Nconn_pcs, replace=False)
nest.ConvergentConnect(pc_idx.tolist(), [i_idx],
weight=weight,
delay=self.no.delay,
model='PC_AMPA')
else:
gcnLogger.warn("Trying to set up I place cells with 0 place cells.")
self._i_placeCellsLoaded = True
self.IPC = PC
self.IPCHelper = PCHelper
self.NIPC = NTotal
#self._getIPCConnections()
def _getIPCConnections(self):
IStart = self.I_pop[0]
W = np.zeros((len(self.I_pop), len(self.IPC)))
for pcn in xrange(len(self.IPC)):
print("IPC {0} --> I neurons".format(pcn))
conns = nest.FindConnections([self.IPC[pcn]])
for i in xrange(len(conns)):
target = nest.GetStatus([conns[i]], 'target')
if target[0] in self.I_pop:
W[target[0] - IStart, pcn] = nest.GetStatus([conns[i]],
'weight')[0]
else:
print("Target not in I_pop!")
return W
###########################################################################
# Other
###########################################################################
def getRatData(self):
'''Return the data representing the animal (rat).'''
return self.ratData
def getAttrDictionary(self):
d = {}
d['e_neuron_params'] = self.e_neuron_params
d['i_neuron_params'] = self.i_neuron_params
d['B_GABA' ] = self.B_GABA
d['E_pop' ] = np.array(self.E_pop)
d['I_pop' ] = np.array(self.I_pop)
d['PC' ] = np.array(self.PC)
d['PC_start' ] = np.array(self.PC_start)
d['net_Ne' ] = self.net_Ne
d['net_Ni' ] = self.net_Ni
d['rat_pos_x' ] = getattr(self, 'rat_pos_x', np.nan)
d['rat_pos_y' ] = getattr(self, 'rat_pos_y', np.nan)
d['rat_dt' ] = getattr(self, 'rat_dt', np.nan)
d['velC' ] = getattr(self, 'velC', np.nan)
d['Ne_x' ] = self.Ne_x
d['Ne_y' ] = self.Ne_y
d['Ni_x' ] = self.Ni_x
d['Ni_y' ] = self.Ni_y
d['prefDirs_e' ] = self.prefDirs_e
return d
class BasicGridCellNetwork(NestGridCellNetwork):
'''The default grid cell network.
A grid cell network that generates the common network and creates a basic
set of spike monitors and state monitors which are generically usable in
most of the simulation setups.
'''
def getDefaultStateMonParams(self):
'''Generate default, pre-set state monitor parameters.'''
return {
'withtime': True,
'interval': 10.0 * self.no.sim_dt,
'record_from': ['V_m', 'I_clamp_AMPA', 'I_clamp_NMDA',
'I_clamp_GABA_A', 'I_stim']
}
    def fillParams(self, dest, src):
        '''Copy all key/value pairs from ``src`` into ``dest`` and return
        ``dest``.'''
        for key, value in src.iteritems():
            dest[key] = value
        return dest
def __init__(self, options, simulationOpts=None,
nrec_spikes=(None, None),
stateRecord_type='middle-center',
stateRecParams=(None, None),
rec_spikes_probabilistic=False):
        '''Construct the network and attach default spike and state monitors.
        Parameters
        ----------
        options, simulationOpts
            Passed on to :class:`NestGridCellNetwork`.
        nrec_spikes : pair of int or None
            Number of E and I cells to record spikes from; None records all.
        stateRecord_type : string
            Which cells to record state from ('middle-center' only).
        stateRecParams : pair of dict or None
            Extra parameters merged into the default state monitor settings.
        rec_spikes_probabilistic : bool
            If True, record spikes from a random subset of cells instead of
            the first ``nrec_spikes`` cells.
        '''
NestGridCellNetwork.__init__(self, options, simulationOpts)
# Spikes
self.nrecSpikes_e = nrec_spikes[0]
self.nrecSpikes_i = nrec_spikes[1]
if self.nrecSpikes_e is None:
self.nrecSpikes_e = self.Ne_x * self.Ne_y
if self.nrecSpikes_i is None:
self.nrecSpikes_i = self.Ni_x * self.Ni_y
        if not rec_spikes_probabilistic:
self.spikeMon_e = self.getSpikeDetector("E",
np.arange(self.nrecSpikes_e))
self.spikeMon_i = self.getSpikeDetector("I",
np.arange(self.nrecSpikes_i))
else:
self.spikeMon_e = self.getSpikeDetector(
"E", np.sort(np.random.choice(len(self.E_pop), self.nrecSpikes_e,
replace=False)))
self.spikeMon_i = self.getSpikeDetector(
"I", np.sort(np.random.choice(len(self.I_pop),
self.nrecSpikes_i,
replace=False)))
# States
if stateRecord_type == 'middle-center':
self.state_record_e = [self.Ne_x / 2 - 1,
(self.Ne_y / 2 * self.Ne_x +
self.Ne_x / 2 - 1)]
self.state_record_i = [self.Ni_x / 2 - 1,
(self.Ni_y / 2 * self.Ni_x +
self.Ni_x / 2 - 1)]
else:
raise ValueError("Currently stateRecordType must be "
"'middle-center'")
self.stateMonParams_e = self.getDefaultStateMonParams()
self.stateMonParams_i = self.getDefaultStateMonParams()
stRecp_e = stateRecParams[0]
stRecp_i = stateRecParams[1]
if stRecp_e is not None:
self.fillParams(self.stateMonParams_e, stRecp_e)
if stRecp_i is not None:
self.fillParams(self.stateMonParams_i, stRecp_i)
self.stateMon_e = self.getStateMonitor("E",
self.state_record_e,
self.stateMonParams_e)
self.stateMon_i = self.getStateMonitor("I",
self.state_record_i,
self.stateMonParams_i)
def getMonitors(self):
'''Return the main spike and state monitors.'''
return (
self.spikeMon_e,
self.spikeMon_i,
self.stateMon_e,
self.stateMon_i
)
def getSpikeMonData(self, mon, gidStart):
'''
Generate a dictionary of a spike data from the monitor ``mon``
Notes
-----
NEST has some troubles with consistency in returning data in a correct
format on OSX and Linux. On Linux, sequential data are apparently
returned as np.ndarray, while on OSX the data are returned as lists.
This obviously causes huge data files on OSX since the data are stored
as lists into HDF5.
'''
st = nest.GetStatus(mon)[0]
events = st['events']
for key in events.keys():
events[key] = np.asanyarray(events[key])
events['senders'] -= gidStart
return st
def getStateMonData(self, mon):
'''
Generate a dictionary of state monitor data from the monitor ``mon``
Notes
-----
NEST has some troubles with consistency in returning data in a correct
format on OSX and Linux. On Linux, sequential data are apparently
returned as np.ndarray, while on OSX the data are returned as lists.
This obviously causes huge data files on OSX since the data are
serialized as lists into HDF5.
'''
out = nest.GetStatus(mon)
for mon_idx in range(len(out)):
events = out[mon_idx]['events']
for key in events.keys():
events[key] = np.asanyarray(events[key])
return out
def getSpikes(self, **kw):
'''
Return a dictionary of spike monitor data.
'''
out = {}
if self.spikeMon_e is not None:
out['spikeMon_e'] = self.getSpikeMonData(self.spikeMon_e,
self.E_pop[0])
if self.spikeMon_i is not None:
out['spikeMon_i'] = self.getSpikeMonData(self.spikeMon_i,
self.I_pop[0])
for label, vals in self._extraSpikeMons.iteritems():
assert label not in out.keys()
out[label] = self.getSpikeMonData(vals[0], vals[1])
return out
def getNetParams(self):
'''Get network and derived network parameters.'''
out = {}
out['options'] = self.no._einet_optdict
out['net_attr'] = self.getAttrDictionary()
return out
def getAllData(self):
'''
Save all the simulated data into a dictionary and return it.
'''
out = self.getNetParams()
# Spike monitors
# Note that getSpikes() is overridden in child classes and requires the
# espikes and ispikes arguments.
out.update(self.getSpikes(espikes=True, ispikes=True))
# Save state variables
out['stateMon_e'] = self.getStateMonData(self.stateMon_e)
out['stateMon_i'] = self.getStateMonData(self.stateMon_i)
for label, val in self._extraStateMons.iteritems():
assert label not in out.keys()
out[label] = self.getStateMonData(val)
return out
def saveSpikes(self, fileName):
'''
Save all the simulated spikes that have been recorded into a file.
Parameters
----------
fileName : string
Path and name of the file
'''
        out = DataStorage.open(fileName, 'w')
        d = self.getSpikes()
        for key, val in d.iteritems():
            out[key] = val
        out.close()
def saveAll(self, fileName):
'''
Save all the simulated data that has been recorded into a file.
Parameters
----------
fileName : string
Path and name of the file
'''
out = DataStorage.open(fileName, 'w')
d = self.getAllData()
for key, val in d.iteritems():
out[key] = val
out.close()
class ConstantVelocityNetwork(BasicGridCellNetwork):
'''
A grid cell network that simulates a constant velocity in a specified
direction.
'''
def __init__(self, options, simulationOpts=None,
vel=[0.0, 0.0],
nrec_spikes=(None, None),
stateRecord_type='middle-center',
stateRecParams=(None, None)):
'''
Generate the network.
Parameters
----------
vel : a pair [x, y]
Velocity input vector, i.e. it specifies the direction and
magnitude of the velocity current.
'''
BasicGridCellNetwork.__init__(self,
options, simulationOpts,
nrec_spikes,
stateRecord_type,
stateRecParams)
self.setConstantVelocityCurrent_e(vel)
def getSpikes(self, **kw):
'''
Return a dictionary of spike monitor data.
For keyword arguments description, see
:meth:`~ConstantVelocityNetwork.getMinimalSaveData`
'''
espikes = kw.get('espikes', True)
ispikes = kw.get('ispikes', False)
out = {}
if espikes:
out['spikeMon_e'] = self.getSpikeMonData(self.spikeMon_e,
self.E_pop[0])
if ispikes:
out['spikeMon_i'] = self.getSpikeMonData(self.spikeMon_i,
self.I_pop[0])
return out
def getMinimalSaveData(self, **kw):
'''
Parameters
----------
espikes : bool
Whether to return spikes from the E population. Defaults to True.
ispikes : bool
Whether to return spikes from the I population. Defaults to False.
'''
out = self.getNetParams()
out.update(self.getSpikes(**kw))
return out
|
MattNolanLab/ei-attractor
|
grid_cell_model/models/gc_net_nest.py
|
Python
|
gpl-3.0
| 40,776
|
[
"Brian",
"NEURON"
] |
399ac71f0e5175afea649f2f49674aecae44f4177472693b9169c933c8c49e1a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 13:46:56 2018
@author: alex
"""
import re
import numpy as np
import logging
from .ParserData import MolecularOrbitals, Amplitudes
from .QCBase import QCMethod, VarNames as V
from .QCBase import var_tag
from .ParserTools import is_float
# create module logger
mLogger = logging.getLogger("CCParser.Gaussian")
def parse_tddft_list(i, data, columns=[0], asmatrix=False):
"""General function to parse tables from the TDDFT output.
Parameters
----------
i : int
Line number of hook.
data : list
Readlines container.
columns : list
Indices of columns to parse (python counting).
asmatrix : bool
Whether or not to return a numpy.matrix object from parsed data.
Returns
-------
the_list : array_like
List or np.matrix containing the requested values.
"""
index_line = i+2
the_list = []
while True:
line = data[index_line].split()
ncol = len(line)
if ncol == 0:
break
elif not is_float(line[0]):
break
the_list.append(list(map(float, [line[i] for i in columns])))
index_line += 1
if asmatrix:
return np.asmatrix(the_list)
else:
return the_list
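# Hedged usage sketch (``data`` is assumed to hold readlines() of a Gaussian
# log and ``i`` the index of a hooked table header; names are illustrative):
#     dipoles = parse_tddft_list(i, data, columns=[1, 2, 3])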
class General(QCMethod):
"""Quantities that are not related to any method."""
def __init__(self):
super().__init__()# necessary for every derived class of QCMethod
# hooks as {function_name : hook_string}
self.hooks = {"has_finished" : "Normal termination of Gaussian"}
@var_tag(V.has_finished)
def has_finished(self, i, data):
""" Parse final statement that indicates if Gaussian finished
without errors. """
mLogger.info("whether Gaussian has finished successfully",
extra={"Parsed": V.has_finished})
return True
class TDDFT(QCMethod):
"""Parse TDDFT output."""
def __init__(self):
super().__init__()# necessary for every derived class of QCMethod
# hooks as {function_name : hook_string}
self.hooks = {"transition_dipole":("Ground to excited state transition"
" electric dipole moments"),
"exc_energy_rel": (r"Excited State\s+\d+:\s+(?P<Label>"
r"[A-Za-z0-9_?-]+)\s+(?P<ExcEV>\d+\.\d+) eV\s+"
r"(?P<ExcNM>\d+\.\d+) nm\s+f=(?P<Osc>\d+\.\d+)\s+"
r"<S\*\*2>=(?P<S2>\d+\.\d+)\s*"),
"state_label": (r"Excited State\s+\d+:\s+(?P<Label>"
r"[A-Za-z0-9_?-]+)\s+(?P<ExcEV>\d+\.\d+) eV\s+"
r"(?P<ExcNM>\d+\.\d+) nm\s+f=(?P<Osc>\d+\.\d+)\s+"
r"<S\*\*2>=(?P<S2>\d+\.\d+)\s*"),
"oscillator_strength": (r"Excited State\s+\d+:\s+(?P<Label>"
r"[A-Za-z0-9_?-]+)\s+(?P<ExcEV>\d+\.\d+) eV\s+"
r"(?P<ExcNM>\d+\.\d+) nm\s+f=(?P<Osc>\d+\.\d+)\s+"
r"<S\*\*2>=(?P<S2>\d+\.\d+)\s*"),
"amplitudes": r"Excited State\s+\d+:\s+"}
@var_tag(V.transition_dipole)
def transition_dipole(self, i, data):
"""Parse transition dipole moment in [a.u.]."""
mLogger.info("transition dipole moment",
extra={"Parsed": V.transition_dipole})
return parse_tddft_list(i, data, columns=[1,2,3])
@var_tag(V.exc_energy_rel)
def exc_energy_rel(self, i , data):
"""Parse excitation energy in [eV]."""
match = re.search(self.hooks["exc_energy_rel"], data[i])
mLogger.info("relative TDDFT excitation energy/-ies [eV]",
extra={"Parsed": V.exc_energy_rel})
return float(match.group("ExcEV"))
@var_tag(V.state_label)
def state_label(self, i , data):
"""Parse state label (multiplicity and symmetry group)."""
match = re.search(self.hooks["state_label"], data[i])
mLogger.info("state multiplicity and symmetry",
extra={"Parsed": V.state_label})
return match.group("Label")
@var_tag(V.osc_str)
def oscillator_strength(self, i , data):
"""Parse oscillator strength."""
match = re.search(self.hooks["oscillator_strength"], data[i])
mLogger.info("oscillator strength",
extra={"Parsed": V.osc_str})
return float(match.group("Osc"))
@var_tag(V.amplitudes)
def amplitudes(self, i, data):
""" Parse occ -> virt amplitudes """
pattern = r"\s+(?P<occ>\d+) -> (?P<virt>\d+)\s+(?P<v>-?\d+\.\d+)\s*"
j = 0 # line counter
amplist = []
while True:
m = re.search(pattern, data[i+1+j])
if m:
amplist.append([int(m.group("occ")), int(m.group("virt")),
float(m.group("v"))])
j += 1
else:
break
mLogger.info("TDDFT amplitudes", extra={"Parsed":V.amplitudes})
return Amplitudes.from_list(amplist, factor=2.0)
class Freq(QCMethod):
"""Parse frequency output."""
def __init__(self):
super().__init__()# necessary for every derived class of QCMethod
# hooks as {function_name : hook_string}
self.hooks = {"vibrational_freq" : "and normal coordinates:",
"infrared_intensity" : "and normal coordinates:"}
@var_tag(V.vib_freq)
def vibrational_freq(self, i , data):
"""Parse vibrational frequencies in [cm-1]."""
n = 0
freqs = []
while "------" not in data[i+n]:
if "Frequencies --" in data[i+n]:
freqs += data[i+n].split()[2:]
n += 1
freqs = list(map(float, freqs))
mLogger.info("vibrational frequencies in [cm-1]",
extra={"Parsed":V.vib_freq})
return freqs
@var_tag(V.vib_intensity)
def infrared_intensity(self, i, data):
"""Parse IR intensity in [km/mol]."""
n = 0
intensity = []
while "------" not in data[i+n]:
if "IR Inten --" in data[i+n]:
intensity += data[i+n].split()[3:]
n += 1
intensity = list(map(float, intensity))
mLogger.info("IR intensities in [km/mol]",
extra={"Parsed":V.vib_intensity})
return intensity
|
spectre007/CCParser
|
Gaussian.py
|
Python
|
mit
| 6,469
|
[
"Gaussian"
] |
4d2d0f4a0592a85f9930c03fdc9f912b0036a43c614fcecd2bc04760c61e58fc
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Methods to compute effective masses and other derivatives. """
__docformat__ = "restructuredtext en"
__all__ = ["Extract", "iter_emass", 'EMass', 'effective_mass']
from .extract import Extract as ExtractDFT
from ..tools.makeclass import makeclass, makefunc
from ..tools import make_cached
from .functional import Vasp
class Extract(ExtractDFT):
""" Extractor for reciprocal taylor expansion. """
def __init__(self, *args, **kwargs):
super(Extract, self).__init__(*args, **kwargs)
@property
def success(self):
""" True if successful run.
Checks this is an effective mass calculation.
"""
from .extract import Extract as ExtractDFT
try:
self._details
except:
return False
return ExtractDFT.success.__get__(self)
@property
@make_cached
def _details(self):
""" Parameters when calling the effective mass routine. """
from re import compile
from ..misc import exec_input
from ..error import GrepError
start = compile(r'^#+ EMASS DETAILS #+$')
end = compile(r'^#+ END EMASS DETAILS #')
with self.__outcar__() as file:
lines = None
for line in file:
if start.match(line):
lines = ""
break
if lines is None:
raise GrepError('Could not find call parameters.')
for line in file:
if end.match(line):
break
lines += line
input = exec_input(lines)
return {'center': input.center,
'nbpoints': input.nbpoints,
'input': input.input,
'range': input.range}
@property
def center(self): return self._details['center']
@property
def nbpoints(self): return self._details['nbpoints']
@property
def range(self): return self._details['range']
@property
def input(self): return self._details['input']
@staticmethod
def _orders(orders):
""" Order up to which taylor coefficients should be computed. """
result = orders
if result is None:
result = [0, 2]
if not hasattr(result, '__iter__'):
result = [result]
return sorted(result)
@property
def breakpoints(self):
""" Indices for start of each path. """
from numpy import any, abs, cross
breakpoints, last_dir = [0], None
for i, k in enumerate(self.kpoints[1:]):
if last_dir is None:
last_dir = k - self.kpoints[breakpoints[-1]]
elif any(abs(cross(last_dir, k - self.kpoints[breakpoints[-1]])) > 1e-8):
breakpoints.append(i + 1)
last_dir = None
return breakpoints + [len(self.kpoints)]
@property
def directions(self):
""" Direction for each path. """
from numpy import array
from numpy.linalg import norm
from quantities import angstrom
results = []
breakpoints = self.breakpoints
for start, end in zip(breakpoints[:-1], breakpoints[1:]):
results.append(self.kpoints[end - 1] - self.kpoints[start])
results[-1] /= norm(results[-1])
return array(results) / angstrom
def emass(self, orders=None):
""" Computes effective mass for each direction. """
from numpy import dot, concatenate, pi, array
from numpy.linalg import inv, lstsq
from math import factorial
from quantities import angstrom, emass, h_bar
from ..error import ValueError
orders = self._orders(orders)
if 2 not in orders:
raise ValueError('Cannot compute effective masses without second order term.')
results = []
breakpoints = self.breakpoints
recipcell = inv(self.structure.cell).T * 2e0 * pi / self.structure.scale
for start, end, direction in zip(breakpoints[:-1],
breakpoints[1:],
self.directions):
kpoints = self.kpoints[start:end]
x = dot(direction, dot(recipcell, kpoints.T))
measurements = self.eigenvalues[start:end].copy()
parameters = concatenate([x[:, None]**i / factorial(i) for i in orders], axis=1)
fit = lstsq(parameters, measurements)
results.append(fit[0][orders.index(2)])
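        # The fitted second-order coefficient is d^2E/dk^2; the effective
        # mass follows as m* = hbar^2 / (d^2E/dk^2), with the quantities
        # package handling the unit algebra below.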
result = (array(results) * self.eigenvalues.units * angstrom**2 / h_bar**2)
return 1. / result.rescale(1 / emass)
def fit_directions(self, orders=None):
""" Returns fit for computed directions.
When dealing with degenerate states, it is better to look at each
computed direction separately, since the order of bands might depend on
the direction (in which case it is difficult to construct a tensor).
"""
from numpy import dot, concatenate, pi
from numpy.linalg import inv, lstsq
from math import factorial
from ..error import ValueError
orders = self._orders(orders)
if 2 not in orders:
raise ValueError('Cannot compute effective masses without second order term.')
results = []
breakpoints = self.breakpoints
recipcell = inv(self.structure.cell).T * 2e0 * pi / self.structure.scale
for start, end, direction in zip(breakpoints[:-1],
breakpoints[1:],
self.directions):
kpoints = self.kpoints[start:end]
x = dot(direction, dot(recipcell, kpoints.T))
measurements = self.eigenvalues[start:end].copy()
parameters = concatenate([x[:, None]**i / factorial(i) for i in orders], axis=1)
fit = lstsq(parameters, measurements)
results.append(fit[0])
return results
def files(self, **kwargs):
""" Exports files from both calculations. """
from itertools import chain
for file in chain(super(Extract, self).files(**kwargs),
self.input.files(**kwargs)):
yield file
class _OnFinish(object):
""" Called when effective mass calculation finishes.
Adds some data to the calculation so we can figure out what the arguments
to the call.
"""
def __init__(self, previous, outcar, details):
super(_OnFinish, self).__init__()
self.details = details
self.outcar = outcar
self.previous = previous
def __call__(self, *args, **kwargs):
# first calls previous onfinish.
if self.previous is not None:
self.previous(*args, **kwargs)
# then adds data
header = ''.join(['#'] * 20)
with open(self.outcar, 'a') as file:
file.write('{0} {1} {0}\n'.format(header, 'EMASS DETAILS'))
file.write('{0}\n'.format(self.details))
file.write('{0} END {1} {0}\n'.format(header, 'EMASS DETAILS'))
def iter_emass(functional, structure=None, outdir=None, center=None,
nbpoints=3, directions=None, range=0.1, emassparams=None,
**kwargs):
""" Computes k-space taylor expansion of the eigenvalues up to given order.
First runs a vasp calculation using the first input argument, regardless
of whether a restart keyword argument is also passed. In practice,
following the general Pylada philosophy of never overwritting previous
calculations, this will not rerun a calculation if one exists in
``outdir``.
Second, a static non-self-consistent calculation is performed to compute
the eigenvalues for all relevant kpoints.
:param functional:
Two types are accepted:
- :py:class:`~vasp.Vasp` or derived functional: a self-consistent run
is performed and the resulting density is used as to define the
hamiltonian for which the effective mass is computed.
- :py:class:`~vasp.Extract` or derived functional: points to the
self-consistent calculations defining the hamiltonian for which the
effective mass is computed.
    :param structure: The structure for which to compute effective masses.
:type structure: `~pylada.crystal._cppwrapper.Structure`
:param center:
Central k-point of the taylor expansion. This should be given in
**reciprocal** units (eg coefficients to the reciprocal lattice
vectors). Default is None and means |Gamma|.
:type center: 3 floats
:param str outdir:
Root directory where to save results of calculation. Calculations
will be stored in "reciprocal" subdirectory of this input parameter.
:param int nbpoints:
        Number of points (in a single direction) with which to compute the
        Taylor expansion. Should be at least order + 1. Defaults to 3.
:param directions:
Array of directions (cartesian coordinates). If None, defaults to a
        reasonable set of directions: 001, 110, 111 and so forth. Note that
        when directions are supplied explicitly, tensor quantities should not
        be extracted from the result. The directions are normalized, and each
        path extends from
``directions/norm(directions)*range`` to
``-directions/norm(directions)*range``.
:type directions: list of 3d-vectors or None
:param float range:
Extent of the grid around the central k-point.
:param dict emassparams:
Parameters for the (non-self-consistent) effective mass caclulation
proper. For instance, could include pertubative spin-orbit
(:py:attr:`~vasp.functional.Vasp.lsorbit`).
:param kwargs:
Extra parameters which are passed on to vasp, both for the initial
calculation and the effective mass calculation proper.
:return: Extraction object from which masses can be obtained.
.. |pi| unicode:: U+003C0 .. GREEK SMALL LETTER PI
.. |Gamma| unicode:: U+00393 .. GREEK CAPITAL LETTER GAMMA
"""
from copy import deepcopy
from os import getcwd
from os.path import join, samefile, exists
from numpy import array, dot, arange, sqrt
from numpy.linalg import inv, norm
from ..error import input as InputError
from ..misc import RelativePath
from . import Vasp
# save input details for printing later on.
details = 'directions = {0!r}\n' \
'range = {1!r}\n' \
'center = {2!r}\n' \
'nbpoints = {3!r}\n' \
.format(directions, range, center, nbpoints)
# takes care of default parameters.
if center is None:
center = kwargs.pop("kpoint", [0, 0, 0])
center = array(center, dtype="float64")
if outdir is None:
outdir = getcwd()
# If has an 'iter' function, then calls it.
if hasattr(functional, 'iter'):
if structure is None:
raise InputError('If the first argument to iter_emass is a functional, '
'then a structure must also be given on which to '
                         'apply the VASP functional.')
for input in functional.iter(structure, outdir=outdir, **kwargs):
if getattr(input, 'success', False):
continue
elif hasattr(input, 'success'):
yield Extract(outdir)
return
yield input
# if is callable, then calls it.
elif hasattr(functional, '__call__'):
input = functional(structure, outdir=outdir, **kwargs)
# otherwise, assume it is an extraction object.
else:
input = functional
# creates a new VASP functional from the input.
functional = Vasp(copy=input.functional)
# check that self-consistent run was successful.
if not input.success:
yield input
return
# prepare second run.
center = dot(inv(input.structure.cell).T, center)
if directions is None:
kpoints = array([[1, 0, 0], [-1, 0, 0],
[0, 1, 0], [0, -1, 0],
[0, 0, 1], [0, 0, -1],
[1, 0, 1], [-1, 0, -1],
[0, 1, 1], [0, -1, -1],
[1, 1, 0], [-1, -1, 0],
[1, 0, -1], [-1, 0, 1],
[0, -1, 1], [0, 1, -1],
[-1, 1, 0], [1, -1, 0],
[1, 1, 1], [-1, -1, -1],
[1, 1, -1], [-1, -1, 1],
[1, -1, 1], [-1, 1, -1],
[-1, 1, 1], [1, -1, -1]], dtype='float64')
kpoints[6:18] *= 1e0 / sqrt(2.)
kpoints[18:] *= 1e0 / sqrt(3.)
else:
directions = array(directions).reshape(-1, 3)
directions = array([array(d) / norm(d) for d in directions])
points = arange(-0.5, 0.5 + 1e-8, 1.0 / float(nbpoints))
kpoints = array([d * p for d in directions for p in points])
functional.kpoints = kpoints * range + center
    # and execute it.
# onfinish is modified so that parameters are always included.
kwargs = deepcopy(kwargs)
kwargs['restart'] = input
kwargs['nonscf'] = True
kwargs['relaxation'] = None
if emassparams is not None:
kwargs.update(emassparams)
    # outdir was already defaulted to getcwd() above, so it cannot be None.
    if exists(outdir) and samefile(outdir, input.directory):
        outdir = join(input.directory, "reciprocal")
# saves input calculations into the details
if isinstance(getattr(input, '_directory', None), RelativePath):
input = deepcopy(input)
input._directory.envvar = outdir
details += 'from {0.__class__.__module__} import {0.__class__.__name__}\n' \
.format(input)
details += 'input = {0!r}\n'.format(input)
for u in functional.iter(input.structure, outdir=outdir, **kwargs):
if getattr(u, 'success', False):
continue
if hasattr(u, 'success'):
yield u
return
# modify onfinish so that call arguments are added to the output file.
onfinish = _OnFinish(u.onfinish, join(outdir, 'OUTCAR'), details)
u.onfinish = onfinish
yield u
yield iter_emass.Extract(outdir)
iter_emass.Extract = Extract
""" Extractor class for the reciprocal method. """
EMass = makeclass('EMass', Vasp, iter_emass, None, module='pylada.vasp.emass',
doc='Functional form of the '
':py:class:`pylada.emass.relax.iter_emass` method.')
# Function call to effective mass. No iterations. returns when calculations are
# done or fail.
effective_mass = makefunc('effective_mass', iter_emass, 'pylada.vasp.emass')
effective_mass.Extract = iter_emass.Extract
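# Minimal usage sketch (paths and settings are hypothetical, not from the
# source):
#     vasp = Vasp()                       # configure as needed
#     result = effective_mass(vasp, structure, outdir='emass',
#                             center=[0, 0, 0], nbpoints=5, range=0.05)
#     masses = result.emass()             # one mass per computed direction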
del makefunc
del makeclass
del ExtractDFT
del make_cached
del Vasp
|
pylada/pylada-light
|
src/pylada/vasp/emass.py
|
Python
|
gpl-3.0
| 16,450
|
[
"CRYSTAL",
"VASP"
] |
b626c193fc967b63cd3467747e1edb41998e5b09aefd63016dadaeff9ab7592b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: openshift_raw
short_description: Manage OpenShift objects
version_added: "2.5"
author: "Chris Houseknecht (@chouseknecht)"
description:
- Use the OpenShift Python client to perform CRUD operations on OpenShift objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates.
- Access to the full range of K8s and OpenShift APIs.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
options:
description:
description:
- Use only when creating a project, otherwise ignored. Adds a description to the project
metadata.
display_name:
description:
- Use only when creating a project, otherwise ignored. Adds a display name to the project
metadata.
requirements:
- "python >= 2.7"
- "openshift == 0.4.1"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a project
openshift_raw:
api_version: v1
kind: Project
name: testing
description: Testing
display_name: "This is a test project."
state: present
- name: Create a Persistent Volume Claim from an inline definition
openshift_raw:
state: present
definition:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: elastic-volume
namespace: testing
spec:
resources:
requests:
storage: 5Gi
accessModes:
- ReadWriteOnce
- name: Create a Deployment from an inline definition
openshift_raw:
state: present
definition:
apiVersion: v1
kind: DeploymentConfig
metadata:
name: elastic
labels:
app: galaxy
service: elastic
namespace: testing
spec:
template:
metadata:
labels:
app: galaxy
service: elastic
spec:
containers:
- name: elastic
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: elastic-volume
command: ["elasticsearch"]
image: "ansible/galaxy-elasticsearch:2.4.6"
volumes:
- name: elastic-volume
persistentVolumeClaim:
claimName: elastic-volume
replicas: 1
strategy:
type: Rolling
- name: Remove an existing Deployment
openshift_raw:
api_version: v1
kind: DeploymentConfig
name: elastic
namespace: testing
state: absent
- name: Create a Secret
openshift_raw:
definition:
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: testing
type: Opaque
data:
username: "{{ 'admin' | b64encode }}"
password: "{{ 'foobard' | b64encode }}"
- name: Retrieve a Secret
openshift_raw:
api: v1
kind: Secret
name: mysecret
namespace: testing
register: mysecret
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
openshift_raw:
state: present
src: /testing/deployment.yml
- name: Read definition file from the Ansible controller file system
openshift_raw:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
openshift_raw:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when the I(kind) is a List type resource. Contains a set of objects.
returned: when resource is a List
type: list
'''
from ansible.module_utils.k8s.raw import OpenShiftRawModule
def main():
OpenShiftRawModule().execute_module()
if __name__ == '__main__':
main()
|
cyberark-bizdev/ansible
|
lib/ansible/modules/clustering/openshift/openshift_raw.py
|
Python
|
gpl-3.0
| 5,342
|
[
"Galaxy"
] |
6ee8c7013f1996e1e0a4d037e1b680a3e8c1c771c3f190c993d98548a6ed0fb4
|
# This sucks!
print('Version 0.03. This is the latest version.')
print('Please help me to improve it reporting bugs to guido.sterbini@cern.ch.')
# Fixes with respect to v0.2:
# replaced len with np.size in unixtime2datetimeVectorize
# Fixes with respect to v0.1:
# fixed the local/UTC time bug when importing the MATLAB files
# avoid importing the same variable multiple times from CALS or TIMBER
# Fixes with respect to v0.00:
# thanks to Panos, who spotted an error in the vectorization of the timestamp
get_ipython().magic('matplotlib inline')
import os
import glob
import scipy.io
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.dates as md
import matplotlib
import pandas as pnd
import platform
import math
import sys
import time
from IPython.display import Image, display, HTML
from scipy.optimize import curve_fit
#!git clone https://github.com/rdemaria/pytimber.git
#sys.path.insert(0,'/eos/user/s/sterbini/MD_ANALYSIS/public/pytimber/pytimber')
import pytimber;
#pytimber.__file__
try:
import seaborn as sns
except:
print('If you want to use "seaborn" package, install it from a SWAN terminal "pip install --user seaborn"')
os.environ['LD_LIBRARY_PATH']=os.environ['LD_LIBRARY_PATH'] +':/eos/user/s/sterbini/MD_ANALYSIS/public/sharedLibraries/'
os.environ['PATH']=os.environ['PATH']+':/eos/user/s/sterbini/MD_ANALYSIS/public/'
plt.rcParams['axes.grid']=False
plt.rcParams['axes.facecolor']='none'
plt.rcParams['axes.grid.axis']='both'
plt.rcParams['axes.spines.bottom']=True
plt.rcParams['axes.spines.left']=True
plt.rcParams['axes.spines.right']=True
plt.rcParams['axes.spines.top']=True
plt.rcParams
sns.set_style("white")
sns.set_style("ticks")
sns.set_context("paper")
plt.rcParams['xtick.direction']='in'
plt.rcParams['ytick.direction']='in'
def cm2inch(*tupl):
inch = 2.54
if isinstance(tupl[0], tuple):
return tuple(i/inch for i in tupl[0])
else:
return tuple(i/inch for i in tupl)
plt.rcParams['xtick.labelsize']=14
plt.rcParams['ytick.labelsize']=plt.rcParams['xtick.labelsize']
plt.rcParams['axes.titlesize']=plt.rcParams['xtick.labelsize']
plt.rcParams['axes.labelsize']=plt.rcParams['xtick.labelsize']
plt.rcParams['legend.fontsize']=plt.rcParams['xtick.labelsize']
matplotlib.rcParams.update({'font.size': 8*2})
matplotlib.rc('font',**{'family':'serif'})
plt.rcParams['figure.figsize']=cm2inch(7.5*2.5,4.7*2.5) #cm2inch(7.5,4.7)
#matplotlib.rcParams['figure.figsize']=(15,7.5)
#matplotlib.rcParams.update({'font.size': 15})
#%config InlineBackend.figure_format = 'retina'
import matplotlib.dates as mdates
def check_ping(hostname):
response = os.system("ping -c1 -W1 " + hostname)
# and then check the response...
if response == 0:
pingstatus = True
else:
pingstatus = False
return pingstatus
def convertUnixTime(a):
return 719163+a/3600/24
def tagIt(a):
"""Use the tag_it(\'tag1, tag2\') function to tag the notebook. It will be useful for sorting them with a grep. """
print('TAGS: '+ a)
def whereamI():
import socket
return socket.gethostbyname(socket.gethostname())
generalInfo={'myIP': whereamI(), 'myPWD': os.getcwd(), 'platform': platform.platform()}
#plt.rcParams['figure.figsize'] = (10, 8)
print('Your platform is ' + generalInfo['platform'])
print('Your folder is ' + generalInfo['myPWD'])
print('Your IP is ' + generalInfo['myIP'])
print(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S'))
class dotdict(dict):
'''A dict with dot access and autocompletion.
The idea and most of the code was taken from
http://stackoverflow.com/a/23689767,
http://code.activestate.com/recipes/52308-the-simple-but-handy-collector-of-a-bunch-of-named/
http://stackoverflow.com/questions/2390827/how-to-properly-subclass-dict-and-override-get-set
'''
def __init__(self,*a,**kw):
dict.__init__(self)
self.update(*a, **kw)
self.__dict__ = self
def __setattr__(self, key, value):
if key in dict.__dict__:
raise AttributeError('This key is reserved for the dict methods.')
dict.__setattr__(self, key, value)
def __setitem__(self, key, value):
if key in dict.__dict__:
raise AttributeError('This key is reserved for the dict methods.')
dict.__setitem__(self, key, value)
def update(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).items():
self[k] = v
def __getstate__(self):
return self
def __setstate__(self, state):
self.update(state)
self.__dict__ = self
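# Minimal illustrative sketch of dotdict behaviour (not executed here):
#     d = dotdict(a=1)
#     d.b = 2
#     assert d['a'] == 1 and d.b == 2 and d['b'] == 2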
# In[ ]:
# In[1]:
if 'log' not in locals():
log=pytimber.LoggingDB()
class myToolbox:
import functools
speedOfLight=299792458;
@staticmethod
def plotSamplerFromObject(myobject, scale=1):
info=myobject
unitFactor=info.Samples.value.timeUnitFactor
firstSampleTime=info.Samples.value.firstSampleTime
samplingTrain=info.Samples.value.samplingTrain
data=info.Samples.value.samples*scale
x=np.arange(firstSampleTime*unitFactor,samplingTrain*unitFactor*len(data)+
firstSampleTime*unitFactor,
samplingTrain*unitFactor)
plt.plot(x,data);
plt.grid()
plt.xlabel('t [s]')
myCycleStamp=np.array(myToolbox.unixtime2datetime(myobject.Samples.cycleStamp/1e9))
myCycleStamp=myCycleStamp.all()
plt.title(myobject.Samples.cycleName + ', ' + myCycleStamp.ctime() )
@staticmethod
def plotOasisFromObject(myobject,timeFactor=1, myLabel=''):
myScope=myobject
offset=myScope.Acquisition.value.offset
mySignal=myScope.Acquisition.value.value*myScope.Acquisition.value.sensitivity+offset
myTime=np.arange(0,(len(mySignal)),1)*myScope.Acquisition.value.sampleInterval*1e-9*timeFactor
plt.plot(myTime,mySignal, label= myLabel)
plt.grid('on')
plt.xlabel('t [s/' + str(timeFactor) + ']')
plt.ylabel('[V]')
@staticmethod
def plotSuperSamplerFromObject(myobject):
info=myobject
unitFactor=info.SuperSamples.value.superTimeUnitFactor
firstSampleTime=info.SuperSamples.value.firstSuperSampleTime
samplingTrain=info.SuperSamples.value.superSamplingTrain
data=info.SuperSamples.value.superSamples
x=np.arange(firstSampleTime*unitFactor,samplingTrain*unitFactor*len(data)+
firstSampleTime*unitFactor,
samplingTrain*unitFactor)
plt.plot(x,data);
plt.grid()
plt.xlabel('t [s]')
myCycleStamp=np.array(myToolbox.unixtime2datetime(myobject.SuperSamples.value.superCycleStamp/1e9))
myCycleStamp=myCycleStamp.all()
plt.title(myobject.SuperSamples.cycleName + ', ' + myCycleStamp.ctime() )
@staticmethod
def plotSamplerFromDataFrame(dateframeColumn, index, dataFormatField):
info=dataFormatField
aux=dateframeColumn
unitFactor=info.Samples.value.timeUnitFactor
firstSampleTime=info.Samples.value.firstSampleTime
samplingTrain=info.Samples.value.samplingTrain
x=np.arange(firstSampleTime*unitFactor,samplingTrain*unitFactor*len(aux.get_values()[index])+
firstSampleTime*unitFactor,
samplingTrain*unitFactor)
plt.plot(x,aux.get_values()[index]);
plt.grid()
plt.xlabel('t [s]')
@staticmethod
def addToDataFrameFromCALS(myDataFrame, variables,offset_second=0, verbose=False):
#variables=['CPS.TGM:USER']
variables=list(set(variables))
cycleStamps=myDataFrame['cycleStamp'].get_values()
# select the time interval
for j in variables:
aux=j.replace('.','_');
aux=aux.replace(':','_')
aux=aux.replace(' ','_')
exec(aux+'=[]')
for i in cycleStamps:
ts1=datetime.datetime.utcfromtimestamp(i/1000000000.-.5+offset_second)
ts2=datetime.datetime.utcfromtimestamp(i/1000000000.+.5+offset_second)
if verbose:
print(ts1)
DATA=log.get(variables,ts1,ts2)
for j in variables:
aux=j.replace('.','_');
aux=aux.replace(':','_')
aux=aux.replace(' ','_')
exec('myToolbox.test=len(DATA[\'' + j + '\'][1])')
if myToolbox.test:
exec(aux + '.append(DATA[\'' + j + '\'][1][0])')
else:
exec(aux + '.append(np.nan)')
if offset_second>0:
myString='_positiveOffset_'+str(offset_second)+'_s'
if offset_second<0:
myString='_negativeOffset_'+ str(-1*offset_second)+'_s'
if offset_second==0:
myString=''
for j in variables:
aux=j.replace('.','_');
aux=aux.replace(':','_')
aux=aux.replace(' ','_')
exec('myDataFrame[\'' + j + myString + '\']=pnd.Series(' +aux+ ',myDataFrame.index)')
@staticmethod
def addSingleVariableFromMatlab(myInput, myVariable):
data=myToolbox.japcMatlabImport(myInput);
myDataFrame=pnd.DataFrame({})
a=[]
if hasattr(data,myVariable.split('.')[0]):
exec('a.append(data.' + myVariable +')')
else:
exec('a.append(np.nan)')
return a[0]
@staticmethod
def addToDataFrameFromMatlab(myDataFrame, listOfVariableToAdd):
listOfVariableToAdd=list(set(listOfVariableToAdd))
for j in listOfVariableToAdd:
myDataFrame[j]= myDataFrame['matlabFilePath'].apply(lambda myInput: myToolbox.addSingleVariableFromMatlab(myInput,j))
@staticmethod
def fromMatlabToDataFrame(listing, listOfVariableToAdd, verbose=False, matlabFullInfo=False):
listOfVariableToAdd=list(set(listOfVariableToAdd))
myDataFrame=pnd.DataFrame({})
cycleStamp=[]
cycleStampHuman=[]
PLS_matlab=[]
matlabObject=[]
matlabFilePath=[]
for j in listOfVariableToAdd:
exec(j.replace('.','_')+'=[]')
for i in listing:
if verbose:
print(i)
data=myToolbox.japcMatlabImport(i);
if matlabFullInfo:
matlabObject.append(data)
#to correct
localCycleStamp=np.max(data.headerCycleStamps);
deltaLocal_UTC=datetime.datetime.fromtimestamp(localCycleStamp/1e9)-datetime.datetime.utcfromtimestamp(localCycleStamp/1e9)
utcCycleStamp=localCycleStamp+deltaLocal_UTC.total_seconds()*1e9
cycleStamp.append(utcCycleStamp)
aux=myToolbox.unixtime2datetimeVectorize(np.max(data.headerCycleStamps)/1e9)
cycleStampHuman.append(aux.tolist())
PLS_matlab.append(data.cycleName)
matlabFilePath.append(os.path.abspath(i))
for j in listOfVariableToAdd:
if hasattr(data,j.split('.')[0]):
exec(j.replace('.','_') + '.append(data.' + j + ')')
else:
exec(j.replace('.','_') + '.append(np.nan)')
myDataFrame['cycleStamp']=pnd.Series(cycleStamp,cycleStampHuman)
myDataFrame['matlabPLS']=pnd.Series(PLS_matlab,cycleStampHuman)
myDataFrame['matlabFilePath']=pnd.Series(matlabFilePath,cycleStampHuman)
if matlabFullInfo:
myDataFrame['matlabFullInfo']=pnd.Series(matlabObject,cycleStampHuman)
for j in listOfVariableToAdd:
exec('myDataFrame[\'' + j + '\']=pnd.Series(' +j.replace('.','_')+ ',cycleStampHuman)') #myDataFrame=pnd.DataFrame({j:aux,
return myDataFrame
@staticmethod
def fromTimberToDataFrame(listOfVariableToAdd,t1,t2,verbose=False, fundamental=''):
listOfVariableToAdd=list(set(listOfVariableToAdd))
#ts1=datetime.datetime(2016,7,1)
#ts2=datetime.datetime(2016,7,2)
#listOfVariableToAdd=['CPS.LSA:CYCLE','CPS.TGM:DEST']
if fundamental=='':
DATA=log.get(listOfVariableToAdd,t1,t2 )
else:
DATA=log.get(listOfVariableToAdd,t1,t2,fundamental)
myDataFrame=pnd.DataFrame({})
if DATA!={}:
for i in listOfVariableToAdd:
myDataFrame[i]=pnd.Series(DATA[i][1].tolist(),myToolbox.unixtime2datetimeVectorize(DATA[i][0]))
myDataFrame['cycleStamp']=np.double(myDataFrame.index.astype(np.int64))
return myDataFrame
@staticmethod
def japcMatlabImport(myfile):
"""Import the in a python structure a matlab file"""
myDataStruct = scipy.io.loadmat(myfile,squeeze_me=True, struct_as_record=False)
return myDataStruct['myDataStruct']
@staticmethod
def unixtime2datetime(x):
return datetime.datetime.fromtimestamp(x)
@staticmethod
def unixtime2datetimeVectorize(x):
"""Transform unixtime in python datetime"""
aux=np.vectorize(myToolbox.unixtime2datetime)
if np.size(x)!=0:
return aux(x)
else:
return []
@staticmethod
def unixtime2utcdatetime(x):
return datetime.datetime.utcfromtimestamp(x)
@staticmethod
def unixtime2utcdatetimeVectorize(x):
"""Transform unixtime in python datetime"""
aux=np.vectorize(myToolbox.unixtime2utcdatetime)
return aux(x)
@staticmethod
def gaussian(x, mu, sig):
"""gaussian(x, mu, sig)"""
return 1/np.sqrt(2*np.pi)/sig*np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
@staticmethod
def gaussian_5_parameters(x, c, m, A, mu, sig):
"""gaussian_5_parameter(x, c, m, A, mu, sig)"""
return c+m*x+A/np.sqrt(2*np.pi)/sig*np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
@staticmethod
def FilterSpill(mySpill):
baseline=0
k=0
myNewSpill=[]
for i in np.arange(0,len(mySpill)):
x = mySpill[i] - baseline;
y = x + k * 3.2e-5;
k += x;
if i<2000 or i > 23120:
baseline += y / 5.;
myNewSpill.append(y)
return myNewSpill
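# Reading of the loop above (an interpretation, not an authoritative
# description of the instrument): it acts as a baseline-restoring filter.
# k accumulates the baseline-subtracted signal and feeds back a small
# fraction (3.2e-5) of it, while the baseline itself is only updated
# outside the beam window (samples < 2000 or > 23120).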
@staticmethod
def MTE_efficiencyReduced(mySpill):
if not (np.isnan(mySpill)).any():
myNewSpill=myToolbox.FilterSpill(mySpill)
b1_idx=2066
b2_idx=6267
b3_idx=10468
b4_idx=14669
b5_idx=18871
b6_idx=23072
is1=np.mean(myNewSpill[b1_idx:b2_idx])
is2=np.mean(myNewSpill[b2_idx:b3_idx])
is3=np.mean(myNewSpill[b3_idx:b4_idx])
is4=np.mean(myNewSpill[b4_idx:b5_idx])
core=np.mean(myNewSpill[b5_idx:b6_idx])
mySum=(is1+is2+is3+is4+core);
MTE_efficiency=np.mean([is1,is2,is3,is4])/mySum;
else:
MTE_efficiency=np.nan
return MTE_efficiency
@staticmethod
def MTE_efficiency(mySpill):
if not (np.isnan(mySpill)).any():
myNewSpill=myToolbox.FilterSpill(mySpill)
b1_idx=2066
b2_idx=6267
b3_idx=10468
b4_idx=14669
b5_idx=18871
b6_idx=23072
is1=np.mean(myNewSpill[b1_idx:b2_idx])
is2=np.mean(myNewSpill[b2_idx:b3_idx])
is3=np.mean(myNewSpill[b3_idx:b4_idx])
is4=np.mean(myNewSpill[b4_idx:b5_idx])
core=np.mean(myNewSpill[b5_idx:b6_idx])
mySum=(is1+is2+is3+is4+core);
MTE_efficiency=np.mean([is1,is2,is3,is4])/mySum;
myFFT=np.fft.fft(myNewSpill);
FFTabs=np.abs(myFFT[3500:4500]) # 175:225 MHz
FFTphase=np.angle(myFFT[3500:4500]) # 175:225 MHz
return {"MTE_efficiency": MTE_efficiency,\
"Island1": is1/mySum,\
"Island2": is2/mySum,\
"Island3": is3/mySum,\
"Island4": is4/mySum,\
"Core" : core/mySum,\
"FFT_abs" : FFTabs,\
"FFT_phase" : FFTphase,\
"mySum": mySum}
else:
return {"MTE_efficiency": np.nan, \
"Island1": np.nan,\
"Island2": np.nan,\
"Island3": np.nan,\
"Island4": np.nan,\
"Core" : np.nan,\
"FFT_abs" : np.nan,\
"FFT_phase" : np.nan,
"mySum": np.nan}
@staticmethod
def myFirst(x):
return x[0]
@staticmethod
def mySecond(x):
return x[1]
@staticmethod
def first(df):
return np.array(list(map(myToolbox.myFirst,df.get_values())))
@staticmethod
def second(df):
return np.array(list(map(myToolbox.mySecond,df.get_values())))
@staticmethod
def line():
return '<hr style="border-top-width: 4px; border-top-color: #34609b;">'
@staticmethod
def computeTransverseEmittance(WS_position_um,WS_profile_arb_unit,off_momentum_distribution_arb_unit,deltaP_P,betaGammaRelativistic,betaOptical_m,Dispersion_m):
x_inj=WS_position_um/1000;
y_inj=WS_profile_arb_unit;
popt,pcov = myToolbox.makeGaussianFit_5_parameters(x_inj,y_inj)
#y_inj_1=myToolbox.gaussian_5_parameters(x_inj,popt[0],popt[1],popt[2],popt[3],popt[4])
y_inj_1=myToolbox.gaussian_5_parameters(x_inj,popt[0],popt[1],popt[2],popt[3],popt[4])
y_inj_2=y_inj-popt[0]-popt[1]*x_inj
x_inj_2=x_inj-popt[3]
x_inj_3=np.linspace(-40,40,1000);
y_inj_3=scipy.interpolate.interp1d(x_inj_2,y_inj_2)(x_inj_3)
y_inj_4=y_inj_3/np.trapz(y_inj_3,x_inj_3)
y_inj_5=(y_inj_4+y_inj_4[::-1])/2
WS_profile_step1_5GaussianFit=y_inj_1
WS_profile_step2_dropping_baseline=y_inj_2
WS_profile_step3_interpolation=y_inj_3
WS_profile_step4_normalization=y_inj_4
WS_profile_step5_symmetric=y_inj_5
WS_position_step1_centering_mm=x_inj_2;
WS_position_step2_interpolation_mm=x_inj_3;
Dispersion_mm=Dispersion_m*1000
Dispersive_position_step1_mm=deltaP_P*Dispersion_mm
Dispersive_profile_step1_normalized=off_momentum_distribution_arb_unit/np.trapz(off_momentum_distribution_arb_unit,Dispersive_position_step1_mm)
Dispersive_position_step2_mm=WS_position_step2_interpolation_mm
Dispersive_step2_interpolation=scipy.interpolate.interp1d(Dispersive_position_step1_mm,Dispersive_profile_step1_normalized,bounds_error=0,fill_value=0)(Dispersive_position_step2_mm)
Dispersive_step3_symmetric=(Dispersive_step2_interpolation+Dispersive_step2_interpolation[::-1])/2
def myConvolution(WS_position_step2_interpolation_mm,sigma):
myConv=np.convolve(Dispersive_step3_symmetric, myToolbox.gaussian(WS_position_step2_interpolation_mm,0,sigma), 'same')
myConv/=np.trapz(myConv,WS_position_step2_interpolation_mm)
return myConv
def myError(sigma):
myConv=np.convolve(Dispersive_step3_symmetric, myToolbox.gaussian(WS_position_step2_interpolation_mm,0,sigma), 'same')
myConv/=np.trapz(myConv,WS_position_step2_interpolation_mm)
aux=myConv-WS_profile_step5_symmetric
return np.std(aux), aux, myConv
popt,pcov = curve_fit(myConvolution,WS_position_step2_interpolation_mm,WS_profile_step5_symmetric,p0=[1])
sigma=popt;
emittance=sigma**2/betaOptical_m*betaGammaRelativistic
return {'emittance_um':emittance,'sigma_mm':sigma,'WS_position_mm':WS_position_step2_interpolation_mm, 'WS_profile': WS_profile_step5_symmetric, 'Dispersive_position_mm':Dispersive_position_step2_mm, 'Dispersive_profile':Dispersive_step3_symmetric,
'convolutionBackComputed':myConvolution(WS_position_step2_interpolation_mm,sigma),
'betatronicProfile':myToolbox.gaussian(WS_position_step2_interpolation_mm,0,sigma)
}
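# How the fit above works, as I read it (hedged interpretation): the measured
# wire-scanner profile is modelled as the convolution of the dispersive
# profile with a Gaussian betatronic profile of width sigma, and the
# normalized emittance then follows from
#     emittance_um = sigma_mm**2 / betaOptical_m * betaGammaRelativistic
# (sigma in mm and beta in m conveniently yield micrometers).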
@staticmethod
#thanks to Hannes
def makeGaussianFit_5_parameters(X,Y):
i = np.where( (X>min(X)+1e-3) & (X<max(X)-1e-3) )
X = X[i]
Y = Y[i]
indx_max = np.argmax(Y)
mu0 = X[indx_max]
window = 2*100
x_tmp = X[indx_max-window:indx_max+window]
y_tmp = Y[indx_max-window:indx_max+window]
offs0 = min(y_tmp)
ampl = max(y_tmp)-offs0
x1 = x_tmp[np.searchsorted(y_tmp[:window], offs0+ampl/2)]
x2 = x_tmp[np.searchsorted(-y_tmp[window:], -offs0+ampl/2)]
FWHM = x2-x1
sigma0 = np.abs(2*FWHM/2.355)
ampl *= np.sqrt(2*np.pi)*sigma0
slope = 0
popt,pcov = curve_fit(myToolbox.gaussian_5_parameters,X,Y,p0=[offs0,slope,ampl,mu0,sigma0])
return popt,pcov
@staticmethod
def tricks():
print("=== Highlight a region of a plot ===")
print("""plt.gca().fill_between([startBLU, endBLU], [-.015,-.015], [.015,.015],color='g', alpha=.1)""")
print("====")
print("""from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code
</form>''')""")
print('=== set x-axis date ===')
print("""import matplotlib.dates as mdates
myFig=plt.figure()
ax=myFig.add_subplot(1,1,1)
plt.plot([datetime.datetime(2016,9,23),datetime.datetime(2016,9,28,3)],[1,2])
plt.xlim([datetime.datetime(2016,9,23),datetime.datetime(2016,9,28,3)])
plt.ylim([.195,.218])
plt.ylabel('MTE efficiency')
t = np.arange(datetime.datetime(2016,9,23), datetime.datetime(2016,9,29), datetime.timedelta(hours=24)).astype(datetime.datetime)
plt.xticks(t);
myFmt =mdates.DateFormatter('%m/%d')
ax.xaxis.set_major_formatter(myFmt);""")
print('=== RETINA ===')
print('% config InlineBackend.figure_format = \'retina\'')
print('===list matlab attributes===')
print("""aux=myDataFormat.PR_BQS72.SamplerAcquisition.value.__dict__
for i in aux['_fieldnames']:
print(i+ ': ' + str(aux[i]))""")
print('===SHOW matlab file===')
print('''
aux=myDataFormat.PR_BQS72.Acquisition.value.__dict__
for i in aux['_fieldnames']:
print(i+ ': ' + str(aux[i]))
''')
@staticmethod
def hideCode():
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code
</form>''')
@staticmethod
def E0_GeV(particle='proton'):
#TODO
return 0.9382723
@staticmethod
def Ek_GeVToP_GeV(Ek_GeV):
T=Ek_GeV
E0=myToolbox.E0_GeV()
return np.sqrt(T*(2*E0+T))
@staticmethod
def P_GeVToEk_GeV(cp):
return np.sqrt(myToolbox.E0_GeV()**2+cp**2)-myToolbox.E0_GeV()
@staticmethod
def P_GeVToEtot_GeV(cp):
return np.sqrt(myToolbox.E0_GeV()**2+cp**2)
@staticmethod
def P_GeVToBeta(cp):
return cp*1.0/myToolbox.P_GeVToEtot_GeV(cp)
@staticmethod
def P_GeVToGamma(cp):
return myToolbox.P_GeVToEtot_GeV(cp)/myToolbox.E0_GeV()
@staticmethod
def PS_circunference_m():
return 100.*2*np.pi
@staticmethod
def PS_frev_kHz(cp):
return myToolbox.speedOfLight*myToolbox.P_GeVToBeta(cp)/myToolbox.PS_circunference_m()/1000.
@staticmethod
def P_GeVandRelativisticParameters(cp):
return {'Ek':myToolbox.P_GeVToEk_GeV(cp),
'Etot':myToolbox.P_GeVToEtot_GeV(cp),
'beta':myToolbox.P_GeVToBeta(cp),
'gamma':myToolbox.P_GeVToGamma(cp),
'PS_frev_kHz':myToolbox.PS_frev_kHz(cp),
'PS_Trev_us':1000./myToolbox.PS_frev_kHz(cp)}
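# Kinematic relations used by the helpers above (E0 = rest energy in GeV,
# cp = momentum in GeV):
#     Etot  = sqrt(E0**2 + cp**2)
#     Ek    = Etot - E0
#     beta  = cp / Etot
#     gamma = Etot / E0
#     f_rev = beta * c / circumference
# Worked example: cp = 2.14 GeV for a proton (E0 = 0.9382723 GeV) gives
# Etot ~ 2.337 GeV, beta ~ 0.916 and gamma ~ 2.49 (rounded).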
@staticmethod
def fromTFStoDF(file):
import metaclass as metaclass
a=metaclass.twiss(file);
aux=[]
aux1=[]
for i in dir(a):
if not i[0]=='_':
if type(getattr(a,i)) is float:
#print(i + ":"+ str(type(getattr(a,i))))
aux.append(i)
aux1.append(getattr(a,i))
if type(getattr(a,i)) is str:
#print(i + ":"+ str(type(getattr(a,i))))
aux.append(i)
aux1.append(getattr(a,i))
myList=[]
myColumns=[]
for i in a.keys:
myContainer=getattr(a, i)
if len(myContainer)==0:
print("The column "+ i + ' is empty.')
else:
myColumns.append(i)
myList.append(myContainer)
optics=pnd.DataFrame(np.transpose(myList), index=a.S,columns=myColumns)
for i in optics.columns:
aux3= optics.iloc[0][i]
if type(aux3) is str:
aux3=str.replace(aux3, '+', '')
aux3=str.replace(aux3, '-', '')
aux3=str.replace(aux3, '.', '')
aux3=str.replace(aux3, 'e', '')
aux3=str.replace(aux3, 'E', '')
if aux3.isdigit():
optics[i]=optics[i].apply(np.double)
aux.append('FILE_NAME')
aux1.append(os.path.abspath(file))
aux.append('TABLE')
aux1.append(optics)
globalDF=pnd.DataFrame([aux1], columns=aux)
return globalDF
@staticmethod
def plotLattice(ax,DF, height=1., v_offset=0., color='r',alpha=0.5):
for i in range(len(DF)):
aux=DF.iloc[i]
ax.add_patch(
patches.Rectangle(
(aux.S-aux.L, v_offset-height/2.), # (x,y)
aux.L, # width
height, # height
color=color, alpha=alpha
)
)
return;
@staticmethod
def latexIt(a):
from matplotlib import rc
rc('text', usetex=a)
@staticmethod
def cals2pnd(listOfVariableToAdd,t1,t2,verbose=False, fundamental=''):
'''
cals2pnd(listOfVariableToAdd,t1,t2,verbose=False, fundamental='')
This function returns a pandas DataFrame of listOfVariableToAdd within the interval t1-t2.
It can be used in verbose mode if the corresponding flag is True.
It can also filter on fundamentals.
'''
listOfVariableToAdd=list(set(listOfVariableToAdd))
if fundamental=='':
if verbose:
print('No fundamental filter.')
DATA=log.get(listOfVariableToAdd,t1,t2 )
else:
DATA=log.get(listOfVariableToAdd,t1,t2,fundamental)
myDataFrame=pnd.DataFrame({})
if DATA!={}:
for i in listOfVariableToAdd:
if verbose:
print('Processing variable: '+ i)
auxDataFrame=pnd.DataFrame({})
auxDataFrame[i]=pnd.Series(DATA[i][1].tolist(),myToolbox.unixtime2datetimeVectorize(DATA[i][0]))
myDataFrame=pnd.merge(myDataFrame,auxDataFrame, how='outer',left_index=True,right_index=True)
return myDataFrame
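# Illustrative call (the variable name and dates are examples only):
#     df = myToolbox.cals2pnd(['CPS.TGM:USER'],
#                             datetime.datetime(2016, 7, 1),
#                             datetime.datetime(2016, 7, 2))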
@staticmethod
def LHCFillsByTime2pnd(t1,t2):
'''
LHCFillsByTime2pnd(t1,t2)
This function returns two pandas DataFrames.
The first dataframe contains the fills in the specified time interval t1-t2.
The second dataframe contains the fill modes in the specified time interval t1-t2.
'''
DATA=log.getLHCFillsByTime(t1,t2)
fillNumerList=[]
startTimeList=[]
endTimeList=[]
beamModesList=[]
ST=[]
ET=[]
FN=[]
for i in DATA:
FN.append(i['fillNumber'])
ST.append(i['startTime'])
ET.append(i['endTime'])
for j in i['beamModes']:
beamModesList.append(j['mode'])
fillNumerList.append(i['fillNumber'])
startTimeList.append(j['startTime'])
endTimeList.append(j['endTime'])
auxDataFrame=pnd.DataFrame({})
auxDataFrame['mode']=pnd.Series(beamModesList, fillNumerList)
auxDataFrame['startTime']=pnd.Series(myToolbox.unixtime2datetimeVectorize(startTimeList), fillNumerList)
if endTimeList[-1:]==[None]:
endTimeList[-1:]=[time.time()]
print('One fill is not yet ended. We replaced the "None" in endTime with the "now" time (GVA time).')
if ET[-1:]==[None]:
ET[-1:]=[time.time()]
print('One fill is not yet ended. We replaced the "None" in ET with the "now" time (GVA time).')
auxDataFrame['endTime']=pnd.Series(myToolbox.unixtime2datetimeVectorize(endTimeList), fillNumerList)
auxDataFrame['duration']=auxDataFrame['endTime']-auxDataFrame['startTime']
aux=pnd.DataFrame({})
aux['startTime']=pnd.Series(myToolbox.unixtime2datetimeVectorize(ST), FN)
aux['endTime']=pnd.Series(myToolbox.unixtime2datetimeVectorize(ET), FN)
aux['duration']=aux['endTime']-aux['startTime']
return aux, auxDataFrame
@staticmethod
def addRowsFromCals(myDF, deltaTime=datetime.timedelta(minutes=2)):
"""This method extend in time a pandas dataframe using the CALS database.
It returns a new pandas dataframe. It hase 2 arguments:
myDF: the initial dataframe
deltaTime=datetime.timedelta(minutes=2): the delta of time to apply"""
aux=myToolbox.cals2pnd(list(myDF),myDF.index[-1],myDF.index[-1]+deltaTime )
myDF=pnd.concat([myDF,aux])
return myDF
@staticmethod
def addColumnsFromCals(myDF, listOfVariables):
"""This method add a list of variables to a pandas dataframe using the CALS database.
It returns a new pandas dataframe. It hase 2 arguments:
myDF: the initial dataframe
listOfVariable: the list of variables to add"""
aux=myToolbox.cals2pnd(listOfVariables,myDF.index[0],myDF.index[-1])
myDF=pnd.concat([myDF,aux])
return myDF
@staticmethod
def time_now():
return datetime.datetime.now()
@staticmethod
def time_1_week_ago(weeks=1):
fromTime=datetime.datetime.now()
return fromTime-datetime.timedelta(weeks=weeks)
@staticmethod
def time_1_day_ago(days=1):
fromTime=datetime.datetime.now()
return fromTime-datetime.timedelta(days=days)
@staticmethod
def time_5_minutes_ago(minutes=5):
fromTime=datetime.datetime.now()
return fromTime-datetime.timedelta(minutes=minutes)
@staticmethod
def time_1_hour_ago(hours=1):
fromTime=datetime.datetime.now()
return fromTime-datetime.timedelta(hours=hours)
@staticmethod
def setXlabel(ax, hours=1., myFormat='%H:%M', startDatetime=datetime.datetime.utcfromtimestamp(0)):
"""
setXlabel(ax, hours=1., myFormat='%H:%M', startDatetime=datetime.datetime.utcfromtimestamp(0))
ax: specify the axis
hours: specify the interval
myFormat: specify the format
startDatetime: specify the starting time (to have round captions)
"""
aux=ax.get_xlim()
serial = aux[0]
if startDatetime ==datetime.datetime.utcfromtimestamp(0):
seconds = (serial - 719163.0) * 86400.0
startDatetime=datetime.datetime.utcfromtimestamp(seconds)
serial = aux[1]
seconds = (serial - 719163.0) * 86400.0
date_end=datetime.datetime.utcfromtimestamp(seconds)
t = np.arange(startDatetime, date_end, datetime.timedelta(hours=hours)).astype(datetime.datetime)
ax.set_xticks(t);
myFmt =mdates.DateFormatter(myFormat)
ax.xaxis.set_major_formatter(myFmt);
return startDatetime
@staticmethod
def setArrowLabel(ax, label='myLabel',arrowPosition=(0,0),labelPosition=(0,0), myColor='k', arrowArc_rad=-0.2):
return ax.annotate(label,
xy=arrowPosition, xycoords='data',
xytext=labelPosition, textcoords='data',
size=10, color=myColor,va="center", ha="center",
bbox=dict(boxstyle="round4", fc="w",color=myColor,lw=2),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3,rad="+str(arrowArc_rad),
fc="w", color=myColor,lw=2),
)
@staticmethod
def setShadedRegion(ax,color='g' ,xLimit=[0,1],alpha=.1):
"""
setShadedRegion(ax, color='g', xLimit=[0,1], alpha=.1)
ax: plot axis to use
color: color of the shaded region
xLimit: vector with two scalars, the start and the end point
alpha: transparency settings
"""
aux=ax.get_ylim()
# use the axis that was passed in rather than the current one
ax.fill_between(xLimit,
[aux[0],aux[0]], [aux[1],aux[1]],color=color, alpha=alpha)
@staticmethod
def mergeDF(df1,df2):
"""
It returns a new dataframe obtained by merging df1 and df2 with no duplicated columns.
mergeDF(df1,df2)
return pnd.merge(df1,df2[df2.columns.difference(df1.columns)],
left_index=True,
right_index=True,
how='outer')
"""
return pnd.merge(df1,df2[df2.columns.difference(df1.columns)],
left_index=True,
right_index=True,
how='outer')
@staticmethod
def concatDF(df1,df2):
"""
It returns a new dataframe that is the concatenation of df1 and df2.
def concatDF(df1,df2):
aux=pnd.concat([df1,df2]).sort_index()
return aux.groupby(aux.index).first() #to avoid duplicate
"""
aux=pnd.concat([df1,df2]).sort_index()
return aux.groupby(aux.index).first()
@staticmethod
def indexesFromCALS(indexesList, variables,verbose=False):
myDF=pnd.DataFrame()
for i in indexesList:
if verbose:
print(i)
aux=myToolbox.cals2pnd(variables,i,i)
myDF=myToolbox.concatDF(myDF,aux)
return myDF
|
sterbini/MDToolbox
|
myToolbox.py
|
Python
|
mit
| 35,677
|
[
"Gaussian"
] |
e1e818ab3b489a9743647570d3fbeb09b07b90a1de8c2fcf5ffc111d1fa31dc4
|
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import string
import re
from commoncode.text import toascii
"""
Extract raw ASCII strings from (possibly) binary strings.
Both plain ASCII and UTF-16-LE-encoded (aka. wide) strings are extracted.
The latter is typically found in some Windows PEs.
This is more or less similar to what GNU Binutils strings does.
Non-ASCII characters are not recognized or extracted.
Some alternatives and references:
https://github.com/fireeye/flare-floss (also included)
http://stackoverflow.com/questions/10637055/how-do-i-extract-unicode-character-sequences-from-an-mz-executable-file
http://stackoverflow.com/questions/1324067/how-do-i-get-str-translate-to-work-with-unicode-strings
http://stackoverflow.com/questions/11066400/remove-punctuation-from-unicode-formatted-strings/11066687#11066687
https://github.com/TakahiroHaruyama/openioc_scan/blob/d7e8c5962f77f55f9a5d34dbfd0799f8c57eff7f/openioc_scan.py#L184
"""
# at least three characters are needed to consider some blob as a good string
MIN_LEN = 3
def strings_from_file(location, buff_size=1024 * 1024, ascii=False, clean=True, min_len=MIN_LEN):
"""
Yield unicode strings made only of ASCII characters found in file at location.
Process the file in chunks (to limit memory usage). If ascii is True, strings
are converted to plain ASCII "str or byte" strings instead of unicode.
"""
with open(location, 'rb') as f:
while 1:
buf = f.read(buff_size)
if not buf:
break
for s in strings_from_string(buf, clean=clean, min_len=min_len):
if ascii:
s = toascii(s)
s = s.strip()
if not s or len(s) < min_len:
continue
yield s
# Extracted text is digits, letters, punctuation and white space
punctuation = re.escape(string.punctuation)
whitespaces = ' \t\n\r'
printable = 'A-Za-z0-9' + whitespaces + punctuation
null_byte = '\x00'
ascii_strings = re.compile(
# plain ASCII is a sequence of printable of a minimum length
'('
+ '[' + printable + ']'
+ '{' + str(MIN_LEN) + ',}'
+ ')'
# or utf-16-le-encoded ASCII is a sequence of ASCII+null byte
+ '|'
+ '('
+ '(?:' + '[' + printable + ']' + null_byte + ')'
+ '{' + str(MIN_LEN) + ',}'
+ ')'
).finditer
def strings_from_string(binary_string, clean=False, min_len=0):
"""
Yield strings extracted from a (possibly binary) string. The strings are ASCII
printable characters only. If clean is True, also clean and filter short and
repeated strings.
Note: we do not keep the offset of where a string was found (e.g. match.start).
"""
for match in ascii_strings(binary_string):
s = decode(match.group())
if s:
if clean:
for ss in clean_string(s, min_len=min_len):
yield ss
else:
yield s
def string_from_string(binary_string, clean=False, min_len=0):
"""
Return a unicode string extracted from a (possibly binary) string,
removing all non printable characters.
"""
return u' '.join(strings_from_string(binary_string, clean, min_len))
def decode(s):
"""
Return a decoded unicode string from s or None if the string cannot be decoded.
"""
if '\x00' in s:
try:
return s.decode('utf-16-le')
except UnicodeDecodeError:
pass
else:
return s.decode('ascii')
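# Illustrative behaviour of decode(), assuming Python 2 byte strings as in
# the rest of this module:
#     decode('H\x00i\x00')  # -> u'Hi' via UTF-16-LE (null bytes present)
#     decode('Hi')          # -> u'Hi' via plain ASCII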
remove_junk = re.compile('[' + punctuation + whitespaces + ']').sub
def clean_string(s, min_len=MIN_LEN,
junk=string.punctuation + string.digits + string.whitespace):
"""
Yield cleaned strings from string s if it passes some validity tests:
* not made of white spaces
* with a minimum length
* not made of only two repeated characters
* not made only of digits, punctuation and whitespace
"""
s = s.strip()
def valid(st):
st = remove_junk('', st)
return (st and len(st) >= min_len
# ignore character repeats, i.e. require more than one unique character
and len(set(st.lower())) > 1
# ignore string made only of digit or punctuation
and not all(c in junk for c in st))
if valid(s):
yield s.strip()
#####################################################################################
# TODO: Strings classification
# Classify strings, detect junk, detect paths, symbols, demangle symbols, unescape
# http://code.activestate.com/recipes/466293-efficient-character-escapes-decoding/?in=user-2382677
def is_file(s):
"""
Return True if s looks like a file name.
Example: dsdsd.dll
"""
filename = re.compile('^[\w_\-]+\.\w{1,4}$', re.IGNORECASE).match
return filename(s)
def is_shared_object(s):
"""
Return True if s looks like a shared object file.
Example: librt.so.1
"""
so = re.compile('^[\w_\-]+\.so\.[0-9]+\.*.[0-9]*$', re.IGNORECASE).match
return so(s)
def is_posix_path(s):
"""
Return True if s looks like a posix path.
Example: /usr/lib/librt.so.1 or /usr/lib
"""
# TODO: implement me
posix = re.compile('^/[\w_\-].*$', re.IGNORECASE).match
posix(s)
return False
def is_relative_path(s):
"""
Return True if s looks like a relative posix path.
Example: usr/lib/librt.so.1 or ../usr/lib
"""
relative = re.compile('^(?:[^/]|\.\.)[\w_\-]+/.*$', re.IGNORECASE).match
return relative(s)
def is_win_path(s):
"""
Return True if s looks like a win path.
Example: c:\usr\lib\librt.so.1.
"""
winpath = re.compile(r'^[a-zA-Z]:\\.*$', re.IGNORECASE).match
return winpath(s)
def is_c_source(s):
"""
Return True if s looks like a C source path.
Example: this.c
FIXME: should get actual algo from contenttype.
"""
return s.endswith(('.c', '.cpp', '.hpp', '.h'))
def is_java_source(s):
"""
Return True if s looks like a Java source path.
Example: this.java
FIXME: should get actual algo from contenttype.
"""
return s.endswith(('.java', '.jsp', '.aj',))
def is_glibc_ref(s):
"""
Return True if s looks like a reference to GLIBC as typically found in
ELF binaries.
"""
return '@@GLIBC' in s
def is_java_ref(s):
"""
Return True if s looks like a reference to a java class or package in a
class file.
"""
jref = re.compile('^.*$', re.IGNORECASE).match
# TODO: implement me
jref(s)
return False
def is_win_guid(s):
"""
Return True if s looks like a windows GUID/APPID/CLSID.
"""
guid = re.compile('"\{[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}\}"', re.IGNORECASE).match
# TODO: implement me
guid(s)
return False
class BinaryStringsClassifier(object):
"""
Classify extracted strings as good or bad/junk.
The types of strings that are recognized include:
file
file_path
junk
text
"""
# TODO: Implement me
if __name__ == '__main__':
# also usable a simple command line script
import sys
location = sys.argv[1]
for s in strings_from_file(location):
print(s)
|
yasharmaster/scancode-toolkit
|
src/textcode/strings.py
|
Python
|
apache-2.0
| 8,700
|
[
"VisIt"
] |
d3468ff7688e949729512574e46e0b0737bf2bfc1df179588eaca1d0f5b0bfad
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageReslice with different slab modes
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataScalarTypeToUnsignedShort()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetDataOrigin(-100.8,-100.8,-70.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
caster = vtk.vtkImageCast()
caster.SetInputConnection(reader.GetOutputPort())
caster.SetOutputScalarTypeToFloat()
t1 = vtk.vtkTransform()
t1.RotateY(75)
t2 = vtk.vtkTransform()
t2.RotateZ(90)
reslice1 = vtk.vtkImageReslice()
reslice1.SetInputConnection(reader.GetOutputPort())
reslice1.SetSlabModeToMean()
reslice1.SetSlabNumberOfSlices(45)
reslice1.SetInterpolationModeToLinear()
reslice1.SetOutputDimensionality(2)
reslice1.SetOutputSpacing(3.2,3.2,1.5)
reslice1.SetOutputExtent(0,63,0,63,0,0)
reslice2 = vtk.vtkImageReslice()
reslice2.SetInputConnection(caster.GetOutputPort())
reslice2.SetSlabModeToSum()
reslice2.SetSlabNumberOfSlices(93)
reslice2.SetInterpolationModeToLinear()
reslice2.SetResliceAxes(t1.GetMatrix())
reslice2.SetOutputDimensionality(2)
reslice2.SetOutputSpacing(3.2,3.2,1.5)
reslice2.SetOutputExtent(0,63,0,63,0,0)
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(reader.GetOutputPort())
reslice3.SetSlabModeToMax()
reslice3.SetInterpolationModeToLinear()
reslice3.SetSlabNumberOfSlices(50)
reslice3.SetResliceAxes(t1.GetMatrix())
reslice3.SetResliceTransform(t2)
reslice3.SetOutputDimensionality(2)
reslice3.SetOutputSpacing(3.2,3.2,1.5)
reslice3.SetOutputExtent(0,63,0,63,0,0)
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(reader.GetOutputPort())
reslice4.SetSlabModeToMin()
reslice4.SetSlabNumberOfSlices(11)
reslice4.SetInterpolationModeToCubic()
reslice4.SetResliceTransform(t2)
reslice4.SetOutputDimensionality(2)
reslice4.SetOutputSpacing(3.2,3.2,1.5)
reslice4.SetOutputExtent(0,63,0,63,0,0)
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(reslice1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(reslice2.GetOutputPort())
mapper2.SetColorWindow(50000)
mapper2.SetColorLevel(100000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.5,0.0,1.0,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.0,0.5,0.5)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.5,0.5,1.0,1.0)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.0,0.5,0.5,1.0)
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.SetSize(150,128)
imgWin.Render()
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/ResliceSlabModes.py
|
Python
|
gpl-3.0
| 3,551
|
[
"VTK"
] |
46c8ebb65a9c5bc88cb626f2a95795b1383f433263df2136c71fe5b2eac88727
|
### All lines that are commented out (and some that aren't) are optional ###
DB_ENGINE = 'sqlite:///db.sqlite'
#DB_ENGINE = 'mysql://user:pass@localhost/monocle'
#DB_ENGINE = 'postgresql://user:pass@localhost/monocle'
AREA_NAME = 'SLC' # the city or region you are scanning
LANGUAGE = 'EN' # ISO 639-1 codes EN, DE, ES, FR, IT, JA, KO, PT, or ZH for Pokémon/move names
MAX_CAPTCHAS = 100 # stop launching new visits if this many CAPTCHAs are pending
SCAN_DELAY = 10 # wait at least this many seconds before scanning with the same account
SPEED_UNIT = 'miles' # valid options are 'miles', 'kilometers', 'meters'
SPEED_LIMIT = 19.5 # limit worker speed to this many SPEED_UNITs per hour
# The number of simultaneous workers will be these two numbers multiplied.
# On the initial run, workers will arrange themselves in a grid across the
# rectangle you defined with MAP_START and MAP_END.
# The rows/columns will also be used for the dot grid in the console output.
# Provide more accounts than the product of your grid to allow swapping.
GRID = (4, 4) # rows, columns
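# e.g. GRID = (4, 4) gives 4 * 4 = 16 simultaneous workers.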
# the corner points of a rectangle for your workers to spread out over before
# any spawn points have been discovered
MAP_START = (40.7913, -111.9398)
MAP_END = (40.7143, -111.8046)
# do not visit spawn points outside of your MAP_START and MAP_END rectangle
# the boundaries will be the rectangle created by MAP_START and MAP_END, unless BOUNDARIES is set below
STAY_WITHIN_MAP = True
# ensure that you visit within this many meters of every part of your map during bootstrap
# lower values are more thorough but will take longer
BOOTSTRAP_RADIUS = 120
GIVE_UP_KNOWN = 75 # try to find a worker for a known spawn for this many seconds before giving up
GIVE_UP_UNKNOWN = 60 # try to find a worker for an unknown point for this many seconds before giving up
SKIP_SPAWN = 90 # don't even try to find a worker for a spawn if the spawn time was more than this many seconds ago
# How often should the mystery queue be reloaded (default 90s)
# this will reduce the grouping of workers around the last few mysteries
#RESCAN_UNKNOWN = 90
# filename of accounts CSV
ACCOUNTS_CSV = 'accounts.csv'
# the directory that the pickles folder, socket, CSV, etc. will go in
# defaults to working directory if not set
#DIRECTORY = None
# Limit the number of simultaneous logins to this many at a time.
# Lower numbers will increase the amount of time it takes for all workers to
# get started but are recommended to avoid suddenly flooding the servers with
# accounts and arousing suspicion.
SIMULTANEOUS_LOGINS = 4
# Limit the number of workers simulating the app startup process simultaneously.
SIMULTANEOUS_SIMULATION = 10
# Immediately select workers whose speed are below (SPEED_UNIT)p/h instead of
# continuing to try to find the worker with the lowest speed.
# May increase clustering if you have a high density of workers.
GOOD_ENOUGH = 0.1
# Seconds to sleep after failing to find an eligible worker before trying again.
SEARCH_SLEEP = 2.5
## alternatively define a Polygon to use as boundaries (requires shapely)
## if BOUNDARIES is set, STAY_WITHIN_MAP will be ignored
## more information available in the shapely manual:
## http://toblerity.org/shapely/manual.html#polygons
#from shapely.geometry import Polygon
#BOUNDARIES = Polygon(((40.799609, -111.948556), (40.792749, -111.887341), (40.779264, -111.838078), (40.761410, -111.817908), (40.728636, -111.805293), (40.688833, -111.785564), (40.689768, -111.919389), (40.750461, -111.949938)))
# key for Bossland's hashing server, otherwise the old hashing lib will be used
#HASH_KEY = '9d87af14461b93cb3605' # this key is fake
# Skip PokéStop spinning and egg incubation if your request rate is too high
# for your hashing subscription.
# e.g.
# 75/150 hashes available 35/60 seconds passed => fine
# 70/150 hashes available 30/60 seconds passed => throttle (only scan)
# value: how many requests to keep as spare (0.1 = 10%), False to disable
#SMART_THROTTLE = 0.1
# Swap the worker that has seen the fewest Pokémon every x seconds
# Defaults to whatever will allow every worker to be swapped within 6 hours
#SWAP_OLDEST = 300 # 5 minutes
# Only swap if it's been active for more than x minutes
#MINIMUM_RUNTIME = 10
### these next 6 options use more requests but look more like the real client
APP_SIMULATION = True # mimic the actual app's login requests
COMPLETE_TUTORIAL = True # complete the tutorial process and configure avatar for all accounts that haven't yet
INCUBATE_EGGS = True # incubate eggs if available
## encounter Pokémon to store IVs.
## valid options:
# 'all' will encounter every Pokémon that hasn't been already been encountered
# 'some' will encounter Pokémon if they are in ENCOUNTER_IDS or eligible for notification
# 'notifying' will encounter Pokémon that are eligible for notifications
# None will never encounter Pokémon
ENCOUNTER = None
#ENCOUNTER_IDS = (3, 6, 9, 45, 62, 71, 80, 85, 87, 89, 91, 94, 114, 130, 131, 134)
# PokéStops
SPIN_POKESTOPS = True # spin all PokéStops that are within range
SPIN_COOLDOWN = 300 # spin only one PokéStop every n seconds (default 300)
# minimum number of each item to keep if the bag is cleaned
# bag cleaning is disabled if this is not present or is commented out
''' # triple quotes are comments, remove them to use this ITEM_LIMITS example
ITEM_LIMITS = {
1: 20, # Poké Ball
2: 50, # Great Ball
3: 100, # Ultra Ball
101: 0, # Potion
102: 0, # Super Potion
103: 0, # Hyper Potion
104: 40, # Max Potion
201: 0, # Revive
202: 40, # Max Revive
701: 20, # Razz Berry
702: 20, # Bluk Berry
703: 20, # Nanab Berry
704: 20, # Wepar Berry
705: 20, # Pinap Berry
}
'''
# Update the console output every x seconds
REFRESH_RATE = 0.75 # 750ms
# Update the seen/speed/visit/speed stats every x seconds
STAT_REFRESH = 5
# sent with GET_PLAYER requests, should match your region
PLAYER_LOCALE = {'country': 'US', 'language': 'en', 'timezone': 'America/Denver'}
# retry a request after failure this many times before giving up
MAX_RETRIES = 3
# number of seconds before timing out on a login request
LOGIN_TIMEOUT = 2.5
# add spawn points reported in cell_ids to the unknown spawns list
#MORE_POINTS = False
# Set to True to kill the scanner when a newer version is forced
#FORCED_KILL = False
# exclude these Pokémon from the map by default (only visible in trash layer)
TRASH_IDS = (
16, 19, 21, 29, 32, 41, 46, 48, 50, 52, 56, 74, 77, 96, 111, 133,
161, 163, 167, 177, 183, 191, 194
)
# include these Pokémon on the "rare" report
RARE_IDS = (3, 6, 9, 45, 62, 71, 80, 85, 87, 89, 91, 94, 114, 130, 131, 134)
from datetime import datetime
REPORT_SINCE = datetime(2017, 2, 17) # base reports on data from after this date
# used for altitude queries and maps in reports
#GOOGLE_MAPS_KEY = 'OYOgW1wryrp2RKJ81u7BLvHfYUA6aArIyuQCXu4' # this key is fake
REPORT_MAPS = True # Show maps on reports
#ALT_RANGE = (1250, 1450) # Fall back to altitudes in this range if Google query fails
## Round altitude coordinates to this many decimal places
## More precision will lead to larger caches and more Google API calls
## Maximum distance from coords to rounded coords for precisions (at Lat40):
## 1: 7KM, 2: 700M, 3: 70M, 4: 7M
#ALT_PRECISION = 2
## Automatically resolve captchas using 2Captcha key.
#CAPTCHA_KEY = '1abc234de56fab7c89012d34e56fa7b8'
## the number of CAPTCHAs an account is allowed to receive before being swapped out
#CAPTCHAS_ALLOWED = 3
## Get new accounts from the CAPTCHA queue first if it's not empty
#FAVOR_CAPTCHA = True
# allow displaying the live location of workers on the map
MAP_WORKERS = True
# filter these Pokemon from the map to reduce traffic and browser load
#MAP_FILTER_IDS = [161, 165, 16, 19, 167]
# unix timestamp of last spawn point migration, spawn times learned before this will be ignored
LAST_MIGRATION = 1481932800 # Dec. 17th, 2016
# Treat a spawn point's expiration time as unknown if nothing is seen at it on more than x consecutive visits
FAILURES_ALLOWED = 2
## Map data provider and appearance, previews available at:
## https://leaflet-extras.github.io/leaflet-providers/preview/
#MAP_PROVIDER_URL = '//{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'
#MAP_PROVIDER_ATTRIBUTION = '© <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors'
# set of proxy addresses and ports
# SOCKS requires aiosocks to be installed
#PROXIES = {'http://127.0.0.1:8080', 'https://127.0.0.1:8443', 'socks5://127.0.0.1:1080'}
# convert spawn_id to integer for more efficient DB storage, set to False if
# using an old database since the data types are incompatible.
#SPAWN_ID_INT = True
# Bytestring key to authenticate with manager for inter-process communication
#AUTHKEY = b'm3wtw0'
# Address to use for manager, leave commented if you're not sure.
#MANAGER_ADDRESS = r'\\.\pipe\monocle' # must be in this format for Windows
#MANAGER_ADDRESS = 'monocle.sock' # the socket name for Unix systems
#MANAGER_ADDRESS = ('127.0.0.1', 5002) # could be used for CAPTCHA solving and live worker maps on remote systems
# Store the cell IDs so that they don't have to be recalculated every visit.
# Enabling will (potentially drastically) increase memory usage.
#CACHE_CELLS = False
# Only for use with web_sanic (requires PostgreSQL)
#DB = {'host': '127.0.0.1', 'user': 'monocle_role', 'password': 'pik4chu', 'port': '5432', 'database': 'monocle'}
# Disable to use Python's event loop even if uvloop is installed
#UVLOOP = True
# The number of coroutines that are allowed to run simultaneously.
#COROUTINES_LIMIT = GRID[0] * GRID[1]
### FRONTEND CONFIGURATION
LOAD_CUSTOM_HTML_FILE = False # File path MUST be 'templates/custom.html'
LOAD_CUSTOM_CSS_FILE = False # File path MUST be 'static/css/custom.css'
LOAD_CUSTOM_JS_FILE = False # File path MUST be 'static/js/custom.js'
#FB_PAGE_ID = None
#TWITTER_SCREEN_NAME = None # Username without '@' char
#DISCORD_INVITE_ID = None
#TELEGRAM_USERNAME = None # Username without '@' char
## Variables below will be used as default values on frontend
FIXED_OPACITY = False # Make marker opacity independent of remaining time
SHOW_TIMER = False # Show remaining time on a label under each pokemon marker
### OPTIONS BELOW THIS POINT ARE ONLY NECESSARY FOR NOTIFICATIONS ###
NOTIFY = False # enable notifications
# create images with Pokémon image and optionally include IVs and moves
# requires cairo and ENCOUNTER = 'notifying' or 'all'
TWEET_IMAGES = True
# IVs and moves are now dependent on level, so this is probably not useful
IMAGE_STATS = False
# As many hashtags as can fit will be included in your tweets, these will
# be combined with landmark-specific hashtags (if applicable).
HASHTAGS = {AREA_NAME, 'Monocle', 'PokemonGO'}
#TZ_OFFSET = 0 # UTC offset in hours (if different from system time)
# the required number of seconds remaining to notify about a Pokémon
TIME_REQUIRED = 600 # 10 minutes
### Only set either the NOTIFY_RANKING or NOTIFY_IDS, not both!
# The (x) rarest Pokémon will be eligible for notification. Whether a
# notification is sent or not depends on its score, as explained below.
NOTIFY_RANKING = 90
# Pokémon to potentially notify about, in order of preference.
# The first in the list will have a rarity score of 1, the last will be 0.
#NOTIFY_IDS = (130, 89, 131, 3, 9, 134, 62, 94, 91, 87, 71, 45, 85, 114, 80, 6)
# Sightings of the top (x) will always be notified about, even if below TIME_REQUIRED
# (ignored if using NOTIFY_IDS instead of NOTIFY_RANKING)
ALWAYS_NOTIFY = 14
# Always notify about the following Pokémon even if their time remaining or scores are not high enough
#ALWAYS_NOTIFY_IDS = {89, 130, 144, 145, 146, 150, 151}
# Never notify about the following Pokémon, even if they would otherwise be eligible
#NEVER_NOTIFY_IDS = TRASH_IDS
# Override the rarity score for particular Pokémon
# format is: {pokemon_id: rarity_score}
#RARITY_OVERRIDE = {148: 0.6, 149: 0.9}
# Ignore IV score and only base decision on rarity score (default if IVs not known)
#IGNORE_IVS = False
# Ignore rarity score and only base decision on IV score
#IGNORE_RARITY = False
# The Pokémon score required to notify goes on a sliding scale from INITIAL_SCORE
# to MINIMUM_SCORE over the course of FULL_TIME seconds following a notification
# Pokémon scores are an average of the Pokémon's rarity score and IV score (from 0 to 1)
# If NOTIFY_RANKING is 90, the 90th most common Pokémon will have a rarity of score 0, the rarest will be 1.
# IV score is the IV sum divided by 45 (perfect IVs).
FULL_TIME = 1800 # the number of seconds after a notification when only MINIMUM_SCORE will be required
INITIAL_SCORE = 0.7 # the required score immediately after a notification
MINIMUM_SCORE = 0.4 # the required score after FULL_TIME seconds have passed
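## Worked example (illustrative, assuming the slide is linear): with the values
## above, 900 seconds after a notification the required score is
## 0.7 - (0.7 - 0.4) * (900 / 1800) = 0.55.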
### The following values are fake, replace them with your own keys to enable
### notifications, otherwise exclude them from your config
### You must provide keys for at least one service to use notifications.
#PB_API_KEY = 'o.9187cb7d5b857c97bfcaa8d63eaa8494'
#PB_CHANNEL = 0 # set to the integer of your channel, or to None to push privately
#TWITTER_CONSUMER_KEY = '53d997264eb7f6452b7bf101d'
#TWITTER_CONSUMER_SECRET = '64b9ebf618829a51f8c0535b56cebc808eb3e80d3d18bf9e00'
#TWITTER_ACCESS_KEY = '1dfb143d4f29-6b007a5917df2b23d0f6db951c4227cdf768b'
#TWITTER_ACCESS_SECRET = 'e743ed1353b6e9a45589f061f7d08374db32229ec4a61'
## Telegram bot token is the one Botfather sends to you after completing bot creation.
## Chat ID can be two different values:
## 1) '@channel_name' for channels
## 2) Your chat_id if you will use your own account. To retrieve your ID, write to your bot and check this URL:
## https://api.telegram.org/bot<BOT_TOKEN_HERE>/getUpdates
#TELEGRAM_BOT_TOKEN = '123456789:AA12345qT6QDd12345RekXSQeoZBXVt-AAA'
#TELEGRAM_CHAT_ID = '@your_channel'
#WEBHOOKS = {'http://127.0.0.1:4000'}
##### Referencing landmarks in your tweets/notifications
#### It is recommended to store the LANDMARKS object in a pickle to reduce startup
#### time if you are using queries. An example script for this is in:
#### scripts/pickle_landmarks.example.py
#from pickle import load
#with open('pickles/landmarks.pickle', 'rb') as f:
# LANDMARKS = load(f)
### if you do pickle it, just load the pickle and omit everything below this point
#from monocle.landmarks import Landmarks
#LANDMARKS = Landmarks(query_suffix=AREA_NAME)
# Landmarks to reference when Pokémon are nearby
# If no points are specified then it will query OpenStreetMap for the coordinates
# If 1 point is provided then it will use those coordinates but not create a shape
# If 2 points are provided it will create a rectangle with its corners at those points
# If 3 or more points are provided it will create a polygon with vertices at each point
# You can specify the string to search for on OpenStreetMap with the query parameter
# If no query or points is provided it will query with the name of the landmark (and query_suffix)
# Optionally provide a set of hashtags to be used for tweets about this landmark
# Use is_area for neighborhoods, regions, etc.
# When selecting a landmark, non-areas will be chosen first if any are close enough
# the default phrase is 'in' for areas and 'at' for non-areas, but can be overridden for either.
### replace these with well-known places in your area
## since no points or query is provided, the names provided will be queried and suffixed with AREA_NAME
#LANDMARKS.add('Rice Eccles Stadium', shortname='Rice Eccles', hashtags={'Utes'})
#LANDMARKS.add('the Salt Lake Temple', shortname='the temple', hashtags={'TempleSquare'})
## provide two corner points to create a square for this area
#LANDMARKS.add('City Creek Center', points=((40.769210, -111.893901), (40.767231, -111.888275)), hashtags={'CityCreek'})
## provide a query that is different from the landmark name so that OpenStreetMap finds the correct one
#LANDMARKS.add('the State Capitol', shortname='the Capitol', query='Utah State Capitol Building')
### area examples ###
## query using name, override the default area phrase so that it says 'at (name)' instead of 'in'
#LANDMARKS.add('the University of Utah', shortname='the U of U', hashtags={'Utes'}, phrase='at', is_area=True)
## provide corner points to create a polygon of the area since OpenStreetMap does not have a shape for it
#LANDMARKS.add('Yalecrest', points=((40.750263, -111.836502), (40.750377, -111.851108), (40.751515, -111.853833), (40.741212, -111.853909), (40.741188, -111.836519)), is_area=True)
|
sebast1219/Monocle
|
config.example.py
|
Python
|
mit
| 16,723
|
[
"VisIt"
] |
7e5f4a2422a2813adb9547e43e273519ba7c1c5c996d12919050e8257c2ac80d
|
"""
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
|
RPGOne/Skynet
|
scikit-learn-0.18.1/examples/datasets/plot_random_dataset.py
|
Python
|
bsd-3-clause
| 2,254
|
[
"Gaussian"
] |
1f2d258b7bb117fb25c445e02c1b6fb090c7a4daf63714caa6e5b9ce368e9c15
|
tests = [
("python", "test_list.py", {'dir': 'ML'}),
("python", "test_list.py", {'dir': 'Chem'}),
("python", "test_list.py", {'dir': 'DataStructs'}),
("python", "test_list.py", {'dir': 'Dbase'}),
("python", "test_list.py", {'dir': 'SimDivFilters'}),
("python", "test_list.py", {'dir': 'VLib'}),
("python", "test_list.py", {'dir': 'utils'}),
("python", "test_list.py", {'dir': 'sping'}),
]
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
|
rvianello/rdkit
|
rdkit/test_list.py
|
Python
|
bsd-3-clause
| 578
|
[
"RDKit"
] |
fdf32d85bfb93e777b68fdaec205c5cc39c617249a163324bdf4cc156dca0c70
|
# -*- coding: utf-8 -*-
"""
sphinx.apidoc
~~~~~~~~~~~~~
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx. It also
creates a modules index (named modules.<suffix>).
This is derived from the "sphinx-autopackage" script, which is:
Copyright 2008 Société des arts technologiques (SAT),
http://www.sat.qc.ca/
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import string
import sys
import optparse
import traceback as tb
from os.path import join
from sphinx.util.osutil import walk
from sphinx import __version__
# automodule options
if 'SPHINX_APIDOC_OPTIONS' in os.environ:
OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',')
else:
OPTIONS = [
'members',
'undoc-members',
# 'inherited-members', # disabled because there's a bug in sphinx
'show-inheritance',
]
INITPY = '__init__.py'
PY_SUFFIXES = set(['.py', '.pyx'])
def makename(package, module):
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
def wrapped_print(msg, opts):
if not opts.quiet:
print(msg)
def write_file(name, text, opts):
"""Write the output file for module/package <name>."""
fname = join(opts.destdir, '%s.%s' % (name, opts.suffix))
if opts.dryrun:
wrapped_print('Would create file %s.' % fname, opts)
return
if not opts.force and os.path.isfile(fname):
wrapped_print('File %s already exists, skipping.' % fname, opts)
else:
wrapped_print('Creating file %s.' % fname, opts)
f = open(fname, 'w')
try:
f.write(text)
finally:
f.close()
def format_heading(level, text):
"""Create a heading of <level> [1, 2 or 3 supported]."""
underlining = ['=', '-', '~', ][level - 1] * len(text)
return '%s\n%s\n\n' % (text, underlining)
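# For example, format_heading(2, 'Submodules') returns
# 'Submodules\n----------\n\n' -- the text, a matching '-' underline,
# and a trailing blank line.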
def format_directive(module, package=None):
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
directive += ' :%s:\n' % option
return directive
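# For example, format_directive('linalg', 'scipy.sparse') produces, with the
# default OPTIONS above:
# .. automodule:: scipy.sparse.linalg
#    :members:
#    :undoc-members:
#    :show-inheritance: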
def create_module_file(package, module, opts):
"""Build the text of the file and write the file."""
if not opts.noheadings:
text = format_heading(1, '%s module' % module)
else:
text = ''
#text += format_heading(2, ':mod:`%s` Module' % module)
text += format_directive(module, package)
write_file(makename(package, module), text, opts)
def create_package_file(master_package, subroot, submods, opts, subs):
"""Build the text of the file and write the file."""
text = format_heading(1, '%s package' % makename(master_package, subroot))
if opts.modulefirst:
text += format_directive(subroot, master_package)
text += '\n'
# if there are some package directories, add a TOC for these subpackages
if subs:
text += format_heading(2, 'Subpackages')
text += '.. toctree::\n\n'
for sub in subs:
text += ' %s.%s\n' % (makename(master_package, subroot), sub)
text += '\n'
if submods:
text += format_heading(2, 'Submodules')
if opts.separatemodules:
text += '.. toctree::\n\n'
for submod in submods:
modfile = makename(master_package, makename(subroot, submod))
text += ' %s\n' % modfile
# generate separate file for this module
if not opts.noheadings:
filetext = format_heading(1, '%s module' % modfile)
else:
filetext = ''
filetext += format_directive(makename(subroot, submod),
master_package)
write_file(modfile, filetext, opts)
else:
for submod in submods:
modfile = makename(master_package, makename(subroot, submod))
if not opts.noheadings:
text += format_heading(2, '%s module' % modfile)
text += format_directive(makename(subroot, submod),
master_package)
text += '\n'
text += '\n'
if not opts.modulefirst:
text += format_heading(2, 'Module contents')
text += format_directive(subroot, master_package)
write_file(makename(master_package, subroot), text, opts)
def create_modules_toc_file(modules, opts, name='modules'):
"""Create the module's index."""
text = format_heading(1, '%s' % opts.header)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = ''
for module in modules:
# check whether the module is a subpackage and, if so, ignore it
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
write_file(name, text, opts)
def walk_dir_tree(rootpath, excludes, opts):
""" Walk the directory tree and create the corresponding ReST files as
dictated by the options.
"""
toplevels = []
if has_initpy(rootpath):
root_package = rootpath.split(os.sep)[-1]
else:
# Generate .rst files for the top level modules even if we are
# not in a package (this is a one time exception)
root_package = None
mods = get_modules(os.listdir(rootpath), excludes, opts, rootpath)
for module in mods:
create_module_file(root_package, module, opts)
toplevels.append(module)
# Do the actual directory tree walk
pkgname_mods_subpkgs = pkgname_modules_subpkgs(rootpath, excludes, opts)
for pkgname, mods, subpkgs in pkgname_mods_subpkgs:
create_package_file(root_package, pkgname, mods, opts, subpkgs)
toplevels.append(makename(root_package, pkgname))
return toplevels
def has_initpy(directory):
return os.path.isfile( join(directory, INITPY) )
def pkgname_modules_subpkgs(rootpath, excluded, opts):
"""A generator that filters out the packages and modules as desired and
yields tuples of (package name, modules, subpackages).
"""
for root, dirs, files in walk(rootpath, followlinks=opts.followlinks):
if root in excluded:
del dirs[:] # skip all subdirectories as well
continue
if INITPY not in files:
if root != rootpath:
del dirs[:]
continue
pkg_name = root[len(rootpath):].lstrip(os.sep).replace(os.sep, '.')
if not opts.includeprivate and pkg_name.startswith('_'):
del dirs[:]
continue
modules = get_modules(files, excluded, opts, root)
subpkgs = get_subpkgs(dirs, excluded, opts, root, rootpath)
dirs[:] = subpkgs # visit only subpackages
has_sg_to_doc = True
if opts.respect_all:
all_attr, has_docstr = get_all_attr_has_docstr(rootpath, root, opts)
has_sg_to_doc = has_docstr or bool(all_attr)
# has_sg_to_doc: e.g. multiprocessing.dummy has nonempty __all__ but
# no modules, subpkgs or docstring to document -> still document it!
modules = get_only_modules(all_attr, modules)
if modules or subpkgs or has_sg_to_doc:
yield pkg_name, modules, subpkgs
def get_modules(files, excluded, opts, root):
"""Filter out and sort the considered python modules from files."""
return sorted( os.path.splitext(f)[0] for f in files
if os.path.splitext(f)[1] in PY_SUFFIXES and
norm_path(root, f) not in excluded and
f != INITPY and
(not f.startswith('_') or opts.includeprivate) )
def get_subpkgs(dirs, excluded, opts, root, rootpath):
"""Filter out and sort the considered subpackages from dirs."""
exclude_prefixes = ('.',) if opts.includeprivate else ('.', '_')
return sorted( d for d in dirs
if not d.startswith(exclude_prefixes) and
norm_path(root, d) not in excluded and
has_initpy( join(root, d) ) and
pkg_to_doc(opts, root, d, rootpath) )
def pkg_to_doc(opts, root, d, rootpath):
if not opts.respect_all:
return True
all_attr, has_docstr = get_all_attr_has_docstr(rootpath, join(root,d), opts)
return all_attr is None or bool(all_attr) or has_docstr
def get_only_modules(all_attr, modules):
"""If ``__all__`` is not present in ``__init__.py``, we take all the modules
in the current directory. Otherwise, we only keep those element of
``__all__`` that are also modules in the current directory."""
if all_attr is None:
return modules
mods = set(modules)
return [m for m in all_attr if m in mods]
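# For example, get_only_modules(['b', 'a', 'x'], ['a', 'b', 'c']) returns
# ['b', 'a']: the order of __all__ wins, and 'x' is dropped because it is
# not a module in the directory.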
def get_all_attr_has_docstr(rootpath, path, opts, cached={}):
"""Returns a tuple: the ``__all__`` attribute of the package as a list
(``None`` if ``__all__`` is not present) and a ``bool`` indicating whether
the module has a doc string. Calls ``sys.exit`` on failure
(e.g. ``ImportError``), unless the --ignore-errors flag is used. Returns
``(None, False)`` on ignored error. A simple-minded caching is used as we
look at each package twice.
"""
if path in cached:
return cached[path]
try:
path_before = list(sys.path)
modules_before = set(sys.modules)
head, pkg = find_top_package(rootpath, path)
sys.path.append(head) # Prepend or append?
__import__(pkg) # for Python 2.6 compatibility
module = sys.modules[pkg]
all_attrib = get_all_from(module)
# cairo and zope have __doc__ but it is None
has_docstring = getattr(module, '__doc__', None) is not None
cached[path] = (all_attrib, has_docstring)
return cached[path]
except AssertionError:
raise
except:
print('\n', tb.format_exc().rstrip(), file=sys.stderr)
print('Please make sure that the package \'%s\' can be imported (or use'
' --ignore-errors\nor exclude %s).' % (pkg,path), file=sys.stderr)
if not opts.ignore_errors:
sys.exit(1)
finally:
difference = sys.modules.viewkeys() - modules_before
for k in difference:
sys.modules.pop(k)
sys.path = path_before
# We only get here if there was an ignored error, for example on ImportError
cached[path] = (None, False)
return cached[path]
def find_top_package(root, path):
"""Walks up in the directory hierarchy to find the top level package or
until hitting the root.
"""
# For example with:
# root = '/usr/lib/python2.7'
# path = '/usr/lib/python2.7/dist-packages/scipy/sparse/linalg/isolve'
# result: '/usr/lib/python2.7/dist-packages', 'scipy.sparse.linalg.isolve'
assert path.startswith(root), '\n%s\n%s' % (root, path)
assert has_initpy(path), path
roothead = os.path.dirname(root)
head, tail = os.path.split(path)
while roothead != head and has_initpy(head):
head, pkg = os.path.split(head)
tail = join(pkg, tail)
return head, string.replace(tail, os.sep, '.')
def get_all_from(module):
all_attr = getattr(module, '__all__', None)
# Some packages (for example dbus.mainloop.__init__.py) use a tuple.
# Convert __all__ to list if necessary.
if all_attr is not None and not isinstance(all_attr, list):
all_attr = list(all_attr)
return all_attr
def norm_path(root, mod_or_dir):
return os.path.normpath( join(root, mod_or_dir) )
def main(argv=sys.argv):
"""Parse and check the command line arguments."""
parser = optparse.OptionParser(
usage="""\
usage: %prog [options] -o <output_path> <module_path> [exclude_path, ...]
Look recursively in <module_path> for Python modules and packages and create
one reST file with automodule directives per package in the <output_path>.
The <exclude_path>s can be files and/or directories that will be excluded
from generation.
Note: By default this script will not overwrite already created files.""")
parser.add_option('-o', '--output-dir', action='store', dest='destdir',
help='Directory to place all output', default='')
parser.add_option('-d', '--maxdepth', action='store', dest='maxdepth',
help='Maximum depth of submodules to show in the TOC '
'(default: 4)', type='int', default=4)
parser.add_option('-f', '--force', action='store_true', dest='force',
help='Overwrite existing files')
parser.add_option('-l', '--follow-links', action='store_true',
dest='followlinks', default=False,
help='Follow symbolic links. Powerful when combined '
'with collective.recipe.omelette.')
parser.add_option('-n', '--dry-run', action='store_true', dest='dryrun',
help='Run the script without creating files')
parser.add_option('-e', '--separate', action='store_true',
dest='separatemodules',
help='Put documentation for each module on its own page')
parser.add_option('-P', '--private', action='store_true',
dest='includeprivate',
help='Include "_private" modules')
parser.add_option('-T', '--no-toc', action='store_true', dest='notoc',
help='Don\'t create a table of contents file')
parser.add_option('-E', '--no-headings', action='store_true',
dest='noheadings',
help='Don\'t create headings for the module/package '
'packages (e.g. when the docstrings already contain '
'them)')
parser.add_option('-M', '--module-first', action='store_true',
dest='modulefirst',
help='Put module documentation before submodule '
'documentation')
parser.add_option('-s', '--suffix', action='store', dest='suffix',
help='file suffix (default: rst)', default='rst')
parser.add_option('-F', '--full', action='store_true', dest='full',
help='Generate a full project with sphinx-quickstart')
parser.add_option('-H', '--doc-project', action='store', dest='header',
help='Project name (default: root module name)')
parser.add_option('-A', '--doc-author', action='store', dest='author',
type='str',
help='Project author(s), used when --full is given')
parser.add_option('-V', '--doc-version', action='store', dest='version',
help='Project version, used when --full is given')
parser.add_option('-R', '--doc-release', action='store', dest='release',
help='Project release, used when --full is given, '
'defaults to --doc-version')
parser.add_option('--version', action='store_true', dest='show_version',
help='Show version information and exit')
parser.add_option('--respect-all', action='store_true',
dest='respect_all',
help='Respect __all__ when looking for modules')
parser.add_option('--quiet', action='store_true',
dest='quiet',
help='Do not show which files are created or skipped')
parser.add_option('--ignore-errors', action='store_true',
dest='ignore_errors',
help='Ignore import errors and continue')
(opts, args) = parser.parse_args(argv[1:])
if opts.show_version:
print('Sphinx (sphinx-apidoc) %s' % __version__)
return 0
if not args:
parser.error('A package path is required.')
rootpath, excludes = args[0], args[1:]
if not opts.destdir:
parser.error('An output directory is required.')
if opts.header is None:
opts.header = os.path.normpath(rootpath).split(os.sep)[-1]
if opts.suffix.startswith('.'):
opts.suffix = opts.suffix[1:]
if not os.path.isdir(rootpath):
print('%s is not a directory.' % rootpath, file=sys.stderr)
sys.exit(1)
if opts.includeprivate and opts.respect_all:
msg = 'Either --private or --respect-all but not both'
print(msg, file=sys.stderr)
sys.exit(1)
if opts.ignore_errors and not opts.respect_all:
msg = 'The --ignore-errors flag is only meaningful with --respect-all'
print(msg, file=sys.stderr)
sys.exit(1)
if not os.path.isdir(opts.destdir):
if not opts.dryrun:
os.makedirs(opts.destdir)
rootpath = os.path.normpath(os.path.abspath(rootpath))
excludes = { os.path.normpath(os.path.abspath(excl)) for excl in excludes }
modules = walk_dir_tree(rootpath, excludes, opts)
if opts.full:
modules.sort()
prev_module = ''
text = ''
for module in modules:
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
d = dict(
path = opts.destdir,
sep = False,
dot = '_',
project = opts.header,
author = opts.author or 'Author',
version = opts.version or '',
release = opts.release or opts.version or '',
suffix = '.' + opts.suffix,
master = 'index',
epub = True,
ext_autodoc = True,
ext_viewcode = True,
makefile = True,
batchfile = True,
mastertocmaxdepth = opts.maxdepth,
mastertoctree = text,
)
if not opts.dryrun:
from sphinx import quickstart as qs
qs.generate(d, silent=True, overwrite=opts.force)
elif not opts.notoc:
create_modules_toc_file(modules, opts)
if __name__ == '__main__':
main()
|
baharev/apidocfilter
|
hacked.py
|
Python
|
bsd-3-clause
| 18,605
|
[
"VisIt"
] |
4663ecfd8b00063d85ab8c9161365115d83fcf3f40a15dc39088a44f4505c8d8
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************
espressopp.esutil.Grid
**********************
"""
from espressopp import pmi
from _espressopp import esutil_Grid
class GridLocal(esutil_Grid):
pass
if pmi.isController:
class Grid(object):
'Grid class'
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.esutil.GridLocal',
localcall = [ 'mapIndexToPosition' ]
#localcall = [ '__call__', 'normal', 'gamma', 'uniformOnSphere' ],
#pmicall = [ 'seed' ]
)
|
fedepad/espressopp
|
src/esutil/Grid.py
|
Python
|
gpl-3.0
| 1,408
|
[
"ESPResSo"
] |
3c2adc70a8f5d1037548b6d2be1de2c140d7f32ba59c011918e8dd1e8905ecb1
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the "test" module for the validate_bom script.
It is responsible for coordinating and running the integration tests.
The test catalog is read from --test_profiles (default all_tests.yaml).
There are two parts to the catalog: "aliases" and "tests".
The "tests" is a dictionary of tests. Each entry is keyed by the name of the
test. A test has the following structure:
test_name:
requires:
configuration:
<commandline option>: <value>
services: [<microservice name>]
quota:
<resource>: <uses>
api: <primary microservice>
args:
alias: [<alias name>]
<command line flag>: <value>
The test_name.requires specifies the requirements in order to run the test.
If a requirement is not satisfied, the test will be skipped.
The test_name.requires.configuration specifies expected options and values.
These are the same names as parameters to the validate_bom__main executable.
Typically this is used to guard a test for a particular configuration (e.g.
don't test against a platform if the platform was not enabled in the
deployment).
The test_name.requires.services is a list of services that the test requires
either directly or indirectly. This is used to ensure that the services are
ready before running the test. If the service is alive but not healthy then
the test will be failed automatically without even running it (provided it
wouldn't have been skipped).
The test_name.api is used to specify the primary microservice that the test
uses. This is used to determine which port to pass to the test since the remote
ports are forwarded to unused local ports known only to this test controller.
The test_name.args are the commandline arguments to pass to the test.
The names of the arguments are the test's argument names without the
prefixed '--'. If the value begins with a '$' then the remaining value
refers to the name of an option whose value should become the argument.
A special argument "aliases" is a list of aliases. These are names that
match the key of an entry in the "aliases" part of the file where all the
name/value pairs defined for the alias are bulk added as arguments.
The test_name.quota is used to rate-limit test execution where tests are
sensitive to resource costs. Arbitrary names can be limited using
--test_quota. The controller will use this as a semaphore to rate-limit
test execution for these resources. Unrestricted resources won't rate-limit.
If the cost is bigger than the total semaphore capacity then the test will
be given all the quota once all of it is available.
There is an overall rate-limiting semaphore on --test_concurrency for
how many tests can run at a time. This is enforced at the point of execution,
after all the setup and filtering has taken place.
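As a purely hypothetical illustration (the names below are invented, not
taken from the shipped all_tests.yaml), a catalog entry might look like:
  kube_smoke_test:
    requires:
      configuration:
        deploy_spinnaker_type: distributed
      services: [gate, front50]
    quota:
      gce_region_CPUS: 2
    api: gate
    args:
      alias: [standard]
      test_stack: $test_stack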
"""
# pylint: disable=broad-except
from multiprocessing.pool import ThreadPool
import atexit
import collections
import logging
import math
import os
import re
import ssl
import subprocess
import socket
import threading
import time
import traceback
import yaml
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
from buildtool import (
add_parser_argument,
determine_subprocess_outcome_labels,
check_subprocess,
check_subprocesses_to_logfile,
raise_and_log_error,
ConfigError,
ResponseError,
TimeoutError,
UnexpectedError)
from validate_bom__deploy import replace_ha_services
from iap_generate_google_auth_token import (
generate_auth_token,
get_service_account_email)
ForwardedPort = collections.namedtuple('ForwardedPort', ['child', 'port'])
def _unused_port():
"""Find a port that is not currently in use."""
# pylint: disable=unused-variable
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
addr, port = sock.getsockname()
sock.close()
return port
class QuotaTracker(object):
"""Manages quota for individual resources.
Note that this quota tracker is purely logical. It does not relate to the
real world. Others may be using the actual quota we have. This is only
regulating the test's use of the quota.
"""
MAX_QUOTA_METRIC_NAME = 'ResourceQuotaMax'
FREE_QUOTA_METRIC_NAME = 'ResourceQuotaAvailable'
INSUFFICIENT_QUOTA_METRIC_NAME = 'ResourceQuotaShortage'
def __init__(self, max_counts, metrics):
"""Constructor.
Args:
max_counts: [dict] The list of resources and quotas to manage.
"""
self.__counts = dict(max_counts)
self.__max_counts = dict(max_counts)
self.__condition_variable = threading.Condition()
self.__metrics = metrics
for name, value in max_counts.items():
labels = {'resource': name}
self.__metrics.set(self.MAX_QUOTA_METRIC_NAME, labels, value)
self.__metrics.set(self.FREE_QUOTA_METRIC_NAME, labels, value)
def acquire_all_safe(self, who, quota):
"""Acquire the desired quota, if any.
This is thread-safe and will block until it can be satisfied.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired.
"""
got = None
with self.__condition_variable:
got = self.acquire_all_or_none_unsafe(who, quota)
while got is None:
logging.info('"%s" waiting on quota %s', who, quota)
self.__condition_variable.wait()
got = self.acquire_all_or_none_unsafe(who, quota)
return got
def acquire_all_or_none_safe(self, who, quota):
"""Acquire the desired quota, if any.
This is thread-safe, however will return None rather than block.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired if successful, or None if not.
"""
with self.__condition_variable:
return self.acquire_all_or_none_unsafe(who, quota)
def acquire_all_or_none_unsafe(self, who, quota):
"""Acquire the desired quota, if any.
This is not thread-safe so should be called while locked.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired if successful, or None if not.
"""
if not quota:
return {}
logging.info('"%s" attempting to acquire quota %s', who, quota)
acquired = {}
have_all = True
for key, value in quota.items():
got = self.__acquire_resource_or_none(key, value)
if not got:
have_all = False # Be lazy so we can record all the missing quota
else:
acquired[key] = got
if have_all:
return acquired
self.release_all_unsafe(who, acquired)
return None
def release_all_safe(self, who, quota):
"""Release all the resource quota.
Args:
who: [string] Who is releasing, for logging purposes.
quota: [dict] The non-None result from an acquire_all* method.
"""
with self.__condition_variable:
self.release_all_unsafe(who, quota)
self.__condition_variable.notify_all()
def release_all_unsafe(self, who, quota):
"""Release all the resource quota.
This is not thread-safe so should be called while locked.
Args:
who: [string] Who is releasing, for logging purposes.
quota: [dict] The non-None result from an acquire_all* method.
"""
if not quota:
return
logging.debug('"%s" releasing quota %s', who, quota)
for key, value in quota.items():
self.__release_resource(key, value)
def __acquire_resource_or_none(self, name, count):
"""Attempt to acquire some amount of quota.
Args:
name: [string] The name of the resource we're acquiring.
count: [int] The amount of the resource
Returns:
The amount we were given. This is either all or none. If non-zero
but less than we asked for, then it gave us the max quota it has.
In order for this to be the case, it must have all the quota available.
Otherwise it will return 0.
"""
have = self.__counts.get(name)
if have is None:
return count
if have >= count:
self.__counts[name] = have - count
self.__metrics.set(
self.FREE_QUOTA_METRIC_NAME, {'resource': name}, self.__counts[name])
return count
max_count = self.__max_counts[name]
if have == max_count:
logging.warning('Quota %s has a max of %d but %d is desired.'
' Acquiring all the quota as a best effort.',
name, max_count, count)
self.__counts[name] = 0
self.__metrics.set(
self.FREE_QUOTA_METRIC_NAME, {'resource': name}, 0)
return have
logging.warning('Quota %s has %d remaining, but %d are needed.'
' Rejecting the request for now.',
name, have, count)
self.__metrics.inc_counter(
self.INSUFFICIENT_QUOTA_METRIC_NAME, {'resource': name},
amount=count - have)
return 0
def __release_resource(self, name, count):
"""Restores previously acquired resource quota."""
have = self.__counts.get(name, None)
if have is not None:
self.__counts[name] = have + count
self.__metrics.set(
self.FREE_QUOTA_METRIC_NAME, {'resource': name}, self.__counts[name])
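# Illustrative usage sketch (hypothetical resource name and amounts):
#
#   tracker = QuotaTracker({'gce_region_CPUS': 8}, metrics)
#   got = tracker.acquire_all_safe('my_test', {'gce_region_CPUS': 2})
#   try:
#       pass  # run the rate-limited work
#   finally:
#       tracker.release_all_safe('my_test', got)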
class ValidateBomTestController(object):
"""The test controller runs integration tests against a deployment."""
@property
def test_suite(self):
"""Returns the main test suite loaded from --test_suite."""
return self.__test_suite
@property
def options(self):
"""The configuration options."""
return self.__deployer.options
@property
def passed(self):
"""Returns the passed tests and reasons."""
return self.__passed
@property
def failed(self):
"""Returns the failed tests and reasons."""
return self.__failed
@property
def skipped(self):
"""Returns the skipped tests and reasons."""
return self.__skipped
@property
def exit_code(self):
"""Determine final exit code for all tests."""
return -1 if self.failed else 0
def __close_forwarded_ports(self):
for forwarding in self.__forwarded_ports.values():
try:
forwarding[0].kill()
except Exception as ex:
logging.error('Error terminating child: %s', ex)
def __collect_gce_quota(self, project, region,
project_percent=100.0, region_percent=100.0):
project_info_json = check_subprocess('gcloud compute project-info describe'
' --format yaml'
' --project %s' % project)
project_info = yaml.safe_load(project_info_json)
# Sometimes GCE returns entries and leaves out the "metric" they were for.
# We'll ignore those and stick them in 'UNKNOWN' for simplicity.
project_quota = {'gce_global_%s' % info.get('metric', 'UNKNOWN'):
int(max(1, math.floor(
project_percent * (info['limit'] - info['usage']))))
for info in project_info['quotas']}
region_info_json = check_subprocess('gcloud compute regions describe'
' --format yaml'
' --project %s'
' %s' % (project, region))
region_info = yaml.safe_load(region_info_json)
region_quota = {
'gce_region_%s' % info.get('metric', 'UNKNOWN'): int(max(
1, math.floor(region_percent * (info['limit'] - info['usage']))))
for info in region_info['quotas']
}
return project_quota, region_quota
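# For example (made-up numbers): a project metric with limit=24 and usage=4
# at project_percent=0.5 yields int(max(1, math.floor(0.5 * 20))) == 10
# units of logical quota for the tests.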
def __init__(self, deployer):
options = deployer.options
quota_spec = {}
if options.google_account_project:
project_quota, region_quota = self.__collect_gce_quota(
options.google_account_project, options.test_gce_quota_region,
project_percent=options.test_gce_project_quota_factor,
region_percent=options.test_gce_region_quota_factor)
quota_spec.update(project_quota)
quota_spec.update(region_quota)
if options.test_default_quota:
quota_spec.update({
parts[0].strip(): int(parts[1])
for parts in [entry.split('=')
for entry in options.test_default_quota.split(',')]
})
if options.test_quota:
quota_spec.update(
{parts[0].strip(): int(parts[1])
for parts in [entry.split('=')
for entry in options.test_quota.split(',')]})
self.__quota_tracker = QuotaTracker(quota_spec, deployer.metrics)
self.__deployer = deployer
self.__lock = threading.Lock()
self.__passed = [] # Resulted in success
self.__failed = [] # Resulted in failure
self.__skipped = [] # Will not run at all
with open(options.test_profiles, 'r') as fd:
self.__test_suite = yaml.safe_load(fd)
self.__extra_test_bindings = (
self.__load_bindings(options.test_extra_profile_bindings)
if options.test_extra_profile_bindings
else {}
)
num_concurrent = len(self.__test_suite.get('tests', {})) or 1  # guard against a missing 'tests' key
num_concurrent = int(min(num_concurrent,
options.test_concurrency or num_concurrent))
self.__semaphore = threading.Semaphore(num_concurrent)
# dictionary of service -> ForwardedPort
self.__forwarded_ports = {}
atexit.register(self.__close_forwarded_ports)
# Map of service names to native ports.
self.__service_port_map = {
# These are critical to most tests.
'clouddriver': 7002,
'clouddriver-caching': 7002,
'clouddriver-rw': 7002,
'clouddriver-ro': 7002,
'clouddriver-ro-deck': 7002,
'gate': 8084,
'front50': 8080,
# Some tests needed these too.
'orca': 8083,
'rosco': 8087,
'igor': 8088,
'echo': 8089,
'echo-scheduler': 8089,
'echo-worker': 8089
}
# Map of services with provided endpoints and credentials.
self.__public_service_configs = {}
self.__add_gate_service_config(self.__public_service_configs)
def __add_gate_service_config(self, configs):
if not self.options.test_gate_service_base_url:
return
service_config = {
'base_url': self.options.test_gate_service_base_url,
}
credentials_path = self.options.test_gate_iap_credentials # This can be None, which would mean we use the Application Default Credentials
client_id = self.options.test_gate_iap_client_id
impersonated_service_account = self.options.test_gate_iap_impersonated_service_account
if client_id:
service_config['service_account_email'] = impersonated_service_account or get_service_account_email(credentials_path)
service_config['bearer_auth_token'] = generate_auth_token(client_id,
service_account_file=credentials_path,
impersonate_service_account_email=impersonated_service_account)
configs['gate'] = service_config
def __bearer_auth_token_or_none(self, service_name, client_id, credentials_path=None):
return generate_auth_token(client_id, credentials_path)
def __replace_ha_api_service(self, service, options):
transform_map = {}
if options.ha_clouddriver_enabled:
transform_map['clouddriver'] = 'clouddriver-rw'
if options.ha_echo_enabled:
transform_map['echo'] = ['echo-worker']
return transform_map.get(service, service)
def __load_bindings(self, path):
with open(path, 'r') as stream:
content = stream.read()
result = {}
for line in content.split('\n'):
# Capture everything up to the first '=' as the key; the original pattern
# '([a-zA-Z][^=])+' only captured the final repetition of the group.
match = re.match('^([a-zA-Z][^=]*)=(.*)', line)
if match:
result[match.group(1).strip()] = match.group(2).strip()
return result
def __forward_port_to_service(self, service_name):
"""Forward ports to the deployed service.
This is private to ensure that it is called with the lock.
The lock is needed to mitigate a race condition. See the
inline comment around the Popen call.
"""
local_port = _unused_port()
remote_port = self.__service_port_map[service_name]
command = self.__deployer.make_port_forward_command(
service_name, local_port, remote_port)
logging.info('Establishing connection to %s with port %d',
service_name, local_port)
# There seems to be an intermittent race condition here.
# Not sure if it is gcloud or python.
# Locking the individual calls seems to work around it.
#
# We don't need to lock because this function is called from within
# the lock already.
logging.debug('RUNNING %s', ' '.join(command))
# Redirect stdout to prevent buffer overflows (at least in k8s)
# but keep errors for failures.
class KeepAlive(threading.Thread):
def run(self):
while True:
try:
logging.info('KeepAlive %s polling', service_name)
got = urlopen('http://localhost:{port}/health'
.format(port=local_port))
logging.info('KeepAlive %s -> %s', service_name, got.getcode())
except Exception as ex:
logging.info('KeepAlive %s -> %s', service_name, ex)
time.sleep(20)
if self.options.deploy_spinnaker_type == 'distributed':
# For now, distributed deployments are k8s
# and K8s port forwarding with kubectl requires keep alive.
hack = KeepAlive()
hack.setDaemon(True)
hack.start()
logfile = os.path.join(
self.options.output_dir,
'port_forward_%s-%d.log' % (service_name, os.getpid()))
stream = open(logfile, 'w')
stream.write(str(command) + '\n\n')
logging.debug('Logging "%s" port forwarding to %s', service_name, logfile)
child = subprocess.Popen(
command,
stderr=subprocess.STDOUT,
stdout=stream)
return ForwardedPort(child, local_port)
def build_summary(self):
"""Return a summary of all the test results."""
def append_list_summary(summary, name, entries):
"""Write out all the names from the test results."""
if not entries:
return
summary.append('{0}:'.format(name))
for entry in entries:
summary.append(' * {0}'.format(entry[0]))
options = self.options
if not options.testing_enabled:
return 'No test output: testing was disabled.', 0
summary = ['\nSummary:']
append_list_summary(summary, 'SKIPPED', self.skipped)
append_list_summary(summary, 'PASSED', self.passed)
append_list_summary(summary, 'FAILED', self.failed)
num_skipped = len(self.skipped)
num_passed = len(self.passed)
num_failed = len(self.failed)
summary.append('')
if num_failed:
summary.append(
'FAILED {0} of {1}, skipped {2}'.format(
num_failed, (num_failed + num_passed), num_skipped))
else:
summary.append('PASSED {0}, skipped {1}'.format(num_passed, num_skipped))
return '\n'.join(summary)
def wait_on_service(self, service_name, port=None, timeout=None):
"""Wait for the given service to be available on the specified port.
Args:
service_name: [string] The service name we are waiting on.
port: [int] The remote port the service is at.
timeout: [int] How much time to wait before giving up.
Returns:
The ForwardedPort entry for this service.
"""
try:
with self.__lock:
forwarding = self.__forwarded_ports.get(service_name)
if forwarding is None:
forwarding = self.__forward_port_to_service(service_name)
self.__forwarded_ports[service_name] = forwarding
except Exception:
logging.exception('Exception while attempting to forward ports to "%s"',
service_name)
raise
timeout = timeout or self.options.test_service_startup_timeout
end_time = time.time() + timeout
logging.info('Waiting on "%s"...', service_name)
if port is None:
port = self.__service_port_map[service_name]
# It seems we have a race condition in the poll
# where it thinks the jobs have terminated.
# I've only seen this happen once.
time.sleep(1)
threadid = hex(threading.current_thread().ident)
logging.info('WaitOn polling %s from thread %s', service_name, threadid)
while forwarding.child.poll() is None:
try:
# localhost is hardcoded here because we are port forwarding.
# timeout=20 is to appease kubectl port forwarding, which will close
# if left idle for 30s
urlopen('http://localhost:{port}/health'
.format(port=forwarding.port),
timeout=20)
logging.info('"%s" is ready on port %d | %s',
service_name, forwarding.port, threadid)
return forwarding
except HTTPError as error:
logging.warning('%s got %s. Ignoring that for now.',
service_name, error)
return forwarding
except Exception as error:  # includes URLError
if time.time() >= end_time:
logging.error(
'Timing out waiting for %s | %s', service_name, threadid)
raise_and_log_error(TimeoutError(service_name, cause=service_name))
time.sleep(2.0)
logging.error('It appears %s is no longer available.'
' Perhaps the tunnel closed.',
service_name)
raise_and_log_error(
ResponseError('It appears that {0} failed'.format(service_name),
server='tunnel'))
def __validate_service_base_url(self, service_name, timeout=None):
service_config = self.__public_service_configs[service_name]
base_url = service_config['base_url']
timeout = timeout or self.options.test_service_startup_timeout
end_time = time.time() + timeout
logging.info('Validating base URL of "%s"...', service_name)
try:
url = '{base_url}/health'.format(base_url=base_url)
request = Request(url=url)
if 'bearer_auth_token' in service_config:
request.add_header('Authorization', 'Bearer {}'.format(service_config['bearer_auth_token']))
context = None
if self.options.test_ignore_ssl_cert_verification:
context = ssl._create_unverified_context()
urlopen(request, context=context)
logging.info('"%s" is ready on service endpoint %s',
service_name, base_url)
return
except HTTPError as error:
logging.error('%s service endpoint got %s.',
service_name, error)
raise_and_log_error(
ResponseError('{0} service endpoint got {1}'.format(service_name, error),
server=base_url))
except URLError as error:
# URLError must be handled before the generic Exception clause;
# otherwise this timeout branch is unreachable.
if time.time() >= end_time:
logging.error(
'Timing out waiting for %s', service_name)
raise_and_log_error(TimeoutError(service_name, cause=service_name))
raise
except Exception as error:
raise_and_log_error(
ResponseError('{0} service endpoint got {1}'.format(service_name, error),
server=base_url))
def run_tests(self):
"""The actual controller that coordinates and runs the tests.
This attempts to process all the tests concurrently across
separate threads, where each test will:
(1) Determine whether or not the test is a candidate
(passes the --test_include / --test_exclude criteria)
(2) Evaluate the test's requirements.
If the configuration requirements are not met then SKIP the test.
(a) Attempt to tunnel each of the service tests, sharing existing
tunnels used by other tests. The tunnels allocate unused local
ports to avoid potential conflict within the local machine.
(b) Wait for the service to be ready. Ideally this means it is
healthy, however we'll allow unhealthy services to proceed
as well and let those tests run and fail in case they are
testing unhealthy service situations.
(c) If there is an error or the service takes too long then
outright FAIL the test.
(3) Acquire the quota that the test requires.
* If the quota is not currently available, then block the
thread until it is. Since each test is in its own thread, this
will not impact other tests.
* Quota are only internal resources within the controller.
This is used for purposes of rate limiting, etc. It does not
coordinate with the underlying platforms.
* Quota is governed with --test_quota. If a test requests
a resource without a known quota, then the quota is assumed
to be infinite.
(4) Grab a semaphore used to rate limit running tests.
This is controlled by --test_concurrency, which defaults to all.
(5) Run the test.
(6) Release the quota and semaphore to unblock other tests.
(7) Record the outcome as PASS or FAIL
If an exception is thrown along the way, the test will automatically
be recorded as a FAILURE.
Returns:
(#passed, #failed, #skipped)
"""
options = self.options
if not options.testing_enabled:
logging.info('--testing_enabled=false skips test phase entirely.')
return 0, 0, 0
all_test_profiles = self.test_suite['tests']
logging.info(
'Running tests (concurrency=%s).',
options.test_concurrency or 'infinite')
thread_pool = ThreadPool(len(all_test_profiles))
thread_pool.map(self.__run_or_skip_test_profile_entry_wrapper,
all_test_profiles.items())
thread_pool.terminate()
logging.info('Finished running tests.')
return len(self.__passed), len(self.__failed), len(self.__skipped)
def __run_or_skip_test_profile_entry_wrapper(self, args):
"""Outer wrapper for running tests
Args:
args: [dict entry] The name and spec tuple from the mapped element.
"""
test_name = args[0]
spec = args[1]
metric_labels = {'test_name': test_name, 'skipped': ''}
try:
self.__run_or_skip_test_profile_entry(test_name, spec, metric_labels)
except Exception as ex:
logging.error('%s threw an exception:\n%s',
test_name, traceback.format_exc())
with self.__lock:
self.__failed.append((test_name, 'Caught exception {0}'.format(ex)))
def __record_skip_test(self, test_name, reason, skip_code, metric_labels):
logging.warning(reason)
self.__skipped.append((test_name, reason))
copy_labels = dict(metric_labels)
copy_labels['skipped'] = skip_code
copy_labels['success'] = 'Skipped'
self.__deployer.metrics.observe_timer(
'RunTestScript' + '_Outcome', copy_labels, 0.0)
def __run_or_skip_test_profile_entry(self, test_name, spec, metric_labels):
"""Runs a test from within the thread-pool map() function.
Args:
test_name: [string] The name of the test.
spec: [dict] The test profile specification.
"""
options = self.options
if not re.search(options.test_include, test_name):
reason = ('Skipped test "{name}" because it does not match explicit'
' --test_include criteria "{criteria}".'
.format(name=test_name, criteria=options.test_include))
self.__record_skip_test(test_name, reason,
'NotExplicitInclude', metric_labels)
return
if options.test_exclude and re.search(options.test_exclude, test_name):
reason = ('Skipped test "{name}" because it matches explicit'
' --test_exclude criteria "{criteria}".'
.format(name=test_name, criteria=options.test_exclude))
self.__record_skip_test(test_name, reason,
'ExplicitExclude', metric_labels)
return
# This can raise an exception
self.run_test_profile_helper(test_name, spec, metric_labels)
def validate_test_requirements(self, test_name, spec, metric_labels):
"""Determine whether or not the test requirements are satisfied.
If not, record the reason a skip or failure.
This may throw exceptions, which are immediate failure.
Args:
test_name: [string] The name of the test.
spec: [dict] The profile specification containing requirements.
This argument will be pruned as values are consumed from it.
Returns:
True if requirements are satisfied, False if not.
"""
if not 'api' in spec:
raise_and_log_error(
UnexpectedError('Test "{name}" is missing an "api" spec.'.format(
name=test_name)))
requires = spec.pop('requires', {})
configuration = requires.pop('configuration', {})
our_config = vars(self.options)
for key, value in configuration.items():
if key not in our_config:
message = ('Unknown configuration key "{0}" for test "{1}"'
.format(key, test_name))
raise_and_log_error(ConfigError(message))
if value != our_config[key]:
reason = ('Skipped test {name} because {key}={want} != {have}'
.format(name=test_name, key=key,
want=value, have=our_config[key]))
with self.__lock:
self.__record_skip_test(test_name, reason,
'IncompatableConfig', metric_labels)
return False
services = set(replace_ha_services(
requires.pop('services', []), self.options))
services.add(self.__replace_ha_api_service(
spec.pop('api'), self.options))
if requires:
raise_and_log_error(
ConfigError('Unexpected fields in {name}.requires: {remaining}'
.format(name=test_name, remaining=requires)))
if spec:
raise_and_log_error(
ConfigError('Unexpected fields in {name} specification: {remaining}'
.format(name=test_name, remaining=spec)))
for service in self.__public_service_configs:
self.__validate_service_base_url(service)
if self.options.test_wait_on_services:
def wait_on_services(services):
thread_pool = ThreadPool(len(services))
thread_pool.map(self.wait_on_service, services)
thread_pool.terminate()
self.__deployer.metrics.track_and_time_call(
'WaitingOnServiceAvailability',
metric_labels, self.__deployer.metrics.default_determine_outcome_labels,
wait_on_services, services)
else:
logging.warning('Skipping waiting for services')
return True
def add_extra_arguments(self, test_name, args, commandline):
"""Add extra arguments to the commandline.
Args:
test_name: [string] Name of test specifying the options.
args: [dict] Specification of additional arguments to pass.
Each key is the name of the argument, the value is the value to pass.
If the value is preceded with a '$' then it refers to the value of
an option. If the value is None then just add the key without an arg.
commandline: [list] The list of command line arguments to append to.
"""
option_dict = vars(self.options)
aliases_dict = self.test_suite.get('aliases', {})
for key, value in args.items():
if isinstance(value, (int, bool)):
value = str(value)
if key == 'alias':
for alias_name in value:
if not alias_name in aliases_dict:
raise_and_log_error(ConfigError(
'Unknown alias "{name}" referenced in args for "{test}"'
.format(name=alias_name, test=test_name)))
self.add_extra_arguments(
test_name, aliases_dict[alias_name], commandline)
continue
elif value is None:
pass
elif value.startswith('$'):
option_name = value[1:]
if option_name in option_dict:
value = option_dict[option_name] or '""'
elif option_name in self.__extra_test_bindings:
value = self.__extra_test_bindings[option_name] or '""'
elif option_name in os.environ:
value = os.environ[option_name]
else:
raise_and_log_error(ConfigError(
'Unknown option "{name}" referenced in args for "{test}"'
.format(name=option_name, test=test_name)))
if value is None:
commandline.append('--' + key)
else:
commandline.extend(['--' + key, value])
def make_test_command_or_none(self, test_name, spec, metric_labels):
"""Returns the command to run the test, or None to skip.
Args:
test_name: The test to run.
spec: The test specification profile.
This argument will be pruned as values are consumed from it.
Returns:
The command line argument list, or None to skip.
This may throw an exception if the spec is invalid.
This does not consider quota, which is checked later.
"""
options = self.options
microservice_api = self.__replace_ha_api_service(spec.get('api'), options)
test_rel_path = spec.pop('path', None) or os.path.join(
'citest', 'tests', '{0}.py'.format(test_name))
args = spec.pop('args', {})
if not self.validate_test_requirements(test_name, spec, metric_labels):
return None
testing_root_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'testing'))
test_path = os.path.join(testing_root_dir, test_rel_path)
citest_log_dir = os.path.join(options.log_dir, 'citest_logs')
if not os.path.exists(citest_log_dir):
try:
os.makedirs(citest_log_dir)
except OSError:
# Another test thread may have created the directory first (race);
# only re-raise if it still does not exist.
if not os.path.exists(citest_log_dir):
raise
command = [
'python', test_path,
'--log_dir', citest_log_dir,
'--log_filebase', test_name,
'--ignore_ssl_cert_verification', str(options.test_ignore_ssl_cert_verification)
]
if microservice_api in self.__public_service_configs:
service_config = self.__public_service_configs[microservice_api]
command.extend([
'--native_base_url', service_config['base_url']
])
if 'bearer_auth_token' in service_config:
command.extend([
'--bearer_auth_token', service_config['bearer_auth_token']
])
if 'service_account_email' in service_config:
command.extend([
'--test_user', service_config['service_account_email']
])
else:
command.extend([
'--native_host', 'localhost',
'--native_port', str(self.__forwarded_ports[microservice_api].port)
])
if options.test_stack:
command.extend(['--test_stack', str(options.test_stack)])
self.add_extra_arguments(test_name, args, command)
return command
def __execute_test_command(self, test_name, command, metric_labels):
metrics = self.__deployer.metrics
logging.debug('Running %s', ' '.join(command))
def run_and_log_test_script(command):
logfile = os.path.join(self.options.output_dir, 'citest_logs',
'%s-%s.console.log' % (test_name, os.getpid()))
logging.info('Logging test "%s" to %s', test_name, logfile)
try:
check_subprocesses_to_logfile('running test', logfile, [command])
retcode = 0
logging.info('Test %s PASSED -- see %s', test_name, logfile)
except:
retcode = -1
logging.info('Test %s FAILED -- see %s', test_name, logfile)
return retcode, logfile
return metrics.track_and_time_call(
'RunTestScript',
metric_labels, determine_subprocess_outcome_labels,
run_and_log_test_script, ' '.join(command))
def run_test_profile_helper(self, test_name, spec, metric_labels):
"""Helper function for running an individual test.
The caller wraps this to trap and handle exceptions.
Args:
test_name: The test being run.
spec: The test specification profile.
This argument will be pruned as values are consumed from it.
"""
quota = spec.pop('quota', {})
command = self.make_test_command_or_none(test_name, spec, metric_labels)
if command is None:
return
logging.info('Acquiring quota for test "%s"...', test_name)
quota_tracker = self.__quota_tracker
metrics = self.__deployer.metrics
acquired_quota = metrics.track_and_time_call(
'ResourceQuotaWait',
metric_labels, metrics.default_determine_outcome_labels,
quota_tracker.acquire_all_safe, test_name, quota)
if acquired_quota:
logging.info('"%s" acquired quota %s', test_name, acquired_quota)
execute_time = None
start_time = time.time()
try:
logging.info('Scheduling "%s"...', test_name)
# This will block. Note that we already acquired quota, thus
# we are blocking while holding onto that quota. However since we are
# blocked awaiting a thread, nobody else can execute either,
# so it doesn't matter that we might be starving them of quota.
self.__semaphore.acquire(True)
execute_time = time.time()
wait_time = int(execute_time - start_time + 0.5)
if wait_time > 1:
logging.info('"%s" had a semaphore contention for %d secs.',
test_name, wait_time)
logging.info('Executing "%s"...', test_name)
retcode, logfile_path = self.__execute_test_command(
test_name, command, metric_labels)
finally:
logging.info('Finished executing "%s"...', test_name)
self.__semaphore.release()
if acquired_quota:
quota_tracker.release_all_safe(test_name, acquired_quota)
end_time = time.time()
delta_time = int(end_time - execute_time + 0.5)
with self.__lock:
if not retcode:
logging.info('%s PASSED after %d secs', test_name, delta_time)
self.__passed.append((test_name, logfile_path))
else:
logging.info('FAILED %s after %d secs', test_name, delta_time)
self.__failed.append((test_name, logfile_path))
def init_argument_parser(parser, defaults):
"""Add testing related command-line parameters."""
add_parser_argument(
parser, 'test_profiles',
defaults, os.path.join(os.path.dirname(__file__), 'all_tests.yaml'),
help='The path to the set of test profiles.')
add_parser_argument(
parser, 'test_extra_profile_bindings', defaults, None,
help='Path to a file with additional bindings that the --test_profiles'
' file may reference.')
add_parser_argument(
parser, 'test_concurrency', defaults, None, type=int,
help='Limits how many tests to run at a time. Default is unbounded')
add_parser_argument(
parser, 'test_service_startup_timeout', defaults, 600, type=int,
help='Number of seconds to permit services to startup before giving up.')
add_parser_argument(
parser, 'test_gce_project_quota_factor', defaults, 1.0, type=float,
help='Default percentage of available project quota to make available'
' for tests.')
add_parser_argument(
parser, 'test_gce_region_quota_factor', defaults, 1.0, type=float,
help='Default percentage of available region quota to make available'
' for tests.')
add_parser_argument(
parser, 'test_gce_quota_region', defaults, 'us-central1',
help='GCE Compute Region to gather region quota limits from.')
add_parser_argument(
parser, 'test_default_quota',
defaults, '',
help='Default quota parameters for values used in the --test_profiles.'
' This does not include GCE quota values, which are determined'
' at runtime. These values can be further overridden by --test_quota.'
' These are meant as built-in defaults, whereas --test_quota is a'
' per-execution override.')
add_parser_argument(
parser, 'test_quota', defaults, '',
help='Comma-delimited name=value list of quota overrides.')
add_parser_argument(
parser, 'testing_enabled', defaults, True, type=bool,
help='If false then do not run the testing phase.')
add_parser_argument(
parser, 'test_disable', defaults, False, action='store_true',
dest='testing_enabled',
help='DEPRECATED: Use --testing_enabled=false.')
add_parser_argument(
parser, 'test_wait_on_services', defaults, True, type=bool,
help='If false then do not wait on services to be ready during'
' testing phase.')
add_parser_argument(
parser, 'test_include', defaults, '.*',
help='Regular expression of tests to run or None for all.')
add_parser_argument(
parser, 'test_exclude', defaults, None,
help='Regular expression of otherwise runnable tests to skip.')
add_parser_argument(
parser, 'test_stack', defaults, None,
help='The --test_stack to pass through to tests indicating which'
' Spinnaker application "stack" to use. This is typically'
' to help trace the source of resources created within the'
' tests.')
add_parser_argument(
parser, 'test_jenkins_job_name', defaults, 'TriggerBake',
help='The Jenkins job name to use in tests.')
add_parser_argument(
parser, 'test_gate_service_base_url', defaults, None,
help='Gate base URL (including protocol, host, and port) to use'
' rather than port-forwarding.')
add_parser_argument(
parser, 'test_gate_iap_client_id', defaults, None,
help='IAP client ID used to authenticate requests to an'
' IAP-protected Spinnaker. The inclusion of this flag'
' indicates that the Gate service is IAP-protected.')
add_parser_argument(
parser, 'test_gate_iap_credentials', defaults, None,
help='Path to google credentials file to authenticate requests'
' to an IAP-protected Spinnaker. This must be used with the'
' test_gate_iap_client_id flag.'
' If left empty then use Application Default Credentials.')
add_parser_argument(
parser, 'test_gate_iap_impersonated_service_account', defaults, None,
help='Service account to impersonate to receive the credentials'
' to make authenticated requests to an IAP-protected Spinnaker.'
' If test_gate_iap_credentials is provided, the service account'
' specified by test_gate_iap_credentials will impersonate this'
' service account. If test_gate_iap_credentials is not provided,'
' the Application Default Credentials will be used to impersonate'
' this service account. This must be used with the'
' test_gate_iap_client_id flag.'
' If left empty then no service account will be impersonated.')
add_parser_argument(
parser, 'test_ignore_ssl_cert_verification', defaults, False, type=bool,
help='Whether or not to ignore SSL certificate verification when making'
' requests to Spinnaker. This is False by default.')
add_parser_argument(
parser, 'test_appengine_region', defaults, 'us-central',
help='Region to use for AppEngine tests.')
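# Hedged usage note (values illustrative, not defaults): --test_default_quota
# supplies built-in name=value settings such as 'cpu=100', and --test_quota
# layers per-execution overrides on top, e.g. 'cpu=25,ssd=200'; GCE project
# and region quota values are still discovered at runtime rather than set
# through either flag.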
def validate_options(options):
"""Validate testing related command-line parameters."""
if not os.path.exists(options.test_profiles):
raise_and_log_error(
ConfigError('--test_profiles "{0}" does not exist.'.format(
options.test_profiles)))
|
duftler/spinnaker
|
dev/validate_bom__test.py
|
Python
|
apache-2.0
| 44,197
|
[
"ORCA"
] |
c786efbf8f4a933dfaf4dac6957fd32071ecd59afa7714dfc4953faa7ece9667
|
import django_tables2 as tables
from django_tables2.utils import A, Accessor
from mmg.jobtrak.links.models import *
from django.utils.translation import ugettext_lazy as _
import external_urls
class JobBoardTable(tables.Table):
url = tables.TemplateColumn(
'<a href="{% load external_urls %}{% external_url record.url %}" target="_blank">{{record.name}}</a>',
verbose_name="Web Site", order_by=A('name')
)
last_click = tables.DateTimeColumn(
format="D, j N",
verbose_name="Last Visit",
attrs={'td': {'nowrap': 'nowrap'}} )
note = tables.Column(verbose_name="Description")
class Meta:
model = JobBoard
attrs = { "class": "table" }
fields = ('url','last_click','note',)
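# A minimal, hypothetical view sketch (template path and page size are
# illustrative, not part of this app): wire the table up with django_tables2's
# RequestConfig so the accessor-backed ordering declared above is driven by
# query parameters.
from django.shortcuts import render
from django_tables2 import RequestConfig
def job_board_list(request):
    table = JobBoardTable(JobBoard.objects.all())
    RequestConfig(request, paginate={"per_page": 25}).configure(table)
    return render(request, "links/jobboard_list.html", {"table": table})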
|
MarconiMediaGroup/JobTrak
|
web/code/mmg/jobtrak/links/tables.py
|
Python
|
apache-2.0
| 758
|
[
"VisIt"
] |
586d4d812c100aaf13f7352ec457be92a09d88bb7c5815ca463ad6e851b2ba99
|
"""Phonon band-structure for silicon using the Appelbaum-Hamann PP.
For comparison, see e.g.:
* Phys. Rev. B 43, 7231 (1991).
"""
import numpy as np
import pylab as plt
import ase.units as units
from ase.dft.kpoints import ibz_points, get_bandpath
from gpaw.mpi import rank, world
from gpaw.dfpt import PhononCalculator
# Pseudo-potential
PP = 'AH'
# Name of file with ground-state calculation
name = 'Si_%s.gpw' % PP
# Create phonon calculator
ph = PhononCalculator(name,
gamma=False,
symmetry=False,
e_ph=False)
# Run the self-consistent calculation
ph.run()
# Ensure that the master does not enter here before all files have been created
world.barrier()
# Calculate band-structure and plot on master
if rank == 0:
# High-symmetry points in the Brillouin zone
points = ibz_points['fcc']
G = points['Gamma']
X = points['X']
W = points['W']
K = points['K']
L = points['L']
atoms = ph.get_atoms()
path_kc, q, Q = get_bandpath([G, K, X, G, L, X, W, L],
atoms.cell, 100)
    point_names = [r'$\Gamma$', 'K', 'X', r'$\Gamma$', 'L', 'X', 'W', 'L']
# Calculate band-structure
omega_kn = ph.band_structure(path_kc)
# Convert from sqrt(Ha / Bohr**2 / amu) -> meV
s = units.Hartree**0.5 * units._hbar * 1.e10 / \
(units._e * units._amu)**(0.5) / units.Bohr
omega_kn *= s * 1000
# Plot the band-structure
plt.figure(1)
for n in range(len(omega_kn[0])):
plt.plot(q, omega_kn[:, n], 'k-', lw=2)
plt.xticks(Q, point_names, fontsize=18)
plt.yticks(fontsize=18)
plt.xlim(q[0], q[-1])
plt.ylim(0, np.ceil(omega_kn.max() / 10) * 10)
    plt.ylabel(r"Frequency ($\mathrm{meV}$)", fontsize=22)
plt.grid('on')
# plt.show()
plt.savefig('Si_bandstructure.png')
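# Hedged sanity check on the conversion above: numerically s works out to
# roughly 0.64 eV per sqrt(Ha/amu)/Bohr, so after the factor of 1000 the
# silicon optical branches should land near the expected ~60 meV around the
# zone centre.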
|
qsnake/gpaw
|
gpaw/test/big/dfpt/Si_bandstructure.py
|
Python
|
gpl-3.0
| 1,888
|
[
"ASE",
"GPAW"
] |
fde80706b7d298bf52d5de22d360dc18b262f57b696db8d97be5df31907ce458
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.394491
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/mobile/channels.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class channels(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(channels, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
_v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 17, col 49
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 17, col 49.
write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
_v = VFFSL(SL,"tstrings",True)['channels'] # u"$tstrings['channels']" on line 23, col 64
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['channels']")) # from line 23, col 64.
write(u'''</li>\r
''')
for channel in VFFSL(SL,"channels",True): # generated from line 24, col 5
write(u'''\t\t\t\t<li>\r
\t\t\t\t<a href="/mobile/channelinfo?sref=''')
_v = VFFSL(SL,"channel.ref",True) # u'$channel.ref' on line 26, col 39
if _v is not None: write(_filter(_v, rawExpr=u'$channel.ref')) # from line 26, col 39.
write(u'''" style="padding: 3px;">\r
\t\t\t\t<span class="ui-li-heading" style="margin-top: 0px; margin-bottom: 3px;">''')
_v = VFFSL(SL,"channel.name",True) # u'$channel.name' on line 27, col 78
if _v is not None: write(_filter(_v, rawExpr=u'$channel.name')) # from line 27, col 78.
write(u'''</span>\r
''')
if VFN(VFFSL(SL,"channel",True),"has_key",False)('now_title'): # generated from line 28, col 5
write(u'''\t\t\t\t<span class="ui-li-desc" style="margin-bottom: 0px;">''')
_v = VFFSL(SL,"channel.now_title",True) # u'$channel.now_title' on line 29, col 58
if _v is not None: write(_filter(_v, rawExpr=u'$channel.now_title')) # from line 29, col 58.
write(u'''</span>\r
''')
write(u'''\t\t\t\t</a>\r
\t\t\t\t</li>\r
''')
write(u'''\t\t\t</ul>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
_v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 39, col 86
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 39, col 86.
write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_channels= 'respond'
## END CLASS DEFINITION
if not hasattr(channels, '_initCheetahAttributes'):
templateAPIClass = getattr(channels, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(channels)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=channels()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/mobile/channels.py
|
Python
|
gpl-2.0
| 7,404
|
[
"VisIt"
] |
cba79259b4bce69bcb95250648af7a6594a16a1f6ba2fb84a71533edae596496
|
"Test the RawRadialDistributionFunction function."
from numpy import *
from asap3 import *
from asap3 import _asap
from ase.lattice.compounds import NaCl
from asap3.testtools import *
from asap3.Internal.ListOfElements import ListOfElements
print_version(1)
# testtypes: latticeconst, maxRDF, bins, useEMT
testtypes = ((5.1, 6.001, 100, False),
(5.2, 6.001, 100, True),
(5.1, 15.001, 100, False),
(5.15, 15.001, 100, True))
for latconst, maxrdf, nbins, withemt in testtypes:
atoms = NaCl(directions=[[1,0,0],[0,1,0],[0,0,1]], symbol=("Cu","Au"),
size=(10,10,10), latticeconstant=latconst, debug=0)
natoms = len(atoms)
ReportTest("Number of atoms", natoms, 8000, 0)
if withemt:
atoms.set_calculator(EMT())
print atoms.get_potential_energy()
result = _asap.RawRDF(atoms, maxrdf, nbins, zeros(len(atoms), int32), 1,
ListOfElements(atoms))
z = atoms.get_atomic_numbers()[0]
globalrdf, rdfdict, countdict = result
print globalrdf
localrdf = zeros(globalrdf.shape)
for i in (29,79):
for j in (29,79):
x = rdfdict[0][(i,j)]
print "LOCAL", i, j
print x
localrdf += x
ReportTest("Local and global RDF are identical",
               min(globalrdf == localrdf), 1, 0)
ReportTest("Atoms are counted correctly",
countdict[0][29] + countdict[0][79], natoms, 0)
shellpop = [6, 12, 8, 6, 24, -1]
shell = [sqrt(i+1.0)/2.0 for i in range(6)]
print shell
print shellpop
n = 0
dr = maxrdf/nbins
for i in range(nbins):
if (i+1)*dr >= shell[n] * latconst:
if shellpop[n] == -1:
print "Reached the end of the test data"
break
ReportTest(("Shell %d (%d)" % (n+1, i)), globalrdf[i],
natoms*shellpop[n], 0)
n += 1
else:
ReportTest(("Between shells (%d)" % (i,)), globalrdf[i], 0, 0)
ReportTest.Summary()
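# Note on the expected populations: in the rocksalt structure the first five
# coordination shells sit at r = a/2, a/sqrt(2), a*sqrt(3)/2, a and a*sqrt(5)/2
# with 6, 12, 8, 6 and 24 neighbours respectively, which is what the `shell`
# distances (sqrt(i+1)/2 in units of a) and `shellpop` encode; the trailing
# -1 just marks the end of the test data.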
|
auag92/n2dm
|
Asap-3.8.4/Test/RawRDF2.py
|
Python
|
mit
| 2,068
|
[
"ASE"
] |
443e00c9115252b78c41ec131b0404d0076d09345e64af07a76526c9f5ed495c
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for MLlib Python DataFrame-based APIs.
"""
import sys
if sys.version > '3':
xrange = range
basestring = str
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from shutil import rmtree
import tempfile
import array as pyarray
import numpy as np
from numpy import abs, all, arange, array, array_equal, inf, ones, tile, zeros
import inspect
import py4j
from pyspark import keyword_only, SparkContext
from pyspark.ml import Estimator, Model, Pipeline, PipelineModel, Transformer, UnaryTransformer
from pyspark.ml.classification import *
from pyspark.ml.clustering import *
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.evaluation import BinaryClassificationEvaluator, ClusteringEvaluator, \
MulticlassClassificationEvaluator, RegressionEvaluator
from pyspark.ml.feature import *
from pyspark.ml.fpm import FPGrowth, FPGrowthModel
from pyspark.ml.image import ImageSchema
from pyspark.ml.linalg import DenseMatrix, DenseVector, Matrices, MatrixUDT, \
    SparseMatrix, SparseVector, Vector, VectorUDT, Vectors
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasMaxIter, HasSeed
from pyspark.ml.recommendation import ALS
from pyspark.ml.regression import DecisionTreeRegressor, GeneralizedLinearRegression, \
LinearRegression
from pyspark.ml.stat import ChiSquareTest
from pyspark.ml.tuning import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams, JavaWrapper
from pyspark.serializers import PickleSerializer
from pyspark.sql import DataFrame, Row, SparkSession, HiveContext
from pyspark.sql.functions import rand
from pyspark.sql.types import DoubleType, IntegerType
from pyspark.storagelevel import *
from pyspark.tests import QuietTest, ReusedPySparkTestCase as PySparkTestCase
ser = PickleSerializer()
class MLlibTestCase(unittest.TestCase):
def setUp(self):
self.sc = SparkContext('local[4]', "MLlib tests")
self.spark = SparkSession(self.sc)
def tearDown(self):
self.spark.stop()
class SparkSessionTestCase(PySparkTestCase):
@classmethod
def setUpClass(cls):
PySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
PySparkTestCase.tearDownClass()
cls.spark.stop()
class MockDataset(DataFrame):
def __init__(self):
self.index = 0
class HasFake(Params):
def __init__(self):
super(HasFake, self).__init__()
self.fake = Param(self, "fake", "fake param")
def getFake(self):
return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake):
def __init__(self):
super(MockTransformer, self).__init__()
self.dataset_index = None
def _transform(self, dataset):
self.dataset_index = dataset.index
dataset.index += 1
return dataset
class MockUnaryTransformer(UnaryTransformer, DefaultParamsReadable, DefaultParamsWritable):
shift = Param(Params._dummy(), "shift", "The amount by which to shift " +
"data in a DataFrame",
typeConverter=TypeConverters.toFloat)
def __init__(self, shiftVal=1):
super(MockUnaryTransformer, self).__init__()
self._setDefault(shift=1)
self._set(shift=shiftVal)
def getShift(self):
return self.getOrDefault(self.shift)
def setShift(self, shift):
self._set(shift=shift)
def createTransformFunc(self):
shiftVal = self.getShift()
return lambda x: x + shiftVal
def outputDataType(self):
return DoubleType()
def validateInputType(self, inputType):
if inputType != DoubleType():
raise TypeError("Bad input type: {}. ".format(inputType) +
"Requires Double.")
class MockEstimator(Estimator, HasFake):
def __init__(self):
super(MockEstimator, self).__init__()
self.dataset_index = None
def _fit(self, dataset):
self.dataset_index = dataset.index
model = MockModel()
self._copyValues(model)
return model
class MockModel(MockTransformer, Model, HasFake):
pass
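# Note on the mocks above: fit/transform both record the "dataset.index" they
# saw and then bump it, so a test can assert the exact order in which a
# Pipeline invoked its estimators and transformers (see PipelineTests below).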
class JavaWrapperMemoryTests(SparkSessionTestCase):
def test_java_object_gets_detached(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=1, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
summary = model.summary
self.assertIsInstance(model, JavaWrapper)
self.assertIsInstance(summary, JavaWrapper)
self.assertIsInstance(model, JavaParams)
self.assertNotIsInstance(summary, JavaParams)
error_no_object = 'Target Object ID does not exist for this gateway'
self.assertIn("LinearRegression_", model._java_obj.toString())
self.assertIn("LinearRegressionTrainingSummary", summary._java_obj.toString())
model.__del__()
with self.assertRaisesRegexp(py4j.protocol.Py4JError, error_no_object):
model._java_obj.toString()
self.assertIn("LinearRegressionTrainingSummary", summary._java_obj.toString())
try:
summary.__del__()
except:
pass
with self.assertRaisesRegexp(py4j.protocol.Py4JError, error_no_object):
model._java_obj.toString()
with self.assertRaisesRegexp(py4j.protocol.Py4JError, error_no_object):
summary._java_obj.toString()
class ParamTypeConversionTests(PySparkTestCase):
"""
Test that param type conversion happens.
"""
def test_int(self):
lr = LogisticRegression(maxIter=5.0)
self.assertEqual(lr.getMaxIter(), 5)
self.assertTrue(type(lr.getMaxIter()) == int)
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
def test_float(self):
lr = LogisticRegression(tol=1)
self.assertEqual(lr.getTol(), 1.0)
self.assertTrue(type(lr.getTol()) == float)
self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
def test_vector(self):
ewp = ElementwiseProduct(scalingVec=[1, 3])
self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
def test_list(self):
l = [0, 1]
for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l),
range(len(l)), l), pyarray.array('l', l), xrange(2), tuple(l)]:
converted = TypeConverters.toList(lst_like)
self.assertEqual(type(converted), list)
self.assertListEqual(converted, l)
def test_list_int(self):
for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
pyarray.array('d', [1.0, 2.0])]:
vs = VectorSlicer(indices=indices)
self.assertListEqual(vs.getIndices(), [1, 2])
self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
def test_list_float(self):
b = Bucketizer(splits=[1, 4])
self.assertEqual(b.getSplits(), [1.0, 4.0])
self.assertTrue(all([type(v) == float for v in b.getSplits()]))
self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
def test_list_string(self):
for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
idx_to_string = IndexToString(labels=labels)
self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
def test_string(self):
lr = LogisticRegression()
for col in ['features', u'features', np.str_('features')]:
lr.setFeaturesCol(col)
self.assertEqual(lr.getFeaturesCol(), 'features')
self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
def test_bool(self):
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
class PipelineTests(PySparkTestCase):
def test_pipeline(self):
dataset = MockDataset()
estimator0 = MockEstimator()
transformer1 = MockTransformer()
estimator2 = MockEstimator()
transformer3 = MockTransformer()
pipeline = Pipeline(stages=[estimator0, transformer1, estimator2, transformer3])
pipeline_model = pipeline.fit(dataset, {estimator0.fake: 0, transformer1.fake: 1})
model0, transformer1, model2, transformer3 = pipeline_model.stages
self.assertEqual(0, model0.dataset_index)
self.assertEqual(0, model0.getFake())
self.assertEqual(1, transformer1.dataset_index)
self.assertEqual(1, transformer1.getFake())
self.assertEqual(2, dataset.index)
self.assertIsNone(model2.dataset_index, "The last model shouldn't be called in fit.")
self.assertIsNone(transformer3.dataset_index,
"The last transformer shouldn't be called in fit.")
dataset = pipeline_model.transform(dataset)
self.assertEqual(2, model0.dataset_index)
self.assertEqual(3, transformer1.dataset_index)
self.assertEqual(4, model2.dataset_index)
self.assertEqual(5, transformer3.dataset_index)
self.assertEqual(6, dataset.index)
def test_identity_pipeline(self):
dataset = MockDataset()
def doTransform(pipeline):
pipeline_model = pipeline.fit(dataset)
return pipeline_model.transform(dataset)
# check that empty pipeline did not perform any transformation
self.assertEqual(dataset.index, doTransform(Pipeline(stages=[])).index)
# check that failure to set stages param will raise KeyError for missing param
self.assertRaises(KeyError, lambda: doTransform(Pipeline()))
class TestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(TestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(OtherTestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class HasThrowableProperty(Params):
def __init__(self):
super(HasThrowableProperty, self).__init__()
self.p = Param(self, "none", "empty param")
@property
def test_property(self):
raise RuntimeError("Test property to raise error when invoked")
class ParamTests(SparkSessionTestCase):
def test_copy_new_parent(self):
testParams = TestParams()
# Copying an instantiated param should fail
with self.assertRaises(ValueError):
testParams.maxIter._copy_new_parent(testParams)
# Copying a dummy param should succeed
TestParams.maxIter._copy_new_parent(testParams)
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_hasparam(self):
testParams = TestParams()
self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
self.assertFalse(testParams.hasParam("notAParameter"))
self.assertTrue(testParams.hasParam(u"maxIter"))
def test_resolveparam(self):
testParams = TestParams()
self.assertEqual(testParams._resolveParam(testParams.maxIter), testParams.maxIter)
self.assertEqual(testParams._resolveParam("maxIter"), testParams.maxIter)
self.assertEqual(testParams._resolveParam(u"maxIter"), testParams.maxIter)
if sys.version_info[0] >= 3:
# In Python 3, it is allowed to get/set attributes with non-ascii characters.
e_cls = AttributeError
else:
e_cls = UnicodeEncodeError
self.assertRaises(e_cls, lambda: testParams._resolveParam(u"아"))
def test_params(self):
testParams = TestParams()
maxIter = testParams.maxIter
inputCol = testParams.inputCol
seed = testParams.seed
params = testParams.params
self.assertEqual(params, [inputCol, maxIter, seed])
self.assertTrue(testParams.hasParam(maxIter.name))
self.assertTrue(testParams.hasDefault(maxIter))
self.assertFalse(testParams.isSet(maxIter))
self.assertTrue(testParams.isDefined(maxIter))
self.assertEqual(testParams.getMaxIter(), 10)
testParams.setMaxIter(100)
self.assertTrue(testParams.isSet(maxIter))
self.assertEqual(testParams.getMaxIter(), 100)
self.assertTrue(testParams.hasParam(inputCol.name))
self.assertFalse(testParams.hasDefault(inputCol))
self.assertFalse(testParams.isSet(inputCol))
self.assertFalse(testParams.isDefined(inputCol))
with self.assertRaises(KeyError):
testParams.getInputCol()
otherParam = Param(Params._dummy(), "otherParam", "Parameter used to test that " +
"set raises an error for a non-member parameter.",
typeConverter=TypeConverters.toString)
with self.assertRaises(ValueError):
testParams.set(otherParam, "value")
# Since the default is normally random, set it to a known number for debug str
testParams._setDefault(seed=41)
testParams.setSeed(43)
self.assertEqual(
testParams.explainParams(),
"\n".join(["inputCol: input column name. (undefined)",
"maxIter: max number of iterations (>= 0). (default: 10, current: 100)",
"seed: random seed. (default: 41, current: 43)"]))
def test_kmeans_param(self):
algo = KMeans()
self.assertEqual(algo.getInitMode(), "k-means||")
algo.setK(10)
self.assertEqual(algo.getK(), 10)
algo.setInitSteps(10)
self.assertEqual(algo.getInitSteps(), 10)
self.assertEqual(algo.getDistanceMeasure(), "euclidean")
algo.setDistanceMeasure("cosine")
self.assertEqual(algo.getDistanceMeasure(), "cosine")
def test_hasseed(self):
noSeedSpecd = TestParams()
withSeedSpecd = TestParams(seed=42)
other = OtherTestParams()
# Check that we no longer use 42 as the magic number
self.assertNotEqual(noSeedSpecd.getSeed(), 42)
origSeed = noSeedSpecd.getSeed()
# Check that we only compute the seed once
self.assertEqual(noSeedSpecd.getSeed(), origSeed)
# Check that a specified seed is honored
self.assertEqual(withSeedSpecd.getSeed(), 42)
# Check that a different class has a different seed
self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())
def test_param_property_error(self):
param_store = HasThrowableProperty()
self.assertRaises(RuntimeError, lambda: param_store.test_property)
params = param_store.params # should not invoke the property 'test_property'
self.assertEqual(len(params), 1)
def test_word2vec_param(self):
model = Word2Vec().setWindowSize(6)
# Check windowSize is set properly
self.assertEqual(model.getWindowSize(), 6)
def test_copy_param_extras(self):
tp = TestParams(seed=42)
extra = {tp.getParam(TestParams.inputCol.name): "copy_input"}
tp_copy = tp.copy(extra=extra)
self.assertEqual(tp.uid, tp_copy.uid)
self.assertEqual(tp.params, tp_copy.params)
for k, v in extra.items():
self.assertTrue(tp_copy.isDefined(k))
self.assertEqual(tp_copy.getOrDefault(k), v)
copied_no_extra = {}
for k, v in tp_copy._paramMap.items():
if k not in extra:
copied_no_extra[k] = v
self.assertEqual(tp._paramMap, copied_no_extra)
self.assertEqual(tp._defaultParamMap, tp_copy._defaultParamMap)
def test_logistic_regression_check_thresholds(self):
self.assertIsInstance(
LogisticRegression(threshold=0.5, thresholds=[0.5, 0.5]),
LogisticRegression
)
self.assertRaisesRegexp(
ValueError,
"Logistic Regression getThreshold found inconsistent.*$",
LogisticRegression, threshold=0.42, thresholds=[0.5, 0.5]
)
def test_preserve_set_state(self):
dataset = self.spark.createDataFrame([(0.5,)], ["data"])
binarizer = Binarizer(inputCol="data")
self.assertFalse(binarizer.isSet("threshold"))
binarizer.transform(dataset)
binarizer._transfer_params_from_java()
self.assertFalse(binarizer.isSet("threshold"),
"Params not explicitly set should remain unset after transform")
def test_default_params_transferred(self):
dataset = self.spark.createDataFrame([(0.5,)], ["data"])
binarizer = Binarizer(inputCol="data")
# intentionally change the pyspark default, but don't set it
binarizer._defaultParamMap[binarizer.outputCol] = "my_default"
result = binarizer.transform(dataset).select("my_default").collect()
self.assertFalse(binarizer.isSet(binarizer.outputCol))
self.assertEqual(result[0][0], 1.0)
@staticmethod
def check_params(test_self, py_stage, check_params_exist=True):
"""
Checks common requirements for Params.params:
- set of params exist in Java and Python and are ordered by names
- param parent has the same UID as the object's UID
- default param value from Java matches value in Python
- optionally check if all params from Java also exist in Python
"""
py_stage_str = "%s %s" % (type(py_stage), py_stage)
if not hasattr(py_stage, "_to_java"):
return
java_stage = py_stage._to_java()
if java_stage is None:
return
test_self.assertEqual(py_stage.uid, java_stage.uid(), msg=py_stage_str)
if check_params_exist:
param_names = [p.name for p in py_stage.params]
java_params = list(java_stage.params())
java_param_names = [jp.name() for jp in java_params]
test_self.assertEqual(
param_names, sorted(java_param_names),
"Param list in Python does not match Java for %s:\nJava = %s\nPython = %s"
% (py_stage_str, java_param_names, param_names))
for p in py_stage.params:
test_self.assertEqual(p.parent, py_stage.uid)
java_param = java_stage.getParam(p.name)
py_has_default = py_stage.hasDefault(p)
java_has_default = java_stage.hasDefault(java_param)
test_self.assertEqual(py_has_default, java_has_default,
"Default value mismatch of param %s for Params %s"
% (p.name, str(py_stage)))
if py_has_default:
if p.name == "seed":
continue # Random seeds between Spark and PySpark are different
java_default = _java2py(test_self.sc,
java_stage.clear(java_param).getOrDefault(java_param))
py_stage._clear(p)
py_default = py_stage.getOrDefault(p)
# equality test for NaN is always False
if isinstance(java_default, float) and np.isnan(java_default):
java_default = "NaN"
py_default = "NaN" if np.isnan(py_default) else "not NaN"
test_self.assertEqual(
java_default, py_default,
"Java default %s != python default %s of param %s for Params %s"
% (str(java_default), str(py_default), p.name, str(py_stage)))
class EvaluatorTests(SparkSessionTestCase):
def test_java_params(self):
"""
This tests a bug fixed by SPARK-18274 which causes multiple copies
of a Params instance in Python to be linked to the same Java instance.
"""
evaluator = RegressionEvaluator(metricName="r2")
df = self.spark.createDataFrame([Row(label=1.0, prediction=1.1)])
evaluator.evaluate(df)
self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
evaluatorCopy = evaluator.copy({evaluator.metricName: "mae"})
evaluator.evaluate(df)
evaluatorCopy.evaluate(df)
self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
self.assertEqual(evaluatorCopy._java_obj.getMetricName(), "mae")
def test_clustering_evaluator_with_cosine_distance(self):
featureAndPredictions = map(lambda x: (Vectors.dense(x[0]), x[1]),
[([1.0, 1.0], 1.0), ([10.0, 10.0], 1.0), ([1.0, 0.5], 2.0),
([10.0, 4.4], 2.0), ([-1.0, 1.0], 3.0), ([-100.0, 90.0], 3.0)])
dataset = self.spark.createDataFrame(featureAndPredictions, ["features", "prediction"])
evaluator = ClusteringEvaluator(predictionCol="prediction", distanceMeasure="cosine")
self.assertEqual(evaluator.getDistanceMeasure(), "cosine")
self.assertTrue(np.isclose(evaluator.evaluate(dataset), 0.992671213, atol=1e-5))
class FeatureTests(SparkSessionTestCase):
def test_binarizer(self):
b0 = Binarizer()
self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
self.assertTrue(all([~b0.isSet(p) for p in b0.params]))
self.assertTrue(b0.hasDefault(b0.threshold))
self.assertEqual(b0.getThreshold(), 0.0)
b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
self.assertTrue(all([b0.isSet(p) for p in b0.params]))
self.assertEqual(b0.getThreshold(), 1.0)
self.assertEqual(b0.getInputCol(), "input")
self.assertEqual(b0.getOutputCol(), "output")
b0c = b0.copy({b0.threshold: 2.0})
self.assertEqual(b0c.uid, b0.uid)
self.assertListEqual(b0c.params, b0.params)
self.assertEqual(b0c.getThreshold(), 2.0)
b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
self.assertNotEqual(b1.uid, b0.uid)
self.assertEqual(b1.getThreshold(), 2.0)
self.assertEqual(b1.getInputCol(), "input")
self.assertEqual(b1.getOutputCol(), "output")
def test_idf(self):
dataset = self.spark.createDataFrame([
(DenseVector([1.0, 2.0]),),
(DenseVector([0.0, 1.0]),),
(DenseVector([3.0, 0.2]),)], ["tf"])
idf0 = IDF(inputCol="tf")
self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
self.assertEqual(idf0m.uid, idf0.uid,
"Model should inherit the UID from its parent estimator.")
output = idf0m.transform(dataset)
self.assertIsNotNone(output.head().idf)
# Test that parameters transferred to Python Model
ParamTests.check_params(self, idf0m)
def test_ngram(self):
dataset = self.spark.createDataFrame([
Row(input=["a", "b", "c", "d", "e"])])
ngram0 = NGram(n=4, inputCol="input", outputCol="output")
self.assertEqual(ngram0.getN(), 4)
self.assertEqual(ngram0.getInputCol(), "input")
self.assertEqual(ngram0.getOutputCol(), "output")
transformedDF = ngram0.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])
def test_stopwordsremover(self):
dataset = self.spark.createDataFrame([Row(input=["a", "panda"])])
stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
# Default
self.assertEqual(stopWordRemover.getInputCol(), "input")
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["panda"])
self.assertEqual(type(stopWordRemover.getStopWords()), list)
self.assertTrue(isinstance(stopWordRemover.getStopWords()[0], basestring))
# Custom
stopwords = ["panda"]
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getInputCol(), "input")
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a"])
# with language selection
stopwords = StopWordsRemover.loadDefaultStopWords("turkish")
dataset = self.spark.createDataFrame([Row(input=["acaba", "ama", "biri"])])
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, [])
def test_count_vectorizer_with_binary(self):
dataset = self.spark.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(3, "c".split(' '), SparseVector(3, {2: 1.0}),)], ["id", "words", "expected"])
cv = CountVectorizer(binary=True, inputCol="words", outputCol="features")
model = cv.fit(dataset)
transformedList = model.transform(dataset).select("features", "expected").collect()
for r in transformedList:
feature, expected = r
self.assertEqual(feature, expected)
def test_count_vectorizer_with_maxDF(self):
dataset = self.spark.createDataFrame([
(0, "a b c d".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0}),),
(3, "a".split(' '), SparseVector(3, {}),)], ["id", "words", "expected"])
cv = CountVectorizer(inputCol="words", outputCol="features")
model1 = cv.setMaxDF(3).fit(dataset)
self.assertEqual(model1.vocabulary, ['b', 'c', 'd'])
transformedList1 = model1.transform(dataset).select("features", "expected").collect()
for r in transformedList1:
feature, expected = r
self.assertEqual(feature, expected)
model2 = cv.setMaxDF(0.75).fit(dataset)
self.assertEqual(model2.vocabulary, ['b', 'c', 'd'])
transformedList2 = model2.transform(dataset).select("features", "expected").collect()
for r in transformedList2:
feature, expected = r
self.assertEqual(feature, expected)
def test_count_vectorizer_from_vocab(self):
model = CountVectorizerModel.from_vocabulary(["a", "b", "c"], inputCol="words",
outputCol="features", minTF=2)
self.assertEqual(model.vocabulary, ["a", "b", "c"])
self.assertEqual(model.getMinTF(), 2)
dataset = self.spark.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 3.0, 1: 2.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 2.0}),),
(2, "a b".split(' '), SparseVector(3, {}),)], ["id", "words", "expected"])
transformed_list = model.transform(dataset).select("features", "expected").collect()
for r in transformed_list:
feature, expected = r
self.assertEqual(feature, expected)
# Test an empty vocabulary
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "vocabSize.*invalid.*0"):
CountVectorizerModel.from_vocabulary([], inputCol="words")
# Test model with default settings can transform
model_default = CountVectorizerModel.from_vocabulary(["a", "b", "c"], inputCol="words")
transformed_list = model_default.transform(dataset)\
.select(model_default.getOrDefault(model_default.outputCol)).collect()
self.assertEqual(len(transformed_list), 3)
def test_rformula_force_index_label(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
# Does not index label by default since it's numeric type.
rf = RFormula(formula="y ~ x + s")
model = rf.fit(df)
transformedDF = model.transform(df)
self.assertEqual(transformedDF.head().label, 1.0)
# Force to index label.
rf2 = RFormula(formula="y ~ x + s").setForceIndexLabel(True)
model2 = rf2.fit(df)
transformedDF2 = model2.transform(df)
self.assertEqual(transformedDF2.head().label, 0.0)
def test_rformula_string_indexer_order_type(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
rf = RFormula(formula="y ~ x + s", stringIndexerOrderType="alphabetDesc")
self.assertEqual(rf.getStringIndexerOrderType(), 'alphabetDesc')
transformedDF = rf.fit(df).transform(df)
observed = transformedDF.select("features").collect()
expected = [[1.0, 0.0], [2.0, 1.0], [0.0, 0.0]]
for i in range(0, len(expected)):
self.assertTrue(all(observed[i]["features"].toArray() == expected[i]))
def test_string_indexer_handle_invalid(self):
df = self.spark.createDataFrame([
(0, "a"),
(1, "d"),
(2, None)], ["id", "label"])
si1 = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="keep",
stringOrderType="alphabetAsc")
model1 = si1.fit(df)
td1 = model1.transform(df)
actual1 = td1.select("id", "indexed").collect()
expected1 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0), Row(id=2, indexed=2.0)]
self.assertEqual(actual1, expected1)
si2 = si1.setHandleInvalid("skip")
model2 = si2.fit(df)
td2 = model2.transform(df)
actual2 = td2.select("id", "indexed").collect()
expected2 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0)]
self.assertEqual(actual2, expected2)
def test_string_indexer_from_labels(self):
model = StringIndexerModel.from_labels(["a", "b", "c"], inputCol="label",
outputCol="indexed", handleInvalid="keep")
self.assertEqual(model.labels, ["a", "b", "c"])
df1 = self.spark.createDataFrame([
(0, "a"),
(1, "c"),
(2, None),
(3, "b"),
(4, "b")], ["id", "label"])
result1 = model.transform(df1)
actual1 = result1.select("id", "indexed").collect()
expected1 = [Row(id=0, indexed=0.0), Row(id=1, indexed=2.0), Row(id=2, indexed=3.0),
Row(id=3, indexed=1.0), Row(id=4, indexed=1.0)]
self.assertEqual(actual1, expected1)
model_empty_labels = StringIndexerModel.from_labels(
[], inputCol="label", outputCol="indexed", handleInvalid="keep")
actual2 = model_empty_labels.transform(df1).select("id", "indexed").collect()
expected2 = [Row(id=0, indexed=0.0), Row(id=1, indexed=0.0), Row(id=2, indexed=0.0),
Row(id=3, indexed=0.0), Row(id=4, indexed=0.0)]
self.assertEqual(actual2, expected2)
# Test model with default settings can transform
model_default = StringIndexerModel.from_labels(["a", "b", "c"], inputCol="label")
df2 = self.spark.createDataFrame([
(0, "a"),
(1, "c"),
(2, "b"),
(3, "b"),
(4, "b")], ["id", "label"])
transformed_list = model_default.transform(df2)\
.select(model_default.getOrDefault(model_default.outputCol)).collect()
self.assertEqual(len(transformed_list), 5)
class HasInducedError(Params):
def __init__(self):
super(HasInducedError, self).__init__()
self.inducedError = Param(self, "inducedError",
"Uniformly-distributed error added to feature")
def getInducedError(self):
return self.getOrDefault(self.inducedError)
class InducedErrorModel(Model, HasInducedError):
def __init__(self):
super(InducedErrorModel, self).__init__()
def _transform(self, dataset):
return dataset.withColumn("prediction",
dataset.feature + (rand(0) * self.getInducedError()))
class InducedErrorEstimator(Estimator, HasInducedError):
def __init__(self, inducedError=1.0):
super(InducedErrorEstimator, self).__init__()
self._set(inducedError=inducedError)
def _fit(self, dataset):
model = InducedErrorModel()
self._copyValues(model)
return model
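# Why this mock suits the model-selection tests below: _transform adds
# rand(0) * inducedError to "feature", so with inducedError=0.0 the
# "prediction" column reproduces "feature" exactly, giving rmse == 0.0 and
# r2 == 1.0 for the grid point the validators are expected to pick.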
class CrossValidatorTests(SparkSessionTestCase):
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvCopied = cv.copy()
self.assertEqual(cv.getEstimator().uid, cvCopied.getEstimator().uid)
cvModel = cv.fit(dataset)
cvModelCopied = cvModel.copy()
for index in range(len(cvModel.avgMetrics)):
self.assertTrue(abs(cvModel.avgMetrics[index] - cvModelCopied.avgMetrics[index])
< 0.0001)
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
def test_save_load_trained_model(self):
# This tests saving and loading the trained model only.
# Save/load for CrossValidator will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
lrModel = cvModel.bestModel
cvModelPath = temp_path + "/cvModel"
lrModel.save(cvModelPath)
loadedLrModel = LogisticRegressionModel.load(cvModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_save_load_simple_estimator(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
self.assertEqual(loadedCV.getEstimatorParamMaps(), cv.getEstimatorParamMaps())
# test save/load of CrossValidatorModel
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
def test_parallel_evaluation(self):
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [5, 6]).build()
evaluator = BinaryClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cv.setParallelism(1)
cvSerialModel = cv.fit(dataset)
cv.setParallelism(2)
cvParallelModel = cv.fit(dataset)
self.assertEqual(cvSerialModel.avgMetrics, cvParallelModel.avgMetrics)
def test_expose_sub_models(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
numFolds = 3
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
numFolds=numFolds, collectSubModels=True)
def checkSubModels(subModels):
self.assertEqual(len(subModels), numFolds)
for i in range(numFolds):
self.assertEqual(len(subModels[i]), len(grid))
cvModel = cv.fit(dataset)
checkSubModels(cvModel.subModels)
        # Test that the default value for the "persistSubModels" option is "true"
testSubPath = temp_path + "/testCrossValidatorSubModels"
savingPathWithSubModels = testSubPath + "cvModel3"
cvModel.save(savingPathWithSubModels)
cvModel3 = CrossValidatorModel.load(savingPathWithSubModels)
checkSubModels(cvModel3.subModels)
cvModel4 = cvModel3.copy()
checkSubModels(cvModel4.subModels)
savingPathWithoutSubModels = testSubPath + "cvModel2"
cvModel.write().option("persistSubModels", "false").save(savingPathWithoutSubModels)
cvModel2 = CrossValidatorModel.load(savingPathWithoutSubModels)
self.assertEqual(cvModel2.subModels, None)
for i in range(numFolds):
for j in range(len(grid)):
self.assertEqual(cvModel.subModels[i][j].uid, cvModel3.subModels[i][j].uid)
def test_save_load_nested_estimator(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
ova = OneVsRest(classifier=LogisticRegression())
lr1 = LogisticRegression().setMaxIter(100)
lr2 = LogisticRegression().setMaxIter(150)
grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
evaluator = MulticlassClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
originalParamMap = cv.getEstimatorParamMaps()
loadedParamMap = loadedCV.getEstimatorParamMaps()
for i, param in enumerate(loadedParamMap):
for p in param:
if p.name == "classifier":
self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
else:
self.assertEqual(param[p], originalParamMap[i][p])
# test save/load of CrossValidatorModel
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
class TrainValidationSplitTests(SparkSessionTestCase):
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
        self.assertEqual(len(grid), len(validationMetrics),
                         "validationMetrics has the same size as the grid parameter")
self.assertEqual(0.0, min(validationMetrics))
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
        self.assertEqual(len(grid), len(validationMetrics),
                         "validationMetrics has the same size as the grid parameter")
self.assertEqual(1.0, max(validationMetrics))
def test_save_load_trained_model(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
lrModel = tvsModel.bestModel
tvsModelPath = temp_path + "/tvsModel"
lrModel.save(tvsModelPath)
loadedLrModel = LogisticRegressionModel.load(tvsModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_save_load_simple_estimator(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
self.assertEqual(loadedTvs.getEstimatorParamMaps(), tvs.getEstimatorParamMaps())
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
def test_parallel_evaluation(self):
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [5, 6]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvs.setParallelism(1)
tvsSerialModel = tvs.fit(dataset)
tvs.setParallelism(2)
tvsParallelModel = tvs.fit(dataset)
self.assertEqual(tvsSerialModel.validationMetrics, tvsParallelModel.validationMetrics)
def test_expose_sub_models(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
collectSubModels=True)
tvsModel = tvs.fit(dataset)
self.assertEqual(len(tvsModel.subModels), len(grid))
        # Test that the default value for the "persistSubModels" option is "true"
testSubPath = temp_path + "/testTrainValidationSplitSubModels"
savingPathWithSubModels = testSubPath + "cvModel3"
tvsModel.save(savingPathWithSubModels)
tvsModel3 = TrainValidationSplitModel.load(savingPathWithSubModels)
self.assertEqual(len(tvsModel3.subModels), len(grid))
tvsModel4 = tvsModel3.copy()
self.assertEqual(len(tvsModel4.subModels), len(grid))
savingPathWithoutSubModels = testSubPath + "cvModel2"
tvsModel.write().option("persistSubModels", "false").save(savingPathWithoutSubModels)
tvsModel2 = TrainValidationSplitModel.load(savingPathWithoutSubModels)
self.assertEqual(tvsModel2.subModels, None)
for i in range(len(grid)):
self.assertEqual(tvsModel.subModels[i].uid, tvsModel3.subModels[i].uid)
def test_save_load_nested_estimator(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
ova = OneVsRest(classifier=LogisticRegression())
lr1 = LogisticRegression().setMaxIter(100)
lr2 = LogisticRegression().setMaxIter(150)
grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
evaluator = MulticlassClassificationEvaluator()
tvs = TrainValidationSplit(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
originalParamMap = tvs.getEstimatorParamMaps()
loadedParamMap = loadedTvs.getEstimatorParamMaps()
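        # Estimator-valued params (here the OneVsRest classifier) are
        # compared by uid; all other params are compared by value.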
for i, param in enumerate(loadedParamMap):
for p in param:
if p.name == "classifier":
self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
else:
self.assertEqual(param[p], originalParamMap[i][p])
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsCopied = tvs.copy()
tvsModelCopied = tvsModel.copy()
self.assertEqual(tvs.getEstimator().uid, tvsCopied.getEstimator().uid,
"Copied TrainValidationSplit has the same uid of Estimator")
self.assertEqual(tvsModel.bestModel.uid, tvsModelCopied.bestModel.uid)
self.assertEqual(len(tvsModel.validationMetrics),
len(tvsModelCopied.validationMetrics),
"Copied validationMetrics has the same size of the original")
for index in range(len(tvsModel.validationMetrics)):
self.assertEqual(tvsModel.validationMetrics[index],
tvsModelCopied.validationMetrics[index])
class PersistenceTest(SparkSessionTestCase):
def test_linear_regression(self):
lr = LinearRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/lr"
lr.save(lr_path)
lr2 = LinearRegression.load(lr_path)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(type(lr.uid), type(lr2.uid))
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LinearRegression instance uid (%s) did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LinearRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_logistic_regression(self):
lr = LogisticRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/logreg"
lr.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LogisticRegression instance uid (%s) "
"did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LogisticRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def _compare_params(self, m1, m2, param):
"""
Compare 2 ML Params instances for the given param, and assert both have the same param value
and parent. The param must be a parameter of m1.
"""
# Prevent key not found error in case of some param in neither paramMap nor defaultParamMap.
if m1.isDefined(param):
paramValue1 = m1.getOrDefault(param)
paramValue2 = m2.getOrDefault(m2.getParam(param.name))
if isinstance(paramValue1, Params):
self._compare_pipelines(paramValue1, paramValue2)
else:
self.assertEqual(paramValue1, paramValue2) # for general types param
# Assert parents are equal
self.assertEqual(param.parent, m2.getParam(param.name).parent)
else:
# If m1 is not defined param, then m2 should not, too. See SPARK-14931.
self.assertFalse(m2.isDefined(m2.getParam(param.name)))
def _compare_pipelines(self, m1, m2):
"""
Compare 2 ML types, asserting that they are equivalent.
This currently supports:
- basic types
- Pipeline, PipelineModel
- OneVsRest, OneVsRestModel
This checks:
- uid
- type
- Param values and parents
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
if isinstance(m1, JavaParams) or isinstance(m1, Transformer):
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
self._compare_params(m1, m2, p)
elif isinstance(m1, Pipeline):
self.assertEqual(len(m1.getStages()), len(m2.getStages()))
for s1, s2 in zip(m1.getStages(), m2.getStages()):
self._compare_pipelines(s1, s2)
elif isinstance(m1, PipelineModel):
self.assertEqual(len(m1.stages), len(m2.stages))
for s1, s2 in zip(m1.stages, m2.stages):
self._compare_pipelines(s1, s2)
elif isinstance(m1, OneVsRest) or isinstance(m1, OneVsRestModel):
for p in m1.params:
self._compare_params(m1, m2, p)
if isinstance(m1, OneVsRestModel):
self.assertEqual(len(m1.models), len(m2.models))
for x, y in zip(m1.models, m2.models):
self._compare_pipelines(x, y)
else:
raise RuntimeError("_compare_pipelines does not yet support type: %s" % type(m1))
def test_pipeline_persistence(self):
"""
Pipeline[HashingTF, PCA]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pl = Pipeline(stages=[tf, pca])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_nested_pipeline_persistence(self):
"""
Pipeline[HashingTF, Pipeline[PCA]]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
p0 = Pipeline(stages=[pca])
pl = Pipeline(stages=[tf, p0])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_python_transformer_pipeline_persistence(self):
"""
Pipeline[MockUnaryTransformer, Binarizer]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.range(0, 10).toDF('input')
tf = MockUnaryTransformer(shiftVal=2)\
.setInputCol("input").setOutputCol("shiftedInput")
tf2 = Binarizer(threshold=6, inputCol="shiftedInput", outputCol="binarized")
pl = Pipeline(stages=[tf, tf2])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_onevsrest(self):
temp_path = tempfile.mkdtemp()
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))] * 10,
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
model = ovr.fit(df)
ovrPath = temp_path + "/ovr"
ovr.save(ovrPath)
loadedOvr = OneVsRest.load(ovrPath)
self._compare_pipelines(ovr, loadedOvr)
modelPath = temp_path + "/ovrModel"
model.save(modelPath)
loadedModel = OneVsRestModel.load(modelPath)
self._compare_pipelines(model, loadedModel)
def test_decisiontree_classifier(self):
dt = DecisionTreeClassifier(maxDepth=1)
path = tempfile.mkdtemp()
dtc_path = path + "/dtc"
dt.save(dtc_path)
dt2 = DecisionTreeClassifier.load(dtc_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeClassifier instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeClassifier instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_decisiontree_regressor(self):
dt = DecisionTreeRegressor(maxDepth=1)
path = tempfile.mkdtemp()
dtr_path = path + "/dtr"
dt.save(dtr_path)
        dt2 = DecisionTreeRegressor.load(dtr_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeRegressor instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeRegressor instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_default_read_write(self):
temp_path = tempfile.mkdtemp()
lr = LogisticRegression()
lr.setMaxIter(50)
lr.setThreshold(.75)
writer = DefaultParamsWriter(lr)
savePath = temp_path + "/lr"
writer.save(savePath)
reader = DefaultParamsReadable.read()
lr2 = reader.load(savePath)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(lr.extractParamMap(), lr2.extractParamMap())
# test overwrite
lr.setThreshold(.8)
writer.overwrite().save(savePath)
reader = DefaultParamsReadable.read()
lr3 = reader.load(savePath)
self.assertEqual(lr.uid, lr3.uid)
self.assertEqual(lr.extractParamMap(), lr3.extractParamMap())
class LDATest(SparkSessionTestCase):
def _compare(self, m1, m2):
"""
Temp method for comparing instances.
TODO: Replace with generic implementation once SPARK-14706 is merged.
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
if m1.isDefined(p):
self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))
self.assertEqual(p.parent, m2.getParam(p.name).parent)
if isinstance(m1, LDAModel):
self.assertEqual(m1.vocabSize(), m2.vocabSize())
self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())
def test_persistence(self):
# Test save/load for LDA, LocalLDAModel, DistributedLDAModel.
df = self.spark.createDataFrame([
[1, Vectors.dense([0.0, 1.0])],
[2, Vectors.sparse(2, {0: 1.0})],
], ["id", "features"])
# Fit model
lda = LDA(k=2, seed=1, optimizer="em")
distributedModel = lda.fit(df)
self.assertTrue(distributedModel.isDistributed())
localModel = distributedModel.toLocal()
self.assertFalse(localModel.isDistributed())
# Define paths
path = tempfile.mkdtemp()
lda_path = path + "/lda"
dist_model_path = path + "/distLDAModel"
local_model_path = path + "/localLDAModel"
# Test LDA
lda.save(lda_path)
lda2 = LDA.load(lda_path)
self._compare(lda, lda2)
# Test DistributedLDAModel
distributedModel.save(dist_model_path)
distributedModel2 = DistributedLDAModel.load(dist_model_path)
self._compare(distributedModel, distributedModel2)
# Test LocalLDAModel
localModel.save(local_model_path)
localModel2 = LocalLDAModel.load(local_model_path)
self._compare(localModel, localModel2)
# Clean up
try:
rmtree(path)
except OSError:
pass
class TrainingSummaryTest(SparkSessionTestCase):
def test_linear_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertAlmostEqual(s.explainedVariance, 0.25, 2)
self.assertAlmostEqual(s.meanAbsoluteError, 0.0)
self.assertAlmostEqual(s.meanSquaredError, 0.0)
self.assertAlmostEqual(s.rootMeanSquaredError, 0.0)
self.assertAlmostEqual(s.r2, 1.0, 2)
self.assertAlmostEqual(s.r2adj, 1.0, 2)
self.assertTrue(isinstance(s.residuals, DataFrame))
self.assertEqual(s.numInstances, 2)
self.assertEqual(s.degreesOfFreedom, 1)
devResiduals = s.devianceResiduals
self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned
# The child class LinearRegressionTrainingSummary runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance)
def test_glr_summary(self):
from pyspark.ml.linalg import Vectors
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
glr = GeneralizedLinearRegression(family="gaussian", link="identity", weightCol="weight",
fitIntercept=False)
model = glr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertEqual(s.numIterations, 1) # this should default to a single iteration of WLS
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.numInstances, 2)
self.assertTrue(isinstance(s.residuals(), DataFrame))
self.assertTrue(isinstance(s.residuals("pearson"), DataFrame))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
self.assertEqual(s.degreesOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedomNull, 2)
self.assertEqual(s.rank, 1)
self.assertTrue(isinstance(s.solver, basestring))
self.assertTrue(isinstance(s.aic, float))
self.assertTrue(isinstance(s.deviance, float))
self.assertTrue(isinstance(s.nullDeviance, float))
self.assertTrue(isinstance(s.dispersion, float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned
# The child class GeneralizedLinearRegressionTrainingSummary runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.deviance, s.deviance)
def test_binary_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
self.assertAlmostEqual(s.accuracy, 1.0, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_multiclass_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], [])),
(2.0, 2.0, Vectors.dense(2.0)),
(2.0, 2.0, Vectors.dense(1.9))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertAlmostEqual(s.accuracy, 0.75, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2)
self.assertAlmostEqual(s.weightedRecall, 0.75, 2)
self.assertAlmostEqual(s.weightedPrecision, 0.583, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 0.65, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.65, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.accuracy, s.accuracy)
def test_gaussian_mixture_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
gmm = GaussianMixture(k=2)
model = gmm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertTrue(isinstance(s.probability, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
def test_bisecting_kmeans_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
bkm = BisectingKMeans(k=2)
model = bkm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
def test_kmeans_summary(self):
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
(Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
class KMeansTests(SparkSessionTestCase):
def test_kmeans_cosine_distance(self):
data = [(Vectors.dense([1.0, 1.0]),), (Vectors.dense([10.0, 10.0]),),
(Vectors.dense([1.0, 0.5]),), (Vectors.dense([10.0, 4.4]),),
(Vectors.dense([-1.0, 1.0]),), (Vectors.dense([-100.0, 90.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=3, seed=1, distanceMeasure="cosine")
model = kmeans.fit(df)
result = model.transform(df).collect()
self.assertTrue(result[0].prediction == result[1].prediction)
self.assertTrue(result[2].prediction == result[3].prediction)
self.assertTrue(result[4].prediction == result[5].prediction)
class OneVsRestTests(SparkSessionTestCase):
def test_copy(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
ovr1 = ovr.copy({lr.maxIter: 10})
self.assertEqual(ovr.getClassifier().getMaxIter(), 5)
self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)
model = ovr.fit(df)
model1 = model.copy({model.predictionCol: "indexed"})
self.assertEqual(model1.getPredictionCol(), "indexed")
def test_output_columns(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, parallelism=1)
model = ovr.fit(df)
output = model.transform(df)
self.assertEqual(output.columns, ["label", "features", "prediction"])
def test_parallelism_doesnt_change_output(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
ovrPar1 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=1)
modelPar1 = ovrPar1.fit(df)
ovrPar2 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=2)
modelPar2 = ovrPar2.fit(df)
for i, model in enumerate(modelPar1.models):
self.assertTrue(np.allclose(model.coefficients.toArray(),
modelPar2.models[i].coefficients.toArray(), atol=1E-4))
self.assertTrue(np.allclose(model.intercept, modelPar2.models[i].intercept, atol=1E-4))
def test_support_for_weightCol(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8), 1.0),
(1.0, Vectors.sparse(2, [], []), 1.0),
(2.0, Vectors.dense(0.5, 0.5), 1.0)],
["label", "features", "weight"])
# classifier inherits hasWeightCol
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, weightCol="weight")
self.assertIsNotNone(ovr.fit(df))
# classifier doesn't inherit hasWeightCol
dt = DecisionTreeClassifier()
ovr2 = OneVsRest(classifier=dt, weightCol="weight")
self.assertIsNotNone(ovr2.fit(df))
class HashingTFTest(SparkSessionTestCase):
def test_apply_binary_term_freqs(self):
df = self.spark.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
n = 10
hashingTF = HashingTF()
hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n).setBinary(True)
output = hashingTF.transform(df)
features = output.select("features").first().features.toArray()
expected = Vectors.dense([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).toArray()
for i in range(0, n):
self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
": expected " + str(expected[i]) + ", got " + str(features[i]))
class GeneralizedLinearRegressionTest(SparkSessionTestCase):
def test_tweedie_distribution(self):
df = self.spark.createDataFrame(
[(1.0, Vectors.dense(0.0, 0.0)),
(1.0, Vectors.dense(1.0, 2.0)),
(2.0, Vectors.dense(0.0, 0.0)),
(2.0, Vectors.dense(1.0, 1.0)), ], ["label", "features"])
glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.6)
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1E-4))
model2 = glr.setLinkPower(-1.0).fit(df)
self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1E-4))
self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1E-4))
def test_offset(self):
df = self.spark.createDataFrame(
[(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),
(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),
(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),
(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))], ["label", "weight", "offset", "features"])
glr = GeneralizedLinearRegression(family="poisson", weightCol="weight", offsetCol="offset")
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581],
atol=1E-4))
self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1E-4))
class LinearRegressionTest(SparkSessionTestCase):
def test_linear_regression_with_huber_loss(self):
data_path = "data/mllib/sample_linear_regression_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lir = LinearRegression(loss="huber", epsilon=2.0)
model = lir.fit(df)
expectedCoefficients = [0.136, 0.7648, -0.7761, 2.4236, 0.537,
1.2612, -0.333, -0.5694, -0.6311, 0.6053]
expectedIntercept = 0.1607
expectedScale = 9.758
self.assertTrue(
np.allclose(model.coefficients.toArray(), expectedCoefficients, atol=1E-3))
self.assertTrue(np.isclose(model.intercept, expectedIntercept, atol=1E-3))
self.assertTrue(np.isclose(model.scale, expectedScale, atol=1E-3))
class LogisticRegressionTest(SparkSessionTestCase):
def test_binomial_logistic_regression_with_bound(self):
df = self.spark.createDataFrame(
[(1.0, 1.0, Vectors.dense(0.0, 5.0)),
(0.0, 2.0, Vectors.dense(1.0, 2.0)),
(1.0, 3.0, Vectors.dense(2.0, 1.0)),
(0.0, 4.0, Vectors.dense(3.0, 3.0)), ], ["label", "weight", "features"])
lor = LogisticRegression(regParam=0.01, weightCol="weight",
lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),
upperBoundsOnIntercepts=Vectors.dense(0.0))
model = lor.fit(df)
self.assertTrue(
np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.0, atol=1E-4))
def test_multinomial_logistic_regression_with_bound(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lor = LogisticRegression(regParam=0.01,
lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),
upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0))
model = lor.fit(df)
expected = [[4.593, 4.5516, 9.0099, 12.2904],
[1.0, 8.1093, 7.0, 10.0],
[3.041, 5.0, 8.0, 11.0]]
for i in range(0, len(expected)):
self.assertTrue(
np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1E-4))
self.assertTrue(
np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1E-4))
class MultilayerPerceptronClassifierTest(SparkSessionTestCase):
def test_raw_and_probability_prediction(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[4, 5, 4, 3],
blockSize=128, seed=123)
model = mlp.fit(df)
test = self.sc.parallelize([Row(features=Vectors.dense(0.1, 0.1, 0.25, 0.25))]).toDF()
result = model.transform(test).head()
expected_prediction = 2.0
expected_probability = [0.0, 0.0, 1.0]
expected_rawPrediction = [57.3955, -124.5462, 67.9943]
        self.assertEqual(result.prediction, expected_prediction)
self.assertTrue(np.allclose(result.probability, expected_probability, atol=1E-4))
self.assertTrue(np.allclose(result.rawPrediction, expected_rawPrediction, atol=1E-4))
class FPGrowthTests(SparkSessionTestCase):
def setUp(self):
super(FPGrowthTests, self).setUp()
self.data = self.spark.createDataFrame(
[([1, 2], ), ([1, 2], ), ([1, 2, 3], ), ([1, 3], )],
["items"])
def test_association_rules(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_association_rules = self.spark.createDataFrame(
[([3], [1], 1.0), ([2], [1], 1.0)],
["antecedent", "consequent", "confidence"]
)
actual_association_rules = fpm.associationRules
self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)
self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)
def test_freq_itemsets(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_freq_itemsets = self.spark.createDataFrame(
[([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)],
["items", "freq"]
)
actual_freq_itemsets = fpm.freqItemsets
self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)
self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)
def tearDown(self):
del self.data
class ImageReaderTest(SparkSessionTestCase):
def test_read_images(self):
data_path = 'data/mllib/images/kittens'
df = ImageSchema.readImages(data_path, recursive=True, dropImageFailures=True)
self.assertEqual(df.count(), 4)
first_row = df.take(1)[0][0]
array = ImageSchema.toNDArray(first_row)
self.assertEqual(len(array), first_row[1])
self.assertEqual(ImageSchema.toImage(array, origin=first_row[0]), first_row)
self.assertEqual(df.schema, ImageSchema.imageSchema)
self.assertEqual(df.schema["image"].dataType, ImageSchema.columnSchema)
expected = {'CV_8UC3': 16, 'Undefined': -1, 'CV_8U': 0, 'CV_8UC1': 0, 'CV_8UC4': 24}
self.assertEqual(ImageSchema.ocvTypes, expected)
expected = ['origin', 'height', 'width', 'nChannels', 'mode', 'data']
self.assertEqual(ImageSchema.imageFields, expected)
self.assertEqual(ImageSchema.undefinedImageType, "Undefined")
with QuietTest(self.sc):
self.assertRaisesRegexp(
TypeError,
"image argument should be pyspark.sql.types.Row; however",
lambda: ImageSchema.toNDArray("a"))
with QuietTest(self.sc):
self.assertRaisesRegexp(
ValueError,
"image argument should have attributes specified in",
lambda: ImageSchema.toNDArray(Row(a=1)))
with QuietTest(self.sc):
self.assertRaisesRegexp(
TypeError,
"array argument should be numpy.ndarray; however, it got",
lambda: ImageSchema.toImage("a"))
class ImageReaderTest2(PySparkTestCase):
@classmethod
def setUpClass(cls):
super(ImageReaderTest2, cls).setUpClass()
cls.hive_available = True
# Note that here we enable Hive's support.
cls.spark = None
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
        except (py4j.protocol.Py4JError, TypeError):
            cls.tearDownClass()
            cls.hive_available = False
if cls.hive_available:
cls.spark = HiveContext._createForTesting(cls.sc)
def setUp(self):
if not self.hive_available:
self.skipTest("Hive is not available.")
@classmethod
def tearDownClass(cls):
super(ImageReaderTest2, cls).tearDownClass()
if cls.spark is not None:
cls.spark.sparkSession.stop()
cls.spark = None
def test_read_images_multiple_times(self):
# This test case is to check if `ImageSchema.readImages` tries to
# initiate Hive client multiple times. See SPARK-22651.
data_path = 'data/mllib/images/kittens'
ImageSchema.readImages(data_path, recursive=True, dropImageFailures=True)
ImageSchema.readImages(data_path, recursive=True, dropImageFailures=True)
class ALSTest(SparkSessionTestCase):
def test_storage_levels(self):
df = self.spark.createDataFrame(
[(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
["user", "item", "rating"])
als = ALS().setMaxIter(1).setRank(1)
# test default params
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als.getFinalStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "MEMORY_AND_DISK")
# test non-default params
als.setIntermediateStorageLevel("MEMORY_ONLY_2")
als.setFinalStorageLevel("DISK_ONLY")
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als.getFinalStorageLevel(), "DISK_ONLY")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "DISK_ONLY")
class DefaultValuesTests(PySparkTestCase):
"""
Test :py:class:`JavaParams` classes to see if their default Param values match
those in their Scala counterparts.
"""
def test_java_params(self):
import pyspark.ml.feature
import pyspark.ml.classification
import pyspark.ml.clustering
import pyspark.ml.evaluation
import pyspark.ml.pipeline
import pyspark.ml.recommendation
import pyspark.ml.regression
modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
pyspark.ml.evaluation, pyspark.ml.pipeline, pyspark.ml.recommendation,
pyspark.ml.regression]
for module in modules:
for name, cls in inspect.getmembers(module, inspect.isclass):
if not name.endswith('Model') and not name.endswith('Params')\
and issubclass(cls, JavaParams) and not inspect.isabstract(cls):
# NOTE: disable check_params_exist until there is parity with Scala API
ParamTests.check_params(self, cls(), check_params_exist=False)
# Additional classes that need explicit construction
from pyspark.ml.feature import CountVectorizerModel, StringIndexerModel
ParamTests.check_params(self, CountVectorizerModel.from_vocabulary(['a'], 'input'),
check_params_exist=False)
ParamTests.check_params(self, StringIndexerModel.from_labels(['a', 'b'], 'input'),
check_params_exist=False)
def _squared_distance(a, b):
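    # Dispatch to whichever operand is an ML Vector, so that plain lists and
    # numpy arrays can be compared against Vectors from either side.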
if isinstance(a, Vector):
return a.squared_distance(b)
else:
return b.squared_distance(a)
class VectorTests(MLlibTestCase):
def _test_serialize(self, v):
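        # Round-trip v through the Python pickler and through the JVM-side
        # MLSerDe (both singly and as a list) and check it comes back
        # unchanged.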
self.assertEqual(v, ser.loads(ser.dumps(v)))
jvec = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(v)))
nv = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvec)))
self.assertEqual(v, nv)
vs = [v] * 100
jvecs = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(vs)))
nvs = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvecs)))
self.assertEqual(vs, nvs)
def test_serialize(self):
self._test_serialize(DenseVector(range(10)))
self._test_serialize(DenseVector(array([1., 2., 3., 4.])))
self._test_serialize(DenseVector(pyarray.array('d', range(10))))
self._test_serialize(SparseVector(4, {1: 1, 3: 2}))
self._test_serialize(SparseVector(3, {}))
self._test_serialize(DenseMatrix(2, 3, range(6)))
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
self._test_serialize(sm1)
def test_dot(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([1, 2, 3, 4])
mat = array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
arr = pyarray.array('d', [0, 1, 2, 3])
self.assertEqual(10.0, sv.dot(dv))
self.assertTrue(array_equal(array([3., 6., 9., 12.]), sv.dot(mat)))
self.assertEqual(30.0, dv.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), dv.dot(mat)))
self.assertEqual(30.0, lst.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), lst.dot(mat)))
self.assertEqual(7.0, sv.dot(arr))
def test_squared_distance(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([4, 3, 2, 1])
lst1 = [4, 3, 2, 1]
arr = pyarray.array('d', [0, 2, 1, 3])
narr = array([0, 2, 1, 3])
self.assertEqual(15.0, _squared_distance(sv, dv))
self.assertEqual(25.0, _squared_distance(sv, lst))
self.assertEqual(20.0, _squared_distance(dv, lst))
self.assertEqual(15.0, _squared_distance(dv, sv))
self.assertEqual(25.0, _squared_distance(lst, sv))
self.assertEqual(20.0, _squared_distance(lst, dv))
self.assertEqual(0.0, _squared_distance(sv, sv))
self.assertEqual(0.0, _squared_distance(dv, dv))
self.assertEqual(0.0, _squared_distance(lst, lst))
self.assertEqual(25.0, _squared_distance(sv, lst1))
self.assertEqual(3.0, _squared_distance(sv, arr))
self.assertEqual(3.0, _squared_distance(sv, narr))
def test_hash(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(hash(v1), hash(v2))
self.assertEqual(hash(v1), hash(v3))
self.assertEqual(hash(v2), hash(v3))
self.assertFalse(hash(v1) == hash(v4))
self.assertFalse(hash(v2) == hash(v4))
def test_eq(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(6, [(1, 1.0), (3, 5.5)])
v5 = DenseVector([0.0, 1.0, 0.0, 2.5])
v6 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(v1, v2)
self.assertEqual(v1, v3)
self.assertFalse(v2 == v4)
self.assertFalse(v1 == v5)
self.assertFalse(v1 == v6)
def test_equals(self):
indices = [1, 2, 4]
values = [1., 3., 2.]
self.assertTrue(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 1., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 2., 2.]))
def test_conversion(self):
# numpy arrays should be automatically upcast to float64
# tests for fix of [SPARK-5089]
v = array([1, 2, 3, 4], dtype='float64')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
v = array([1, 2, 3, 4], dtype='float32')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
def test_sparse_vector_indexing(self):
sv = SparseVector(5, {1: 1, 3: 2})
self.assertEqual(sv[0], 0.)
self.assertEqual(sv[3], 2.)
self.assertEqual(sv[1], 1.)
self.assertEqual(sv[2], 0.)
self.assertEqual(sv[4], 0.)
self.assertEqual(sv[-1], 0.)
self.assertEqual(sv[-2], 2.)
self.assertEqual(sv[-3], 0.)
self.assertEqual(sv[-5], 0.)
for ind in [5, -6]:
self.assertRaises(IndexError, sv.__getitem__, ind)
for ind in [7.8, '1']:
self.assertRaises(TypeError, sv.__getitem__, ind)
zeros = SparseVector(4, {})
self.assertEqual(zeros[0], 0.0)
self.assertEqual(zeros[3], 0.0)
for ind in [4, -5]:
self.assertRaises(IndexError, zeros.__getitem__, ind)
empty = SparseVector(0, {})
for ind in [-1, 0, 1]:
self.assertRaises(IndexError, empty.__getitem__, ind)
def test_sparse_vector_iteration(self):
self.assertListEqual(list(SparseVector(3, [], [])), [0.0, 0.0, 0.0])
self.assertListEqual(list(SparseVector(5, [0, 3], [1.0, 2.0])), [1.0, 0.0, 0.0, 2.0, 0.0])
def test_matrix_indexing(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
expected = [[0, 6], [1, 8], [4, 10]]
for i in range(3):
for j in range(2):
self.assertEqual(mat[i, j], expected[i][j])
for i, j in [(-1, 0), (4, 1), (3, 4)]:
self.assertRaises(IndexError, mat.__getitem__, (i, j))
def test_repr_dense_matrix(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
        self.assertEqual(
            repr(mat),
            'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
        mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10], True)
        self.assertEqual(
            repr(mat),
            'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], True)')
        mat = DenseMatrix(6, 3, zeros(18))
        self.assertEqual(
            repr(mat),
            'DenseMatrix(6, 3, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ..., '
            '0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], False)')
def test_repr_sparse_matrix(self):
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
        self.assertEqual(
            repr(sm1t),
            'SparseMatrix(3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], '
            '[3.0, 2.0, 4.0, 9.0, 8.0], True)')
indices = tile(arange(6), 3)
values = ones(18)
sm = SparseMatrix(6, 3, [0, 6, 12, 18], indices, values)
        self.assertEqual(
            repr(sm),
            'SparseMatrix(6, 3, [0, 6, 12, 18], '
            '[0, 1, 2, 3, 4, 5, 0, 1, ..., 4, 5, 0, 1, 2, 3, 4, 5], '
            '[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ..., '
            '1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], False)')
        self.assertEqual(
            str(sm),
            '6 X 3 CSCMatrix\n'
            '(0,0) 1.0\n(1,0) 1.0\n(2,0) 1.0\n(3,0) 1.0\n(4,0) 1.0\n(5,0) 1.0\n'
            '(0,1) 1.0\n(1,1) 1.0\n(2,1) 1.0\n(3,1) 1.0\n(4,1) 1.0\n(5,1) 1.0\n'
            '(0,2) 1.0\n(1,2) 1.0\n(2,2) 1.0\n(3,2) 1.0\n..\n..')
sm = SparseMatrix(1, 18, zeros(19), [], [])
        self.assertEqual(
            repr(sm),
            'SparseMatrix(1, 18, '
            '[0, 0, 0, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0], [], [], False)')
def test_sparse_matrix(self):
# Test sparse matrix creation.
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
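        # CSC layout: colPtrs delimits each column's slice of rowIndices and
        # values; columns 1 and 3 (0-based) are empty here.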
self.assertEqual(sm1.numRows, 3)
self.assertEqual(sm1.numCols, 4)
self.assertEqual(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4])
self.assertEqual(sm1.rowIndices.tolist(), [1, 2, 1, 2])
self.assertEqual(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0])
        self.assertEqual(
            repr(sm1),
            'SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], '
            '[1.0, 2.0, 4.0, 5.0], False)')
# Test indexing
expected = [
[0, 0, 0, 0],
[1, 0, 4, 0],
[2, 0, 5, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1[i, j])
self.assertTrue(array_equal(sm1.toArray(), expected))
for i, j in [(-1, 1), (4, 3), (3, 5)]:
self.assertRaises(IndexError, sm1.__getitem__, (i, j))
# Test conversion to dense and sparse.
smnew = sm1.toDense().toSparse()
self.assertEqual(sm1.numRows, smnew.numRows)
self.assertEqual(sm1.numCols, smnew.numCols)
self.assertTrue(array_equal(sm1.colPtrs, smnew.colPtrs))
self.assertTrue(array_equal(sm1.rowIndices, smnew.rowIndices))
self.assertTrue(array_equal(sm1.values, smnew.values))
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertEqual(sm1t.numRows, 3)
self.assertEqual(sm1t.numCols, 4)
self.assertEqual(sm1t.colPtrs.tolist(), [0, 2, 3, 5])
self.assertEqual(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2])
self.assertEqual(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0])
expected = [
[3, 2, 0, 0],
[0, 0, 4, 0],
[9, 0, 8, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1t[i, j])
self.assertTrue(array_equal(sm1t.toArray(), expected))
def test_dense_matrix_is_transposed(self):
mat1 = DenseMatrix(3, 2, [0, 4, 1, 6, 3, 9], isTransposed=True)
mat = DenseMatrix(3, 2, [0, 1, 3, 4, 6, 9])
self.assertEqual(mat1, mat)
expected = [[0, 4], [1, 6], [3, 9]]
for i in range(3):
for j in range(2):
self.assertEqual(mat1[i, j], expected[i][j])
self.assertTrue(array_equal(mat1.toArray(), expected))
sm = mat1.toSparse()
self.assertTrue(array_equal(sm.rowIndices, [1, 2, 0, 1, 2]))
self.assertTrue(array_equal(sm.colPtrs, [0, 2, 5]))
self.assertTrue(array_equal(sm.values, [1, 3, 4, 6, 9]))
def test_norms(self):
a = DenseVector([0, 2, 3, -1])
self.assertAlmostEqual(a.norm(2), 3.742, 3)
        self.assertEqual(a.norm(1), 6)
        self.assertEqual(a.norm(inf), 3)
a = SparseVector(4, [0, 2], [3, -4])
self.assertAlmostEqual(a.norm(2), 5)
        self.assertEqual(a.norm(1), 7)
        self.assertEqual(a.norm(inf), 4)
tmp = SparseVector(4, [0, 2], [3, 0])
self.assertEqual(tmp.numNonzeros(), 1)
class VectorUDTTests(MLlibTestCase):
dv0 = DenseVector([])
dv1 = DenseVector([1.0, 2.0])
sv0 = SparseVector(2, [], [])
sv1 = SparseVector(2, [1], [2.0])
udt = VectorUDT()
def test_json_schema(self):
self.assertEqual(VectorUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for v in [self.dv0, self.dv1, self.sv0, self.sv1]:
self.assertEqual(v, self.udt.deserialize(self.udt.serialize(v)))
def test_infer_schema(self):
rdd = self.sc.parallelize([Row(label=1.0, features=self.dv1),
Row(label=0.0, features=self.sv1)])
df = rdd.toDF()
schema = df.schema
field = [f for f in schema.fields if f.name == "features"][0]
self.assertEqual(field.dataType, self.udt)
vectors = df.rdd.map(lambda p: p.features).collect()
self.assertEqual(len(vectors), 2)
for v in vectors:
if isinstance(v, SparseVector):
self.assertEqual(v, self.sv1)
elif isinstance(v, DenseVector):
self.assertEqual(v, self.dv1)
else:
raise TypeError("expecting a vector but got %r of type %r" % (v, type(v)))
class MatrixUDTTests(MLlibTestCase):
dm1 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10])
dm2 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10], isTransposed=True)
sm1 = SparseMatrix(1, 1, [0, 1], [0], [2.0])
sm2 = SparseMatrix(2, 1, [0, 0, 1], [0], [5.0], isTransposed=True)
udt = MatrixUDT()
def test_json_schema(self):
self.assertEqual(MatrixUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for m in [self.dm1, self.dm2, self.sm1, self.sm2]:
self.assertEqual(m, self.udt.deserialize(self.udt.serialize(m)))
def test_infer_schema(self):
rdd = self.sc.parallelize([("dense", self.dm1), ("sparse", self.sm1)])
df = rdd.toDF()
schema = df.schema
        self.assertEqual(schema.fields[1].dataType, self.udt)
matrices = df.rdd.map(lambda x: x._2).collect()
self.assertEqual(len(matrices), 2)
for m in matrices:
if isinstance(m, DenseMatrix):
                self.assertEqual(m, self.dm1)
elif isinstance(m, SparseMatrix):
                self.assertEqual(m, self.sm1)
else:
raise ValueError("Expected a matrix but got type %r" % type(m))
class WrapperTests(MLlibTestCase):
def test_new_java_array(self):
# test array of strings
str_list = ["a", "b", "c"]
java_class = self.sc._gateway.jvm.java.lang.String
java_array = JavaWrapper._new_java_array(str_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), str_list)
# test array of integers
int_list = [1, 2, 3]
java_class = self.sc._gateway.jvm.java.lang.Integer
java_array = JavaWrapper._new_java_array(int_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), int_list)
# test array of floats
float_list = [0.1, 0.2, 0.3]
java_class = self.sc._gateway.jvm.java.lang.Double
java_array = JavaWrapper._new_java_array(float_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), float_list)
# test array of bools
bool_list = [False, True, True]
java_class = self.sc._gateway.jvm.java.lang.Boolean
java_array = JavaWrapper._new_java_array(bool_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), bool_list)
# test array of Java DenseVectors
v1 = DenseVector([0.0, 1.0])
v2 = DenseVector([1.0, 0.0])
vec_java_list = [_py2java(self.sc, v1), _py2java(self.sc, v2)]
java_class = self.sc._gateway.jvm.org.apache.spark.ml.linalg.DenseVector
java_array = JavaWrapper._new_java_array(vec_java_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), [v1, v2])
# test empty array
java_class = self.sc._gateway.jvm.java.lang.Integer
java_array = JavaWrapper._new_java_array([], java_class)
self.assertEqual(_java2py(self.sc, java_array), [])
class ChiSquareTestTests(SparkSessionTestCase):
def test_chisquaretest(self):
data = [[0, Vectors.dense([0, 1, 2])],
[1, Vectors.dense([1, 1, 1])],
[2, Vectors.dense([2, 1, 0])]]
df = self.spark.createDataFrame(data, ['label', 'feat'])
res = ChiSquareTest.test(df, 'feat', 'label')
# This line is hitting the collect bug described in #17218, commented for now.
# pValues = res.select("degreesOfFreedom").collect())
self.assertIsInstance(res, DataFrame)
fieldNames = set(field.name for field in res.schema.fields)
expectedFields = ["pValues", "degreesOfFreedom", "statistics"]
self.assertTrue(all(field in fieldNames for field in expectedFields))
class UnaryTransformerTests(SparkSessionTestCase):
def test_unary_transformer_validate_input_type(self):
shiftVal = 3
transformer = MockUnaryTransformer(shiftVal=shiftVal)\
.setInputCol("input").setOutputCol("output")
# should not raise any errors
transformer.validateInputType(DoubleType())
with self.assertRaises(TypeError):
# passing the wrong input type should raise an error
transformer.validateInputType(IntegerType())
def test_unary_transformer_transform(self):
shiftVal = 3
transformer = MockUnaryTransformer(shiftVal=shiftVal)\
.setInputCol("input").setOutputCol("output")
df = self.spark.range(0, 10).toDF('input')
df = df.withColumn("input", df.input.cast(dataType="double"))
transformed_df = transformer.transform(df)
results = transformed_df.select("input", "output").collect()
for res in results:
self.assertEqual(res.input + shiftVal, res.output)
class EstimatorTest(unittest.TestCase):
def testDefaultFitMultiple(self):
N = 4
data = MockDataset()
estimator = MockEstimator()
params = [{estimator.fake: i} for i in range(N)]
modelIter = estimator.fitMultiple(data, params)
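        # fitMultiple returns an iterator of (index, model) pairs whose order
        # is not guaranteed, hence the sorted comparison below.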
indexList = []
for index, model in modelIter:
self.assertEqual(model.getFake(), index)
indexList.append(index)
self.assertEqual(sorted(indexList), list(range(N)))
if __name__ == "__main__":
from pyspark.ml.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'), verbosity=2)
else:
unittest.main(verbosity=2)
|
ddna1021/spark
|
python/pyspark/ml/tests.py
|
Python
|
apache-2.0
| 114,903
|
[
"Gaussian"
] |
ce6b8a444818085cbeffa5d9ccbd3a2595cafcd60fe2bcf77635df2baff4c197
|
#!/usr/bin/python
# coding=utf-8
# Release script for LIAM2
# Licence: GPLv3
from __future__ import print_function
import errno
import fnmatch
import os
import re
import stat
import subprocess
import urllib
import zipfile
from datetime import date
from os import chdir, makedirs
from os.path import exists, getsize, abspath, dirname
from shutil import copytree, copy2, rmtree as _rmtree
from subprocess import check_output, STDOUT, CalledProcessError
WEBSITE = 'liam2.plan.be'
TMP_PATH = r"c:\tmp\liam2_new_release"
#TODO:
# - different announce message for pre-releases
# - announce RC on the website too
# ? create a download page for the rc
# - create a conda environment to store requirements for the release
# create -n liam2-{release} --clone liam2
# or better yet, only store package versions:
# conda env export > doc\bundle_environment.yml
# TODO: add more scripts to implement the "git flow" model
# - hotfix_branch
# - release_branch
# - feature_branch
# - make_release, detects hotfix or release
# ------------- #
# generic tools #
# ------------- #
def size2str(value):
unit = "bytes"
if value > 1024.0:
value /= 1024.0
unit = "Kb"
if value > 1024.0:
value /= 1024.0
unit = "Mb"
return "%.2f %s" % (value, unit)
else:
return "%d %s" % (value, unit)
def generate(fname, **kwargs):
with open('%s.tmpl' % fname) as in_f, open(fname, 'w') as out_f:
out_f.write(in_f.read().format(**kwargs))
def _remove_readonly(function, path, excinfo):
if function in (os.rmdir, os.remove) and excinfo[1].errno == errno.EACCES:
# add write permission to owner
os.chmod(path, stat.S_IWUSR)
# retry removing
function(path)
else:
raise
def rmtree(path):
_rmtree(path, onerror=_remove_readonly)
def call(*args, **kwargs):
try:
return check_output(*args, stderr=STDOUT, **kwargs)
    except CalledProcessError as e:
        print(e.output)
        raise
def echocall(*args, **kwargs):
print(' '.join(args))
return call(*args, **kwargs)
def git_remote_last_rev(url, branch=None):
"""
:param url: url of the remote repository
:param branch: an optional branch (defaults to 'refs/heads/master')
:return: name/hash of the last revision
"""
if branch is None:
branch = 'refs/heads/master'
output = call('git ls-remote %s %s' % (url, branch))
for line in output.splitlines():
if line.endswith(branch):
return line.split()[0]
raise Exception("Could not determine revision number")
def yes(msg, default='y'):
choices = ' (%s/%s) ' % tuple(c.capitalize() if c == default else c
for c in ('y', 'n'))
answer = None
while answer not in ('', 'y', 'n'):
if answer is not None:
print("answer should be 'y', 'n', or <return>")
answer = raw_input(msg + choices).lower()
return (default if answer == '' else answer) == 'y'
def no(msg, default='n'):
return not yes(msg, default)
def do(description, func, *args, **kwargs):
print(description + '...', end=' ')
func(*args, **kwargs)
print("done.")
def allfiles(pattern, path='.'):
"""
like glob.glob(pattern) but also include files in subdirectories
"""
return (os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(path)
for f in fnmatch.filter(files, pattern))
def zip_pack(archivefname, filepattern):
with zipfile.ZipFile(archivefname, 'w', zipfile.ZIP_DEFLATED) as f:
for fname in allfiles(filepattern):
f.write(fname)
def zip_unpack(archivefname, dest=None):
with zipfile.ZipFile(archivefname) as f:
f.extractall(dest)
def short(release_name):
return release_name[:-2] if release_name.endswith('.0') else release_name
def long_release_name(release_name):
"""
transforms a short release name such as 0.8 to a long one such as 0.8.0
>>> long_release_name('0.8')
'0.8.0'
>>> long_release_name('0.8.0')
'0.8.0'
>>> long_release_name('0.8rc1')
'0.8.0rc1'
>>> long_release_name('0.8.0rc1')
'0.8.0rc1'
"""
dotcount = release_name.count('.')
if dotcount >= 2:
return release_name
assert dotcount == 1, "%s contains %d dots" % (release_name, dotcount)
pos = pretag_pos(release_name)
if pos is not None:
return release_name[:pos] + '.0' + release_name[pos:]
return release_name + '.0'
def pretag_pos(release_name):
"""
gives the position of any pre-release tag
>>> pretag_pos('0.8')
>>> pretag_pos('0.8alpha25')
3
>>> pretag_pos('0.8.1rc1')
5
"""
# 'a' needs to be searched for after 'beta'
for tag in ('rc', 'c', 'beta', 'b', 'alpha', 'a'):
match = re.search(tag + '\d+', release_name)
if match is not None:
return match.start()
return None
def strip_pretags(release_name):
"""
removes pre-release tags from a version string
>>> strip_pretags('0.8')
'0.8'
>>> strip_pretags('0.8alpha25')
'0.8'
>>> strip_pretags('0.8.1rc1')
'0.8.1'
"""
pos = pretag_pos(release_name)
return release_name[:pos] if pos is not None else release_name
def isprerelease(release_name):
"""
tests whether the release name contains any pre-release tag
>>> isprerelease('0.8')
False
>>> isprerelease('0.8alpha25')
True
>>> isprerelease('0.8.1rc1')
True
"""
return pretag_pos(release_name) is not None
def send_outlook(to, subject, body):
subprocess.call('outlook /c ipm.note /m "%s&subject=%s&body=%s"'
% (to, urllib.quote(subject), urllib.quote(body)))
def send_thunderbird(to, subject, body):
# preselectid='id1' selects the first "identity" for the "from" field
# We do not use our usual call because the command returns an exit status
# of 1 (failure) instead of 0, even if it works, so we simply ignore
# the failure.
subprocess.call("thunderbird -compose \"preselectid='id1',"
"to='%s',subject='%s',body='%s'\"" % (to, subject, body))
# -------------------- #
# end of generic tools #
# -------------------- #
def rst2txt(s):
"""
translates rst to raw text
>>> rst2txt(":ref:`matching() <matching>`")
'matching()'
>>> rst2txt(":PR:`123`")
'pull request 123'
>>> rst2txt(":issue:`123`")
'issue 123'
"""
s = re.sub(":ref:`(.+) <.+>`", r"\1", s)
s = re.sub(":PR:`(\d+)`", r"pull request \1", s)
return re.sub(":issue:`(\d+)`", r"issue \1", s)
def relname2fname(release_name):
short_version = short(strip_pretags(release_name))
return r"version_%s.rst.inc" % short_version.replace('.', '_')
def release_changes(release_name):
fpath = "doc\usersguide\source\changes\\" + relname2fname(release_name)
with open(fpath) as f:
return f.read().decode('utf-8-sig')
def release_highlights(release_name):
fpath = "doc\website\highlights\\" + relname2fname(release_name)
with open(fpath) as f:
return f.read().decode('utf-8-sig')
def build_exe():
chdir('src')
call('buildall.bat')
chdir('..')
def build_doc():
chdir('doc')
call('buildall.bat')
chdir('..')
def update_versions(release_name):
    # git clone + install will fail except for a post-release (but even in
    # that case, it will not be precise)
#
# version in archive I do with make_release: OK
# doc\usersguide\source\conf.py
# src\setup.py
# src\main.py
pass
def update_changelog(release_name):
"""
Update release date in changes.rst
"""
fpath = r'doc\usersguide\source\changes.rst'
with open(fpath) as f:
lines = f.readlines()
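    # changes.rst is assumed to have a fixed layout: the title of the latest
    # release on the 6th line and its release date line ("In development."
    # until released) on the 9th, hence the hard-coded indices 5 and 8 below.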
title = "Version %s" % short(release_name)
if lines[5] != title + '\n':
print("changes.rst not modified (the last release is not %s)"
% title)
return
release_date = lines[8]
if release_date != "In development.\n":
print('changes.rst not modified (the last release date is "%s" '
'instead of "In development.", was it already released?)'
% release_date)
return
lines[8] = "Released on {}.\n".format(date.today().isoformat())
with open(fpath, 'w') as f:
f.writelines(lines)
with open(fpath) as f:
print('\n'.join(f.read().decode('utf-8-sig').splitlines()[:20]))
if no('Does the full changelog look right?'):
exit(1)
call('git commit -m "update release date in changes.rst" %s' % fpath)
def copy_release(release_name):
copytree(r'build\bundle\editor', r'win32\editor')
copytree(r'build\bundle\editor', r'win64\editor')
copytree(r'build\tests\examples', r'win32\examples')
copytree(r'build\tests\examples', r'win64\examples')
copytree(r'build\src\build\exe.win32-2.7', r'win32\liam2')
copytree(r'build\src\build\exe.win-amd64-2.7', r'win64\liam2')
makedirs(r'win32\documentation')
makedirs(r'win64\documentation')
copy2(r'build\doc\usersguide\build\htmlhelp\LIAM2UserGuide.chm',
r'win32\documentation\LIAM2UserGuide.chm')
copy2(r'build\doc\usersguide\build\htmlhelp\LIAM2UserGuide.chm',
r'win64\documentation\LIAM2UserGuide.chm')
# stuff not in the bundles
copy2(r'build\doc\usersguide\build\latex\LIAM2UserGuide.pdf',
r'LIAM2UserGuide-%s.pdf' % release_name)
copy2(r'build\doc\usersguide\build\htmlhelp\LIAM2UserGuide.chm',
r'LIAM2UserGuide-%s.chm' % release_name)
copytree(r'build\doc\usersguide\build\html', 'htmldoc')
copytree(r'build\doc\usersguide\build\web',
r'webdoc\%s' % short(release_name))
def create_bundles(release_name):
chdir('win32')
zip_pack(r'..\LIAM2Suite-%s-win32.zip' % release_name, '*')
chdir('..')
chdir('win64')
zip_pack(r'..\LIAM2Suite-%s-win64.zip' % release_name, '*')
chdir('..')
chdir('htmldoc')
zip_pack(r'..\LIAM2UserGuide-%s-html.zip' % release_name, '*')
chdir('..')
def test_executable(relpath):
"""
test an executable with relative path *relpath*
"""
print()
# we use --debug so that errorlevel is set
main_dbg = relpath + r'\main --debug '
echocall(main_dbg + r'run tests\functional\generate.yml')
echocall(main_dbg + r'import tests\functional\import.yml')
echocall(main_dbg + r'run tests\functional\simulation.yml')
echocall(main_dbg + r'run tests\functional\variant.yml')
echocall(main_dbg + r'run tests\functional\matching.yml')
echocall(main_dbg + r'run tests\examples\demo01.yml')
echocall(main_dbg + r'import tests\examples\demo_import.yml')
echocall(main_dbg + r'run tests\examples\demo01.yml')
echocall(main_dbg + r'run tests\examples\demo02.yml')
echocall(main_dbg + r'run tests\examples\demo03.yml')
echocall(main_dbg + r'run tests\examples\demo04.yml')
echocall(main_dbg + r'run tests\examples\demo05.yml')
echocall(main_dbg + r'run tests\examples\demo06.yml')
echocall(main_dbg + r'run tests\examples\demo07.yml')
echocall(main_dbg + r'run tests\examples\demo08.yml')
echocall(main_dbg + r'run tests\examples\demo09.yml')
def test_executables():
"""
assumes to be in build
"""
for arch in ('win32', 'win-amd64'):
test_executable(r'src\build\exe.%s-2.7' % arch)
def check_bundles(release_name):
"""
checks the bundles unpack correctly
"""
makedirs('test')
zip_unpack('LIAM2Suite-%s-win32.zip' % release_name, r'test\win32')
zip_unpack('LIAM2Suite-%s-win64.zip' % release_name, r'test\win64')
zip_unpack('LIAM2UserGuide-%s-html.zip' % release_name, r'test\htmldoc')
zip_unpack('LIAM2-%s-src.zip' % release_name, r'test\src')
rmtree('test')
def build_website(release_name):
fnames = ["LIAM2Suite-%s-win32.zip", "LIAM2Suite-%s-win64.zip",
"LIAM2-%s-src.zip"]
s32b, s64b, ssrc = [size2str(getsize(fname % release_name))
for fname in fnames]
chdir(r'build\doc\website')
generate(r'conf.py', version=short(release_name))
generate(r'pages\download.rst',
version=release_name, short_version=short(release_name),
size32b=s32b, size64b=s64b, sizesrc=ssrc)
generate(r'pages\documentation.rst',
version=release_name, short_version=short(release_name))
title = 'Version %s released' % short(release_name)
# strip is important otherwise fname contains a \n and git chokes on it
fname = call('tinker --filename --post "%s"' % title).strip()
call('buildall.bat')
call('start ' + abspath(r'blog\html\index.html'), shell=True)
call('start ' + abspath(r'blog\html\pages\download.html'), shell=True)
call('start ' + abspath(r'blog\html\pages\documentation.html'), shell=True)
if no('Does the website look good?'):
exit(1)
call('git add master.rst')
call('git add %s' % fname)
call('git commit -m "announce version %s on website"' % short(release_name))
chdir(r'..\..\..')
copytree(r'build\doc\website\blog\html', 'website')
def upload(release_name):
# pscp is the scp provided in PuTTY's installer
base_url = '%s@%s:%s' % ('cic', WEBSITE, WEBSITE)
# 1) archives
subprocess.call(r'pscp * %s/download' % base_url)
# 2) documentation
chdir('webdoc')
subprocess.call(r'pscp -r %s %s/documentation' % (short(release_name),
base_url))
chdir('..')
# 3) website
if not isprerelease(release_name):
chdir('website')
subprocess.call(r'pscp -r * %s' % base_url)
chdir('..')
def announce(release_name):
# ideally we should use the html output of the rst file, but this is simpler
changes = rst2txt(release_changes(release_name))
body = """\
I am pleased to announce that version %s of LIAM2 is now available.
%s
More details and the complete list of changes are available below.
This new release can be downloaded on our website:
http://liam2.plan.be/pages/download.html
As always, *any* feedback is very welcome, preferably on the liam2-users
mailing list: liam2-users@googlegroups.com (you need to register to be
able to post).
%s
""" % (short(release_name), release_highlights(release_name), changes)
send_outlook('liam2-announce@googlegroups.com',
'Version %s released' % short(release_name),
body)
def cleanup():
rmtree('win32')
rmtree('win64')
rmtree('build')
def branchname(statusline):
"""
computes the branch name from a "git status -b -s" line
    >>> branchname('## master...origin/master')
    'master'
"""
statusline = statusline.replace('#', '').strip()
pos = statusline.find('...')
return statusline[:pos] if pos != -1 else statusline
def make_release(release_name=None, branch='master'):
if release_name is not None:
if 'pre' in release_name:
raise ValueError("'pre' is not supported anymore, use 'alpha' or "
"'beta' instead")
if '-' in release_name:
raise ValueError("- is not supported anymore")
release_name = long_release_name(release_name)
# releasing from the local clone has the advantage I can prepare the
# release offline and only push and upload it when I get back online
repository = abspath(dirname(__file__))
s = "Using local repository at: %s !" % repository
print("\n", s, "\n", "=" * len(s), "\n", sep='')
status = call('git status -s -b')
lines = status.splitlines()
statusline, lines = lines[0], lines[1:]
curbranch = branchname(statusline)
if curbranch != branch:
print("%s is not the current branch (%s). "
"Please use 'git checkout %s'." % (branch, curbranch, branch))
exit(1)
if lines:
        uncommitted = sum(1 for line in lines if line[1] in 'MDAU')
        untracked = sum(1 for line in lines if line.startswith('??'))
        print('Warning: there are %d files with uncommitted changes '
              'and %d untracked files:' % (uncommitted, untracked))
print('\n'.join(lines))
if no('Do you want to continue?'):
exit(1)
ahead = call('git log --format=format:%%H origin/%s..%s' % (branch, branch))
num_ahead = len(ahead.splitlines())
print("Branch '%s' is %d commits ahead of 'origin/%s'"
% (branch, num_ahead, branch), end='')
if num_ahead:
if yes(', do you want to push?'):
do('Pushing changes', call, 'git push')
else:
print()
rev = git_remote_last_rev(repository, 'refs/heads/%s' % branch)
public_release = release_name is not None
if release_name is None:
# take first 7 digits of commit hash
release_name = rev[:7]
if no('Release version %s (%s)?' % (release_name, rev)):
exit(1)
if exists(TMP_PATH):
rmtree(TMP_PATH)
makedirs(TMP_PATH)
chdir(TMP_PATH)
# make a temporary clone in /tmp. The goal is to make sure we do not
# include extra/unversioned files. For the -src archive, I don't think
# there is a risk given that we do it via git, but the risk is there for
# the bundles (src/build is not always clean, examples, editor, ...)
# Since this script updates files (update_changelog and build_website), we
# need to get those changes propagated to GitHub. I do that by updating the
# temporary clone then push twice: first from the temporary clone to the
# "working copy clone" (eg ~/devel/liam2) then to GitHub from there. The
    # alternative of modifying the "working copy clone" directly is worse
    # because it needs more complicated path handling than the two-push
    # approach.
do('Cloning', call, 'git clone -b %s %s build' % (branch, repository))
# ---------- #
chdir('build')
# ---------- #
print()
print(call('git log -1').decode('utf8'))
print()
if no('Does that last commit look right?'):
exit(1)
if public_release:
print(release_changes(release_name))
if no('Does the release changelog look right?'):
exit(1)
if public_release:
test_release = True
else:
test_release = yes('Do you want to test the executables after they are '
'created?')
do('Building executables', build_exe)
if test_release:
do('Testing executables', test_executables)
if public_release:
do('Updating changelog', update_changelog, release_name)
do('Building doc', build_doc)
do('Creating source archive', call,
r'git archive --format zip --output ..\LIAM2-%s-src.zip %s'
% (release_name, rev))
# ------- #
chdir('..')
# ------- #
do('Moving stuff around', copy_release, release_name)
do('Creating bundles', create_bundles, release_name)
do('Testing bundles', check_bundles, release_name)
if public_release:
if not isprerelease(release_name):
do('Building website (news, download and documentation pages)',
build_website, release_name)
msg = """Is the release looking good? If so, the tag will be created and
pushed, everything will be uploaded to the production server and the release
will be announced. Stuff to watch out for:
* version numbers (executable & doc first page & changelog)
* website
* ...
"""
if no(msg):
exit(1)
# ---------- #
chdir('build')
# ---------- #
do('Tagging release', call,
'git tag -a %(name)s -m "tag release %(name)s"'
% {'name': release_name})
# ------- #
chdir('..')
# ------- #
do('Uploading', upload, release_name)
# ---------- #
chdir('build')
# ---------- #
do('Announcing', announce, release_name)
# ------- #
chdir('..')
# ------- #
chdir(repository)
# We used to push from /tmp to the local repository but you cannot push
# to the currently checked out branch of a repository, so we need to
    # pull changes instead. However pull (or merge) adds changes to the
# current branch, hence we make sure at the beginning of the script
# that the current git branch is the branch to release. It would be
# possible to do so without a checkout by using:
# git fetch {tmp_path} {branch}:{branch}
# instead but then it only works for fast-forward and non-conflicting
# changes. So if the working copy is dirty, you are out of luck.
# pull the website & changelog commits to the branch (usually master)
# and the release tag (which refers to the last commit)
do('Pulling changes in %s' % repository, call,
'git pull --ff-only %s\\build %s' % (TMP_PATH, branch))
do('Pushing to GitHub', call,
'git push origin %s --follow-tags' % branch)
chdir(TMP_PATH)
do('Cleaning up', cleanup)
if __name__ == '__main__':
from sys import argv
# chdir(r'c:\tmp')
# chdir('liam2_new_release')
make_release(*argv[1:])
# update_changelog(*argv[1:])
|
cbenz/liam2
|
make_release.py
|
Python
|
gpl-3.0
| 21,316
|
[
"TINKER"
] |
78bb577200ee7c836f1fd6578258e0984c312775fa8a25a88412cc0102763529
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Development script of the ChemEnv utility to get the explicit permutations for coordination environments identified
with the explicit permutations algorithms (typically with coordination numbers <= 6)
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import ExplicitPermutationsAlgorithm
import numpy as np
import itertools
import json
import os
class Algo(object):
pass
if __name__ == '__main__':
# Choose the geometry
allcg = AllCoordinationGeometries()
while True:
cg_symbol = input('Enter symbol of the geometry for which you want to get the explicit permutations : ')
try:
cg = allcg[cg_symbol]
break
except LookupError:
print('Wrong geometry, try again ...')
continue
# Check if the algorithm currently defined for this geometry corresponds to the explicit permutation algorithm
for algo in cg.algorithms:
if algo.algorithm_type != 'EXPLICIT_PERMUTATIONS':
raise ValueError('WRONG ALGORITHM !')
algo = Algo()
algo.permutations = []
for perm in itertools.permutations(range(cg.coordination)):
algo.permutations.append(perm)
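    # Note: the number of candidate permutations grows as coordination!, e.g.
    # 6! = 720, which is why this explicit enumeration is only practical for
    # the small coordination numbers (<= 6) mentioned in the module docstring.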
lgf = LocalGeometryFinder()
lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
lgf.setup_test_perfect_environment(cg_symbol, randomness=True, indices='ORDERED')
lgf.perfect_geometry = AbstractGeometry.from_cg(cg=cg)
points_perfect = lgf.perfect_geometry.points_wocs_ctwocc()
res = lgf.coordination_geometry_symmetry_measures_standard(coordination_geometry=cg,
algo=algo,
points_perfect=points_perfect)
(csms, perms, algos, local2perfect_maps, perfect2local_maps) = res
csms_with_recorded_permutation = []
explicit_permutations = []
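    # Keep one representative permutation per distinct continuous symmetry
    # measure (CSM): permutations whose CSMs agree within numpy's default
    # isclose tolerance are treated as equivalent.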
for icsm, csm in enumerate(csms):
found = False
for csm2 in csms_with_recorded_permutation:
if np.isclose(csm, csm2):
found = True
break
if not found:
csms_with_recorded_permutation.append(csm)
explicit_permutations.append(perms[icsm])
print('Permutations found : ')
print(explicit_permutations)
print('Current algorithm(s) :')
for algo in cg.algorithms:
print(algo)
if algo.algorithm_type == 'EXPLICIT_PERMUTATIONS':
print(algo.permutations)
else:
raise ValueError('WRONG ALGORITHM !')
test = input('Save it ? ("y" to confirm)')
if test == 'y':
if len(cg.algorithms) != 1:
raise ValueError('Multiple algorithms !')
cg._algorithms = [ExplicitPermutationsAlgorithm(permutations=explicit_permutations)]
newgeom_dir = 'new_geometry_files'
if not os.path.exists(newgeom_dir):
os.makedirs(newgeom_dir)
        with open('{}/{}.json'.format(newgeom_dir, cg_symbol), 'w') as f:
            json.dump(cg.as_dict(), f)
|
gVallverdu/pymatgen
|
dev_scripts/chemenv/explicit_permutations.py
|
Python
|
mit
| 3,736
|
[
"pymatgen"
] |
1fdda7bbde85cef6e12d5f64bdce66a191741363c3bdaf689c287473c483720e
|
"""
Pawel Potocki - vtk surface canvas
"""
import copy
import math
import numpy
import vtk
import logging
import functools32 as func
import colors
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.DEBUG)
def colorAsFloatValues(color):
""" convert color values """
return color[0] / 255., color[1] / 255., color[2] / 255.
def calcStep(step):
""" calculate step """
if step == 0:
return 1
fact = math.floor(math.log10(step))
fraction = float(step) / math.pow(10, fact)
if fraction < 1.5:
fraction = 1
elif fraction < 3:
fraction = 2
elif fraction < 7:
fraction = 5
else:
fraction = 10
final_step = math.pow(10, fact) * fraction
return final_step
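# A few illustrative values (hypothetical inputs): calcStep(0.013) -> 0.01,
# calcStep(0.35) -> 0.5 and calcStep(8) -> 10, i.e. steps snap to 1, 2 or 5
# times a power of ten.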
class VTKSurfaceConfig(object):
""" VTK Canvas configurations """
def __init__(self):
""" default init """
self._labelFormat = '%6.4g'
self._xLabelsFormat = '%6.4g'
self._yLabelsFormat = '%6.4g'
self._zLabelsFormat = '%6.4g'
self._xLabel = 'X'
self._yLabel = 'Y'
self._zLabel = 'Z'
self._nLabels = 5
self._lookupTables = colors.load_colormaps()
self._rotateX = '30'
self._rotateY = '30'
self._debug = True
self._colorMaps = colors.COLOR_MAPS
def Debug(self):
""" debug on/off flag """
return self._debug
def RotateX(self):
""" rotate angle by X axis """
return self._rotateX
def RotateY(self):
""" rotate angle by Y axis """
return self._rotateY
def LabelFormat(self):
""" get labels format """
return self._labelFormat
def XLabelsFormat(self):
""" get x labels format """
return self._xLabelsFormat
def YLabelsFormat(self):
""" get y labels format """
return self._yLabelsFormat
def ZLabelsFormat(self):
""" get z labels format """
return self._zLabelsFormat
def XLabel(self):
""" get x label """
return self._xLabel
def YLabel(self):
""" get y label """
return self._yLabel
def ZLabel(self):
""" get z label """
return self._zLabel
def NLabels(self):
""" get number of labels """
return self._nLabels
def LookupTables(self):
""" get lookup tables """
return self._lookupTables
def ColorMaps(self):
""" color maps """
return self._colorMaps
def printColorMaps():
config = VTKSurfaceConfig()
for c in config.ColorMaps():
print '\t' + c
def getColorMaps():
config = VTKSurfaceConfig()
return config.ColorMaps()
def remap(data):
""" simple remapping - take x and y as natural indexes from 2D numpy array """
if isinstance(data, (list, tuple)):
return data
    shape = data.shape
    if data.ndim != 2:
        return []
return [(x, [(y, data[x, y]) for y in xrange(0, shape[1])]) for x in xrange(0, shape[0])]
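# A minimal sketch of the remapping (hypothetical input):
# remap(numpy.arange(4).reshape(2, 2)) ->
# [(0, [(0, 0), (1, 1)]), (1, [(0, 2), (1, 3)])]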
def convertData(x_yzv_pairs, **kwargs):
""" convert x_yzv value pars to vtk compatible data """
if x_yzv_pairs is None:
return [], [], [], []
x = []
y = []
z = []
v = []
hasV = False
try:
if len(x_yzv_pairs[0][1][0]) == 3:
hasV = True
except:
hasV = False
row = 0
for d in x_yzv_pairs:
x.append(d[0])
for yzv in d[1]:
if row == 0:
y.append(yzv[0])
z.append(yzv[1])
if hasV:
v.append(yzv[2])
row += 1
x = numpy.array(x)
y = numpy.array(y)
z = numpy.array(z).reshape(x.size, y.size)
if v:
v = numpy.array(v).reshape(x.size, y.size)
return x, y, z, v
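# Expected input layout, sketched with hypothetical values:
# x_yzv_pairs = [(x0, [(y0, z00), (y1, z01)]),
#                (x1, [(y0, z10), (y1, z11)])]
# which yields x = [x0, x1], y = [y0, y1] and z as a 2x2 matrix (plus a
# matching v matrix when each inner tuple carries a third value).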
class VTKSurface3D(object):
def __init__(self, x_yzv_pairs, **kwargs):
"""
default init
"""
self.reset()
self.kwargs = kwargs
self.parent = kwargs.get('parent', None)
self.bgColor = kwargs.get('bgColor', colors.GRAY)
self.fgColor = colorAsFloatValues(kwargs.get('fgColor', colors.WHITE))
self.config = kwargs.get('config', VTKSurfaceConfig())
self.appName = kwargs.get('appName', 'vtkApp')
self.Points = None
self.Colors = None
self.actors = []
self.autoZRange = kwargs.get('autoZRange', False)
self.customZRange = kwargs.get('customZRange', None)
self.rotateX = kwargs.get('rotateX', 30)
self.rotateY = kwargs.get('rotateY', 30)
self.rotateZ = kwargs.get('rotateZ', 120)
self.posFactor = kwargs.get('posFactor', 1)
self.zoomFactor = kwargs.get('zoomFactor', 1)
self.fontFactor = kwargs.get('fontFactor', 0.75)
self.opacitySlice = kwargs.get('opacitySlice', 0.55)
self.callbacks = copy.copy(kwargs.get('callbacks', {}))
self.logToFile = kwargs.get('logToFile', False)
self.renderer = kwargs.get('renderer', None)
self.doRender = kwargs.get('doRender', True)
self.bounds = None
self.center = None
self.geom = None
self.out = None
self.mapper = None
self.warp = None
self.colorbar = None
if self.renderer is None:
            raise Exception('No renderer defined')
if self.logToFile:
self.setupLogging()
if self.doRender:
self.render_surface(x_yzv_pairs, **kwargs)
if self.hasData:
self.calculatePositions()
self.setDefaultView()
def SetValue(self, x_yzv_pairs, **kwargs):
self.config = kwargs.get('config', VTKSurfaceConfig())
self.clear()
self.render_surface(x_yzv_pairs, **kwargs)
if self.hasData:
self.calculatePositions()
self.setDefaultView()
def fireCallbacks(self, callback=None):
callFunc = self.callbacks.get(callback, None)
if callFunc and callable(callFunc):
if callback == 'OnDataRange':
callFunc(self.getDataRanges())
if callback == 'OnCutterDataSet':
callFunc(self.getCutterData())
else:
for callback, callFunc in self.callbacks.iteritems():
if callback == 'OnDataRange' and callable(callFunc):
callFunc(self.getDataRanges())
if callback == 'OnCutterDataSet' and callable(callFunc):
callFunc(self.getCutterData())
def redraw( self ):
""" invalidate graph """
if self.parent:
self.parent.invalidate()
def clear(self):
"""
clear current actors
"""
for a in self.actors:
self.renderer.RemoveActor(a)
self.actors = []
def getActors(self):
""" get current modeled actors """
return self.actors
def render_surface(self, x_yzv_pairs, **kwargs):
""" render surface with data """
self.reset()
doRemap = kwargs.get('remapData', False)
if doRemap:
x_yzv_pairs = remap( x_yzv_pairs )
(x, y, z, v) = convertData(x_yzv_pairs, **kwargs)
if self.render_geometry(x, y, z, v, **kwargs):
self.render(**kwargs)
def re_render_surface(self):
""" re render the surface """
nargs = copy.copy(self.kwargs)
self.render(**nargs)
def getActiveCamera(self):
""" get active camera """
return self.renderer.GetActiveCamera()
def resetCamera(self):
""" reset camera """
self.renderer.ResetCamera()
self.renderer.ResetCameraClippingRange()
self.redraw()
def calculatePositions(self):
""" calcualte position """
self.bounds = self.mapper.GetBounds()
self.center = self.mapper.GetCenter()
xx = max(self.bounds[1] - self.center[0], self.center[0] - self.bounds[0])
yy = max(self.bounds[3] - self.center[1], self.center[1] - self.bounds[2])
zz = max(self.bounds[5] - self.center[2], self.center[2] - self.bounds[4])
if self.zoomFactor <= 0:
self.zoomFactor = 1
p = max(xx, yy, zz)
d = 5 * self.posFactor
self.distance = d * p
self.position = (0, self.distance, self.center[2])
def setDefaultView(self):
""" set default view """
cam = vtk.vtkCamera()
cam.ParallelProjectionOff()
cam.SetViewUp(0, 0, 1)
cam.SetPosition(self.position)
cam.SetFocalPoint(self.center)
cam.Azimuth(self.rotateZ)
cam.Elevation(self.rotateX)
self.renderer.SetActiveCamera(cam)
self.renderer.ResetCamera()
cam.Zoom(self.zoomFactor)
self.renderer.ResetCameraClippingRange()
self.redraw()
def setYAxisView(self):
""" set y axis view """
cam = vtk.vtkCamera()
cam.ParallelProjectionOff()
cam.SetViewUp(0, 0, 1)
cam.SetPosition(0, -self.distance, 0)
cam.SetFocalPoint(0, -self.center[2], 0)
self.renderer.SetActiveCamera(cam)
self.renderer.ResetCamera()
self.renderer.ResetCameraClippingRange()
self.redraw()
def setZAxisView(self):
""" set z axis view """
cam = vtk.vtkCamera()
cam.ParallelProjectionOff()
cam.SetViewUp(0, 1, 0)
cam.SetPosition(0, 0, self.distance)
cam.SetFocalPoint(0, 0, -self.center[2])
self.renderer.SetActiveCamera(cam)
self.renderer.ResetCamera()
self.renderer.ResetCameraClippingRange()
self.redraw()
def setXAxisView(self):
""" set x axis view """
cam = vtk.vtkCamera()
cam.ParallelProjectionOff()
cam.SetViewUp(0, 0, 0)
cam.SetPosition(self.distance, 0, 0)
cam.SetFocalPoint(self.center[2], 0, 0)
cam.Roll(270)
self.renderer.SetActiveCamera(cam)
self.renderer.ResetCamera()
self.renderer.ResetCameraClippingRange()
self.redraw()
def setupLogging(self):
""" setup local login """
import os
temp = os.path.split(os.tempnam())[0]
app = ''.join(c for c in self.appName if 'a' <= c.lower() <= 'z') + '.log.'
logName = os.path.join(temp, app)
fileOutputWindow = vtk.vtkFileOutputWindow()
fileOutputWindow.SetFileName(logName)
outputWindow = vtk.vtkOutputWindow().GetInstance()
if outputWindow:
outputWindow.SetInstance(fileOutputWindow)
def reset(self):
""" clear all properties """
self.distance = 1.0
self.position = 1.0
self.XScale = 1.0
self.YScale = 1.0
self.ZScale = 1.0
self.XLimit = (0, 0)
self.YLimit = (0, 0)
self.ZLimit = (0, 0)
self.hasData = False
self.gridfunc = None
self.mapper = None
self.Points = None
self.Colors = None
self.XCutterTransform = None
self.YCutterTransform = None
self.ZCutterTransform = None
self.xplane = None
self.yplane = None
self.zplane = None
self.XCutter = None
self.YCutter = None
self.ZCutter = None
self.XCutterMapper = None
self.YCutterMapper = None
self.ZCutterMapper = None
self.XCutterFactor = 1
self.YCutterFactor = 1
self.ZCutterFactor = 1
self.XCutterDelta = None
self.YCutterDelta = None
self.ZCutterDelta = None
self.rotateX = 30
self.rotateY = 30
self.rotateZ = 120
self.zoomFactor = 1
self.posFactor = 1
def getCutterData(self):
"""
Return Cutters data
"""
return self.getXCutterData(), self.getYCutterData()
    @func.lru_cache()
def _getXScale(self):
""" calculate limit factor and cache it """
dlim = self.YLimit
plim = (self.XCutterMapper.GetBounds()[2], self.XCutterMapper.GetBounds()[3])
scl = (plim[1] - plim[0]) / (dlim[1] - dlim[0])
if scl < 0:
scl = 1
return scl
def getXCutterData(self):
""" Return X Cutters data """
if not self.XCutterMapper:
return None
        scl = self._getXScale()
def scale(p):
return p / scl
self.XCutterMapper.Update()
out = self.XCutterMapper.GetInput()
data = [(scale(out.GetPoint(i)[1]), out.GetPoint(i)[2]) for i in xrange(out.GetNumberOfPoints())]
data = sorted(data, key=lambda s: s[0])
return data
    @func.lru_cache()
def _getYscale(self):
""" calculate limit factor and cache it """
dlim = self.XLimit
plim = (self.YCutterMapper.GetBounds()[0], self.YCutterMapper.GetBounds()[1])
scl = (plim[1] - plim[0]) / (dlim[1] - dlim[0])
if scl < 0:
scl = 1
return scl
def getYCutterData(self):
""" Return Y Cutters data """
if not self.YCutterMapper:
return None
        scl = self._getYscale()
def scale(p):
return p / scl
self.YCutterMapper.Update()
out = self.YCutterMapper.GetInput()
data = [(scale(out.GetPoint(i)[0]), out.GetPoint(i)[2]) for i in xrange(out.GetNumberOfPoints())]
data = sorted(data, key=lambda s: s[0])
return data
    @func.lru_cache()
def _getXYScale(self):
""" calculate limit factor and cache it """
bounds = self.mapper.GetBounds()
xlim = self.XLimit
xplim = (bounds[0], bounds[1])
xscl = (xplim[1] - xplim[0]) / (xlim[1] - xlim[0])
if xscl < 0:
xscl = 1
ylim = self.YLimit
yplim = (bounds[2], bounds[3])
yscl = (yplim[1] - yplim[0]) / (ylim[1] - ylim[0])
if yscl < 0:
yscl = 1
return xscl, yscl
def getZCutterData(self):
""" Return Z Cutters data """
if not self.ZCutterMapper:
return None
        xscale, yscale = self._getXYScale()
def scalex(p):
return p / xscale
def scaley(p):
return p / yscale
self.ZCutterMapper.Update()
out = self.ZCutterMapper.GetInput()
data = [(scalex(out.GetPoint(i)[0]), scaley(out.GetPoint(i)[1])) for i in xrange(out.GetNumberOfPoints())]
data = sorted(data, key=lambda s: s[0])
return data
def getDataRanges(self):
""" Return data ranges """
return self.XLimit, self.YLimit, self.ZLimit
def getXRange(self):
"""
get current xrange
"""
return self.XLimit[1] - self.XLimit[0]
def getYRange(self):
"""
get current yrange
"""
return self.YLimit[1] - self.YLimit[0]
def getZRange(self):
"""
get current z range
"""
return self.ZLimit[1] - self.ZLimit[0]
def computeScale(self, shape):
bounds = shape.GetBounds()
bx = bounds[1] - bounds[0]
by = bounds[3] - bounds[2]
bz = bounds[5] - bounds[4]
mx = min(bx, by)
zs = float(bz) / float(mx)
return zs
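    # computeScale returns the z extent of a shape relative to the smaller of
    # its x/y extents; render() later uses it (or its inverse) as the warp
    # scale factor so value ranges of very different magnitudes still produce
    # surfaces of comparable height.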
def render_geometry(self, vx, vy, vz, mv, **kwargs):
""" create geometry """
self.hasData = False
gridData = kwargs.get('gridData', True)
if gridData:
self.gridfunc = vtk.vtkStructuredGrid()
else:
self.gridfunc = vtk.vtkRectilinearGrid()
# check data structure
if type(vx) is not numpy.ndarray or type(vy) is not numpy.ndarray:
logging.error('X,Y vectors must be numpy arrays')
return False
if type(vz) is not numpy.ndarray:
logging.error('Z vector must be numpy array')
return False
if isinstance(mv, (list, tuple)) and len(mv) > 0:
logging.error('V scalars must be numpy array')
return False
if len(vx) == 0 or len(vy) == 0 or len(vz) == 0:
logging.error('Zero size data')
return False
mva = isinstance(mv, numpy.ndarray) and mv.any()
if vz.ndim != 2:
logging.error('Z must be 2 dimensional numpy matrix')
return False
if vz[0].size != vy.size:
            logging.error('Y dimension does not match')
return False
if vz.transpose()[0].size != vx.size:
            logging.error('X dimension does not match')
return False
if mva:
if mv.size != vz.size or vz[0].size != mv[0].size or vz[1].size != mv[1].size:
                logging.error('Z and V dimensions do not match')
return False
Nx = vx.size
Ny = vy.size
Nz = vz.size
# put data, z, into a 2D structured grid
self.Points = vtk.vtkPoints()
[self.Points.InsertNextPoint(vx[i], vy[j], vz[i, j]) for j in xrange(Ny) for i in xrange(Nx)]
if gridData:
self.gridfunc.SetDimensions(Nx, Ny, 1)
self.gridfunc.SetPoints(self.Points)
else:
xCoords = vtk.vtkFloatArray()
[xCoords.InsertNextValue(vx[i]) for i in xrange(Nx)]
yCoords = vtk.vtkFloatArray()
[yCoords.InsertNextValue(vy[j]) for j in xrange(Ny)]
s = list(vz.flatten())
vz = numpy.array(s)
# vz.sort()
Nz = vz.size
zCoords = vtk.vtkFloatArray()
[zCoords.InsertNextValue(vz[k]) for k in xrange(Nz)]
vCoords = vtk.vtkFloatArray()
[vCoords.InsertNextValue(n) for n in xrange(1)]
self.gridfunc.SetDimensions(Nx, Ny, 1)
self.gridfunc.SetXCoordinates(xCoords)
self.gridfunc.SetYCoordinates(yCoords)
self.gridfunc.SetZCoordinates(vCoords)
# get scalar field from z/v-values
self.Colors = vtk.vtkFloatArray()
self.Colors.SetNumberOfComponents(1)
self.Colors.SetNumberOfTuples(Nx * Ny)
pt = mv if mva else vz
[self.Colors.InsertComponent(i + j * Nx, 0, pt[i, j]) for j in xrange(Ny) for i in xrange(Nx)]
self.gridfunc.GetPointData().SetScalars(self.Colors)
self.hasData = True
# scaling
Xrange = vx.max() - vx.min()
Yrange = vy.max() - vy.min()
        # ignore NaNs when computing the z range
Zrange = numpy.nanmax(vz) - numpy.nanmin(vz)
        # both x and y must have a nonzero range
        if 0 in (Xrange, Yrange):
            logging.error('Zero X or Y Axis Range: %s', (Xrange, Yrange))
            self.hasData = False
            raise Exception('Zero X or Y Axis Range: %s' % str((Xrange, Yrange)))
self.XLimit = (vx.min(), vx.max())
self.YLimit = (vy.min(), vy.max())
self.ZLimit = (numpy.nanmin(vz), numpy.nanmax(vz))
# check for constant Z range
if Zrange == 0:
Zrange = max(Xrange, Yrange)
self.XScale = float(Zrange) / float(Xrange)
self.YScale = float(Zrange) / float(Yrange)
self.ZScale = float(Zrange) / float(Zrange)
# fire callbacks
if self.hasData:
self.fireCallbacks(callback='OnDataRange')
logging.debug('Parameters: %s' % str((
Nx, Ny, Nz, ':', Xrange, Yrange, Zrange, ':', self.XScale, self.YScale,
self.ZScale, ':', self.XLimit, self.YLimit, self.ZLimit)))
return self.hasData
def buildColormap(self, color='rainbow', reverse=True):
clut = vtk.vtkLookupTable()
luts = self.config.LookupTables() if self.config else {}
if color in luts:
lut = luts[color]
if reverse:
lut.reverse()
clut.SetNumberOfColors(len(lut))
clut.Build()
for i in xrange(len(lut)):
lt = lut[i]
clut.SetTableValue(i, lt[0], lt[1], lt[2], lt[3])
return clut
hue_range = 0.0, 0.6667
saturation_range = 1.0, 1.0
value_range = 1.0, 1.0
if color.lower() == 'rainbow':
if reverse:
hue_range = 0.0, 0.6667
saturation_range = 1.0, 1.0
value_range = 1.0, 1.0
else:
hue_range = 0.6667, 0.0
saturation_range = 1.0, 1.0
value_range = 1.0, 1.0
elif color.lower() == 'black-white':
if reverse:
hue_range = 0.0, 0.0
saturation_range = 0.0, 0.0
value_range = 1.0, 0.0
else:
hue_range = 0.0, 0.0
saturation_range = 0.0, 0.0
value_range = 0.0, 1.0
clut.SetHueRange(hue_range)
clut.SetSaturationRange(saturation_range)
clut.SetValueRange(value_range)
clut.Build()
return clut
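    # Note: in VTK's HSV lookup tables hue 0.0 is red and ~0.6667 is blue, so
    # 'rainbow' runs red (low) to blue (high) when reverse=True and blue to
    # red otherwise, while 'black-white' flips the value range instead.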
def applyColorMap(self, colorMap='red-blue', reverse=False):
"""
apply color map on the mapper
"""
clut = self.buildColormap(colorMap, reverse)
self.mapper.SetLookupTable(clut)
self.colorbar.SetLookupTable(self.mapper.GetLookupTable())
def makeCustomAxes(self, outline, outlinefilter):
""" create custom axes """
prop = vtk.vtkProperty()
prop.SetColor(self.fgColor)
x = vtk.vtkAxisActor()
x.SetAxisTypeToX()
# x.SetAxisPositionToMinMin()
x.SetCamera(self.renderer.GetActiveCamera())
x.SetBounds(outline.GetBounds())
x.SetProperty(prop)
x.SetRange(self.XLimit)
x.SetPoint1(outline.GetBounds()[0], outline.GetBounds()[2], outline.GetBounds()[4])
x.SetPoint2(outline.GetBounds()[1], outline.GetBounds()[2], outline.GetBounds()[4])
return x
def makeAxes(self, outline, outlinefilter):
""" create axes """
tprop = vtk.vtkTextProperty()
tprop.SetColor(self.fgColor)
tprop.ShadowOff()
prop = vtk.vtkProperty2D()
prop.SetColor(self.fgColor)
zax = vtk.vtkCubeAxesActor()
zax.SetBounds(outline.GetBounds()[0], outline.GetBounds()[1], outline.GetBounds()[2], outline.GetBounds()[3],
outline.GetBounds()[4], outline.GetBounds()[4])
zax.SetDragable(False)
zax.SetCamera(self.renderer.GetActiveCamera())
zax.SetFlyModeToOuterEdges()
zax.DrawXGridlinesOn()
zax.DrawYGridlinesOn()
zax.DrawZGridlinesOn()
zax.SetXTitle('')
zax.SetYTitle('')
zax.SetZTitle('')
zax.SetXAxisMinorTickVisibility(0)
zax.SetYAxisMinorTickVisibility(0)
zax.SetZAxisMinorTickVisibility(0)
zax.SetXAxisLabelVisibility(0)
zax.SetYAxisLabelVisibility(0)
zax.SetZAxisLabelVisibility(0)
axes = vtk.vtkCubeAxesActor2D()
axes.SetDragable(False)
axes.SetInputConnection(outlinefilter.GetOutputPort())
axes.SetCamera(self.renderer.GetActiveCamera())
axes.SetLabelFormat(self.config.LabelFormat())
axes.SetFlyModeToOuterEdges()
axes.SetFontFactor(self.fontFactor)
axes.SetNumberOfLabels(self.config.NLabels())
axes.SetXLabel(self.config.XLabel())
axes.SetYLabel(self.config.YLabel())
axes.SetZLabel(self.config.ZLabel())
axes.SetRanges(self.out.GetBounds())
axes.SetUseRanges(True)
axes.SetProperty(prop)
axes.SetAxisTitleTextProperty(tprop)
axes.SetAxisLabelTextProperty(tprop)
return zax, axes
def _calculateYCutterDelta(self, bounds):
if self.YCutterDelta is None:
self.YCutterDelta = (bounds[3] - bounds[2]) / ((self.YLimit[1] - self.YLimit[0]) * self.YCutterFactor)
return self.YCutterDelta
def _calculateYCutterPos(self, value):
if self.YCutterFactor < 1:
self.YCutterFactor = 1
if value > self.YLimit[1] * self.YCutterFactor:
value = self.YLimit[1] * self.YCutterFactor
if value < self.YLimit[0] * self.YCutterFactor:
value = self.YLimit[0] * self.YCutterFactor
bounds = self.mapper.GetBounds()
delta = self._calculateYCutterDelta(bounds)
npos = bounds[2] + (value - self.YLimit[0] * self.YCutterFactor) * delta
return npos
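    # The cutter helpers map a data-space value linearly into plot space:
    # npos = bounds_min + (value - limit_min * factor) * delta, where delta is
    # the plot extent divided by the (factor-scaled) data extent.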
def moveYCutter(self, value):
if self.YCutterFactor < 1:
self.YCutterFactor = 1
if not self.YCutterTransform:
logging.error('YCutter not enabled')
return
if value > self.YLimit[1] * self.YCutterFactor or value < self.YLimit[0] * self.YCutterFactor:
logging.error('Y: %f Value is outside limits %s' % (value, (self.YLimit, self.YCutterFactor),))
return
npos = self._calculateYCutterPos(value)
ypos = self.YCutterTransform.GetPosition()[1]
move = npos - ypos
if self.yplane:
(x, _y, z) = self.yplane.GetOrigin()
self.yplane.SetOrigin(x, npos, z)
self.YCutterTransform.Translate(0, move, 0)
self.redraw()
def makeYCutter(self, bounds, scaleFactor, value):
# create y plane cutter
npos = self._calculateYCutterPos(value)
self.yplane = vtk.vtkPlane()
self.yplane.SetOrigin(0, npos, 0)
self.yplane.SetNormal(0, 1, 0)
self.YCutter = vtk.vtkCutter()
self.YCutter.SetInputConnection(self.warp.GetOutputPort())
self.YCutter.SetCutFunction(self.yplane)
self.YCutter.GenerateCutScalarsOff()
self.YCutterMapper = vtk.vtkPolyDataMapper()
self.YCutterMapper.SetInputConnection(self.YCutter.GetOutputPort())
# visual plane to move
plane = vtk.vtkPlaneSource()
plane.SetResolution(50, 50)
plane.SetCenter(0, 0, 0)
plane.SetNormal(0, 1, 0)
tran = vtk.vtkTransform()
tran.Translate((bounds[1] - bounds[0]) / 2. - (0 - bounds[0]), npos,
(bounds[5] - bounds[4]) / 2. - (0 - bounds[4]))
tran.Scale((bounds[1] - bounds[0]), 1, bounds[5] - bounds[4])
tran.PostMultiply()
self.YCutterTransform = tran
tranf = vtk.vtkTransformPolyDataFilter()
tranf.SetInputConnection(plane.GetOutputPort())
tranf.SetTransform(tran)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tranf.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.9, 0.9, 0.9)
actor.GetProperty().SetOpacity(self.opacitySlice)
return actor
def moveZCutter(self, value):
if not self.ZCutterTransform:
logging.error('ZCutter not enabled')
return
if value > self.ZLimit[1] * self.ZCutterFactor or value < self.ZLimit[0] * self.ZCutterFactor:
logging.error('Z: %f Value is outside limits %s' % (value, ( self.ZLimit, self.ZCutterFactor),))
return
npos = self._calculateZCutterPos(value)
zpos = self.ZCutterTransform.GetPosition()[2]
move = npos - zpos
if self.zplane:
(x, y, _z) = self.zplane.GetOrigin()
self.zplane.SetOrigin(x, y, npos)
self.ZCutterTransform.Translate(0, 0, move)
self.redraw()
def _calculateZCutterDelta(self, bounds):
if self.ZCutterDelta is None:
self.ZCutterDelta = (bounds[5] - bounds[4]) / ((self.ZLimit[1] - self.ZLimit[0]) * self.ZCutterFactor)
return self.ZCutterDelta
def _calculateZCutterPos(self, value):
if self.ZCutterFactor < 1:
self.ZCutterFactor = 1
if value > self.ZLimit[1] * self.ZCutterFactor:
value = self.ZLimit[1] * self.ZCutterFactor
if value < self.ZLimit[0] * self.ZCutterFactor:
value = self.ZLimit[0] * self.ZCutterFactor
bounds = self.mapper.GetBounds()
delta = self._calculateZCutterDelta(bounds)
npos = bounds[4] + (value - self.ZLimit[0] * self.ZCutterFactor) * delta
return npos
def makeZCutter(self, bounds, scaleFactor, value):
""" create z cutter plane """
npos = self._calculateZCutterPos(value)
self.zplane = vtk.vtkPlane()
self.zplane.SetOrigin(0, 0, npos)
self.zplane.SetNormal(0, 0, 1)
self.ZCutter = vtk.vtkCutter()
self.ZCutter.SetInputConnection(self.warp.GetOutputPort())
self.ZCutter.SetCutFunction(self.zplane)
self.ZCutter.GenerateCutScalarsOff()
self.ZCutterMapper = vtk.vtkPolyDataMapper()
self.ZCutterMapper.SetInputConnection(self.ZCutter.GetOutputPort())
# visual plane to move
plane = vtk.vtkPlaneSource()
plane.SetResolution(50, 50)
plane.SetCenter(0, 0, 0)
plane.SetNormal(0, 0, 1)
tran = vtk.vtkTransform()
tran.Translate((bounds[1] - bounds[0]) / 2. - (0 - bounds[0]), (bounds[3] - bounds[2]) / 2. - (0 - bounds[2]),
npos)
tran.Scale((bounds[1] - bounds[0]), (bounds[3] - bounds[2]), 1)
tran.PostMultiply()
self.ZCutterTransform = tran
tranf = vtk.vtkTransformPolyDataFilter()
tranf.SetInputConnection(plane.GetOutputPort())
tranf.SetTransform(tran)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tranf.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.9, 0.9, 0.9)
actor.GetProperty().SetOpacity(self.opacitySlice)
return actor
def _calculateXCutterDelta(self, bounds):
if self.XCutterDelta is None:
self.XCutterDelta = (bounds[1] - bounds[0]) / ((self.XLimit[1] - self.XLimit[0]) * self.XCutterFactor)
return self.XCutterDelta
def _calculateXCutterPos(self, value):
if self.XCutterFactor < 1:
self.XCutterFactor = 1
if value > self.XLimit[1] * self.XCutterFactor:
value = self.XLimit[1] * self.XCutterFactor
if value < self.XLimit[0] * self.XCutterFactor:
value = self.XLimit[0] * self.XCutterFactor
bounds = self.mapper.GetBounds()
delta = self._calculateXCutterDelta(bounds)
npos = bounds[0] + (value - self.XLimit[0] * self.XCutterFactor) * delta
return npos
def moveXCutter(self, value):
if not self.XCutterTransform:
logging.error('XCutter not enabled')
return
if value > self.XLimit[1] * self.XCutterFactor or value < self.XLimit[0] * self.XCutterFactor:
logging.error('X: %f Value is outside limits %s' % (value, (self.XLimit, self.XCutterFactor),))
return
npos = self._calculateXCutterPos(value)
xpos = self.XCutterTransform.GetPosition()[0]
move = npos - xpos
if self.xplane:
(_x, y, z) = self.xplane.GetOrigin()
self.xplane.SetOrigin(npos, y, z)
self.XCutterTransform.Translate(move, 0, 0)
self.redraw()
def makeXCutter(self, bounds, scaleFactor, value):
# create X plane cutter
npos = self._calculateXCutterPos(value)
self.xplane = vtk.vtkPlane()
self.xplane.SetOrigin(npos, 0, 0)
self.xplane.SetNormal(1, 0, 0)
self.XCutter = vtk.vtkCutter()
self.XCutter.SetInputConnection(self.warp.GetOutputPort())
self.XCutter.SetCutFunction(self.xplane)
self.XCutter.GenerateCutScalarsOff()
self.XCutterMapper = vtk.vtkPolyDataMapper()
self.XCutterMapper.SetInputConnection(self.XCutter.GetOutputPort())
# visual plane to move
plane = vtk.vtkPlaneSource()
plane.SetResolution(50, 50)
plane.SetCenter(0, 0, 0)
plane.SetNormal(1, 0, 0)
tran = vtk.vtkTransform()
tran.Translate(npos, (bounds[3] - bounds[2]) / 2. - (0 - bounds[2]),
(bounds[5] - bounds[4]) / 2. - (0 - bounds[4]))
tran.Scale(1, (bounds[3] - bounds[2]), bounds[5] - bounds[4])
tran.PostMultiply()
self.XCutterTransform = tran
tranf = vtk.vtkTransformPolyDataFilter()
tranf.SetInputConnection(plane.GetOutputPort())
tranf.SetTransform(tran)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tranf.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.9, 0.9, 0.9)
actor.GetProperty().SetOpacity(self.opacitySlice)
return actor
def makeColorbar(self):
""" create colorbar """
colorbar = vtk.vtkScalarBarActor()
colorbar.SetLookupTable(self.mapper.GetLookupTable())
colorbar.SetWidth(0.085)
colorbar.SetHeight(0.8)
colorbar.SetPosition(0.9, 0.1)
colorbar.SetLabelFormat(self.config.LabelFormat())
colorbar.SetNumberOfLabels(5)
text_prop_cb = colorbar.GetLabelTextProperty()
        text_prop_cb.SetFontFamilyToArial()
text_prop_cb.SetColor(self.fgColor)
text_prop_cb.SetFontSize(1)
text_prop_cb.ShadowOff()
colorbar.SetLabelTextProperty(text_prop_cb)
return colorbar
def parseRenderArgs(self, **args):
""" parse class args """
self.XCutterFactor = args.get('XCutterFactor', 1)
self.YCutterFactor = args.get('YCutterFactor', 1)
self.ZCutterFactor = args.get('ZCutterFactor', 1)
self.rotateX = args.get('rotateX', 30)
self.rotateY = args.get('rotateY', 30)
self.rotateZ = args.get('rotateZ', 120)
self.zoomFactor = args.get('zoomFactor', 1)
self.posFactor = args.get('posFactor', 1)
def render(self, **args):
""" main function to render all required objects """
gridData = args.get('gridData', True)
drawSurface = args.get('drawSurface', True)
drawAxes = args.get('drawAxes', True)
drawColorBar = args.get('drawColorBar', True)
drawLegend = args.get('drawLegend', True)
wireSurface = args.get('wireSurface', False)
drawBox = args.get('drawBox', True)
scaleFactor = args.get('scaleFactor', (1, 1, 1))
autoscale = args.get('autoScale', True)
colorMap = args.get('colorMap', 'rainbow')
reverseMap = args.get('reverseMap', False)
drawGrid = args.get('drawGrid', False)
resolution = args.get('gridResolution', 10)
xtics = args.get('xtics', 0)
ytics = args.get('ytics', 0)
ztics = args.get('ztics', 0)
planeGrid = args.get('planeGrid', True)
xCutterOn = args.get('XCutterOn', True)
yCutterOn = args.get('YCutterOn', True)
zCutterOn = args.get('ZCutterOn', True)
xCutterPos = args.get('XCutterPos', None)
yCutterPos = args.get('YCutterPos', None)
zCutterPos = args.get('ZCutterPos', None)
self.parseRenderArgs(**args)
if gridData:
geometry = vtk.vtkStructuredGridGeometryFilter()
else:
geometry = vtk.vtkRectilinearGridGeometryFilter()
geometry.SetInputData(self.gridfunc)
geometry.SetExtent(self.gridfunc.GetExtent())
if gridData:
wzscale = self.computeScale(self.gridfunc)
self.out = geometry.GetOutput()
else:
geometry.SetExtent(self.gridfunc.GetExtent())
geometry.GetOutput().SetPoints(self.Points)
geometry.GetOutput().GetPointData().SetScalars(self.Colors)
geometry.GetOutput().Update()
self.out = geometry.GetOutput()
self.out.SetPoints(self.Points)
self.out.GetPointData().SetScalars(self.Colors)
self.out.Update()
wzscale = self.computeScale(self.out)
x = self.XScale if autoscale else self.XScale * scaleFactor[0]
y = self.YScale if autoscale else self.YScale * scaleFactor[1]
z = 0.5 * self.ZScale if autoscale else self.ZScale * scaleFactor[2]
transform = vtk.vtkTransform()
transform.Scale(x, y, z)
trans = vtk.vtkTransformPolyDataFilter()
trans.SetInputConnection(geometry.GetOutputPort())
trans.SetTransform(transform)
localScale = wzscale if wzscale < 1 else 1 / wzscale
self.warp = vtk.vtkWarpScalar()
self.warp.XYPlaneOn()
self.warp.SetInputConnection(trans.GetOutputPort())
self.warp.SetNormal(0, 0, 1)
self.warp.UseNormalOn()
self.warp.SetScaleFactor(localScale)
tmp = self.gridfunc.GetScalarRange()
# map gridfunction
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInputConnection(self.warp.GetOutputPort())
# calculate ranges
if self.customZRange:
self.mapper.SetScalarRange(*self.customZRange)
elif self.autoZRange:
mx = max(abs(tmp[0]), abs(tmp[1]))
self.mapper.SetScalarRange(-mx, mx)
else:
self.mapper.SetScalarRange(tmp[0], tmp[1])
wireActor = None
bounds = self.mapper.GetBounds()
# wire mapper
if planeGrid:
if not gridData:
self.plane = vtk.vtkRectilinearGridGeometryFilter()
self.plane.SetInput(self.gridfunc)
self.plane.SetExtent(self.gridfunc.GetExtent())
x_, y_ = x, y
else:
self.plane = vtk.vtkPlaneSource()
self.plane.SetXResolution(resolution)
self.plane.SetYResolution(resolution)
x_, y_ = bounds[1] - bounds[0], bounds[3] - bounds[2]
pltr = vtk.vtkTransform()
pltr.Translate((bounds[1] - bounds[0]) / 2. - (0 - bounds[0]),
(bounds[3] - bounds[2]) / 2. - (0 - bounds[2]), bounds[4])
pltr.Scale(x_, y_, 1)
pltran = vtk.vtkTransformPolyDataFilter()
pltran.SetInputConnection(self.plane.GetOutputPort())
pltran.SetTransform(pltr)
cmap = self.buildColormap('black-white', True)
rgridMapper = vtk.vtkPolyDataMapper()
rgridMapper.SetInputConnection(pltran.GetOutputPort())
rgridMapper.SetLookupTable(cmap)
wireActor = vtk.vtkActor()
wireActor.SetMapper(rgridMapper)
wireActor.GetProperty().SetRepresentationToWireframe()
wireActor.GetProperty().SetColor(self.fgColor)
# xcutter actor
xactor = None
if xCutterOn:
xactor = self.makeXCutter(bounds, scaleFactor, xCutterPos)
# ycutter actor
yactor = None
if yCutterOn:
yactor = self.makeYCutter(bounds, scaleFactor, yCutterPos)
# zcutter actor
zactor = None
if zCutterOn:
zactor = self.makeZCutter(bounds, scaleFactor, zCutterPos)
# create plot surface actor
surfplot = vtk.vtkActor()
surfplot.SetMapper(self.mapper)
if wireSurface:
surfplot.GetProperty().SetRepresentationToWireframe()
# color map
clut = self.buildColormap(colorMap, reverseMap)
self.mapper.SetLookupTable(clut)
# create outline
outlinefilter = vtk.vtkOutlineFilter()
outlinefilter.SetInputConnection(self.warp.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outlinefilter.GetOutputPort())
outline = vtk.vtkActor()
outline.SetMapper(outlineMapper)
outline.GetProperty().SetColor(self.fgColor)
# make axes
zax, axes = self.makeAxes(outline, outlinefilter)
# setup axes
xaxis = axes.GetXAxisActor2D()
yaxis = axes.GetYAxisActor2D()
zaxis = axes.GetZAxisActor2D()
xaxis.SetLabelFormat(self.config.XLabelsFormat())
xaxis.SetAdjustLabels(1)
xaxis.SetNumberOfMinorTicks(xtics)
yaxis.SetLabelFormat(self.config.YLabelsFormat())
yaxis.SetNumberOfMinorTicks(ytics)
yaxis.SetAdjustLabels(1)
zaxis.SetLabelFormat(self.config.ZLabelsFormat())
zaxis.SetNumberOfMinorTicks(ztics)
zaxis.SetAdjustLabels(1)
# create colorbar
colorbar = self.makeColorbar()
# renderer
if drawSurface:
self.renderer.AddActor(surfplot)
self.actors.append(surfplot)
if drawGrid:
self.renderer.AddViewProp(zax)
self.actors.append(zax)
if planeGrid:
self.renderer.AddActor(wireActor)
self.actors.append(wireActor)
if drawBox:
self.renderer.AddActor(outline)
self.actors.append(outline)
if drawAxes:
self.renderer.AddViewProp(axes)
self.actors.append(axes)
if drawColorBar or drawLegend:
self.renderer.AddActor(colorbar)
self.actors.append(colorbar)
self.colorbar = colorbar
self._addPlaneCutters(xactor, yactor, zactor, xCutterOn, yCutterOn, zCutterOn)
def _addPlaneCutters(self, xactor, yactor, zactor, xCutterOn, yCutterOn, zCutterOn):
''' add plane cutters '''
if xCutterOn and xactor:
self.renderer.AddActor(xactor)
self.actors.append(xactor)
if yCutterOn and yactor:
self.renderer.AddActor(yactor)
self.actors.append(yactor)
if zCutterOn and zactor:
self.renderer.AddActor(zactor)
self.actors.append(zactor)
def renderToPng(self, path=None):
if not path:
return
renWin = self.getRenderWindow()
w2i = vtk.vtkWindowToImageFilter()
w2i.SetMagnification(1)
w2i.SetInputBufferTypeToRGBA()
w2i.SetInput(renWin)
w2i.Update()
pngfile = vtk.vtkPNGWriter()
pngfile.SetInputConnection(w2i.GetOutputPort())
pngfile.SetFileName(path)
self.redraw()
pngfile.Write()
|
viz4biz/PyDataNYC2015
|
vtklib/vtk_surface.py
|
Python
|
apache-2.0
| 42,076
|
[
"VTK"
] |
e1e276305e5d5cab93914fb209be2a8c33e78a583bb436ab5f3d636571205f5d
|
import numpy as np
import itertools
class Structure:
def __init__(self,
positions=None,
scaled_positions=None,
masses=None,
cell=None,
force_sets=None,
force_constants=None,
atomic_numbers=None,
atomic_elements=None,
atom_type_index=None,
primitive_matrix=None):
"""
:param positions: atoms cartesian positions (array Ndim x Natoms)
:param scaled_positions: atom positions scaled to 1 (array Ndim x Natoms)
:param masses: masses of the atoms (vector NAtoms)
:param cell: Numpy array containing the unit cell (lattice vectors in rows)
        :param force_sets: force sets (displacements and forces) from which harmonic force constants are built
        :param force_constants: harmonic force constants
        :param atomic_numbers: atomic numbers vector (1 x Natoms)
        :param atomic_elements: element symbols of each atom (ex: H, Be, Si, ...) (vector Natoms)
        :param atom_type_index: index vector mapping each atom to one of the different atom types in the crystal (vector Natoms)
        :param primitive_matrix: matrix that defines the primitive cell with respect to the unit cell
"""
self._cell = np.array(cell, dtype='double')
self._masses = np.array(masses, dtype='double')
self._atomic_numbers = np.array(atomic_numbers, dtype='double')
self._force_constants = force_constants
self._force_sets = force_sets
self._atomic_elements = atomic_elements
self._atom_type_index = atom_type_index
self._scaled_positions = scaled_positions
self._positions = positions
self._primitive_matrix = primitive_matrix
self._primitive_cell = None
self._supercell_matrix = None
self._supercell_phonon = None
self._supercell_phonon_renormalized = None
self._number_of_cell_atoms = None
self._number_of_atoms = None
self._number_of_atom_types = None
self._number_of_primitive_atoms = None
# Get atomic types from masses if available
if atomic_elements is None and masses is not None:
self._atomic_elements = []
for i in masses:
for j in atom_data:
if "{0:.1f}".format(i) == "{0:.1f}".format(j[3]):
self._atomic_elements.append(j[1])
# Get atomic numbers and masses from available information
if atomic_numbers is None and self._atomic_elements is not None:
self._atomic_numbers = np.array([symbol_map[i] for i in self._atomic_elements])
else:
self._atomic_numbers = np.array(atomic_numbers)
if masses is None and self._atomic_numbers is not None:
self._masses = np.array([atom_data[i][3] for i in self._atomic_numbers])
else:
self._masses = masses
# -------- Methods start here -----------
# Getting data
def get_data_from_dict(self, data_dictionary):
for data in self.__dict__:
try:
self.__dict__[data] = data_dictionary[data]
except KeyError:
continue
# Cell related methods
def set_cell(self, cell):
self._cell = np.array(cell, dtype='double')
def get_cell(self, supercell=None):
if supercell is not None:
return np.dot(self._cell, np.diag(supercell))
return self._cell
def get_supercell_phonon(self):
if self.get_force_constants() is not None:
supercell_phonon = self.get_force_constants().get_supercell()
elif self.get_force_sets() is not None:
supercell_phonon = self.get_force_sets().get_supercell()
else:
supercell_phonon = np.identity(self.get_number_of_dimensions(), dtype=int)
return supercell_phonon
def set_supercell_matrix(self, supercell_matrix):
self._supercell_matrix = supercell_matrix
def get_supercell_matrix(self):
if self._supercell_matrix is None:
self._supercell_matrix = np.array(self.get_number_of_dimensions() * [1], dtype=int)
return self._supercell_matrix
def get_primitive_cell(self):
if self._primitive_cell is None:
self._primitive_cell = np.dot(self.get_cell().T, self.get_primitive_matrix()).T
return self._primitive_cell
# Cell matrix related methods
def set_primitive_matrix(self,primitive_matrix):
self._primitive_matrix = primitive_matrix
self._number_of_atom_types = None
self._number_of_primitive_atoms = None
self._atom_type_index = None
self._primitive_cell = None
def get_primitive_matrix(self):
if self._primitive_matrix is None:
if self._primitive_cell is None:
print('Warning: No primitive matrix defined! Using unit cell as primitive')
self._primitive_matrix = np.identity(self.get_number_of_dimensions())
else:
self._primitive_matrix = np.dot(np.linalg.inv(self.get_cell()), self._primitive_cell)
return self._primitive_matrix
# Positions related methods
def set_positions(self, cart_positions):
self._scaled_positions = np.dot(cart_positions, np.linalg.inv(self.get_cell()))
def get_positions(self, supercell=None):
if self._positions is None:
if self._scaled_positions is None:
print('No positions provided')
exit()
else:
self._positions = np.dot(self._scaled_positions, self.get_cell())
if supercell is None:
supercell = self.get_number_of_dimensions() * [1]
position_supercell = []
for k in range(self._positions.shape[0]):
            for r in itertools.product(*[range(i) for i in supercell[::-1]]):
position_supercell.append(self._positions[k,:] + np.dot(np.array(r[::-1]), self.get_cell()))
position_supercell = np.array(position_supercell)
return position_supercell
def get_scaled_positions(self, supercell=None):
if self._scaled_positions is None:
self._scaled_positions = np.dot(self.get_positions(), np.linalg.inv(self.get_cell()))
if supercell is not None:
cell = self.get_cell(supercell=supercell)
scaled_positions = np.dot(self.get_positions(supercell), np.linalg.inv(cell))
return scaled_positions
return self._scaled_positions
# Force related methods
def forces_available(self):
        return (self.get_force_constants() is not None or
                self.get_force_sets() is not None)
def set_force_constants(self, force_constants):
self._force_constants = force_constants
def get_force_constants(self):
return self._force_constants
def set_force_set(self, force_set):
self._force_sets = force_set
def get_force_sets(self):
        if self._force_sets is not None:
            force_atoms_file = self._force_sets.get_dict()['natom']
            force_atoms_input = np.prod(np.diagonal(self._force_sets.get_supercell())) * self.get_number_of_atoms()
if force_atoms_file != force_atoms_input:
print("Error: FORCE_SETS file does not match with SUPERCELL MATRIX")
exit()
return self._force_sets
def set_masses(self, masses):
self._masses = np.array(masses, dtype='double')
def get_masses(self, supercell=None):
if supercell is None:
supercell = self.get_number_of_dimensions() * [1]
mass_supercell = []
for j in range(self.get_number_of_cell_atoms()):
mass_supercell += [ self._masses[j] ] * np.prod(supercell)
return mass_supercell
# Number of atoms and dimensions related methods
def get_number_of_cell_atoms(self):
if self._number_of_cell_atoms is None:
self._number_of_cell_atoms = self.get_positions().shape[0]
return self._number_of_cell_atoms
def get_number_of_dimensions(self):
return self.get_cell().shape[1]
def get_atomic_numbers(self, supercell=None):
if supercell is None:
supercell = self.get_number_of_dimensions() * [1]
atomic_numbers_supercell = []
for j in range(self.get_number_of_cell_atoms()):
atomic_numbers_supercell += [self._atomic_numbers[j] ] * np.prod(supercell)
return np.array(atomic_numbers_supercell,dtype=int)
def get_number_of_atoms(self):
if self._number_of_atoms is None:
self._number_of_atoms = self.get_number_of_cell_atoms()
return self._number_of_atoms
def set_number_of_atoms(self,number_of_atoms):
self._number_of_atoms = number_of_atoms
def get_number_of_atom_types(self):
if self._number_of_atom_types is None:
self._number_of_atom_types = len(set(self.get_atom_type_index()))
return self._number_of_atom_types
def get_number_of_primitive_atoms(self):
if self._number_of_primitive_atoms is None:
self._number_of_primitive_atoms = len(set(self.get_atom_type_index()))
return self._number_of_primitive_atoms
def set_number_of_primitive_atoms(self,number_of_primitive_atoms):
self._number_of_primitive_atoms = number_of_primitive_atoms
# Atomic types related methods
def get_atomic_elements(self, supercell=None, unique=False):
if supercell is None:
supercell = self.get_number_of_dimensions() * [1]
atomic_types = []
for j in range(self.get_number_of_cell_atoms()):
atomic_types += [self._atomic_elements[j]] * np.prod(supercell)
if unique:
unique_indices = np.unique(self.get_atom_type_index(supercell=supercell), return_index=True)[1]
atomic_types = np.array(atomic_types)[unique_indices]
return atomic_types
def set_atom_type_index_by_element(self):
if self._atom_type_index is None:
self._atom_type_index = self._atomic_numbers.copy()
for i in range(self.get_number_of_cell_atoms()):
for index in range(self.get_number_of_atom_types()):
if self._atomic_numbers[index] == self._atomic_numbers[i]:
self._atom_type_index[i] = index
def get_atom_type_index(self, supercell=None):
# Tolerance for accepting equivalent atoms in super cell
masses = self.get_masses(supercell=supercell)
tolerance = 0.001
if self._atom_type_index is None:
primitive_cell_inverse = np.linalg.inv(self.get_primitive_cell())
self._atom_type_index = np.array(self.get_number_of_cell_atoms() * [None])
counter = 0
for i in range(self.get_number_of_cell_atoms()):
if self._atom_type_index[i] is None:
self._atom_type_index[i] = counter
counter += 1
for j in range(i+1, self.get_number_of_cell_atoms()):
coordinates_atom_i = self.get_positions()[i]
coordinates_atom_j = self.get_positions()[j]
difference_in_cell_coordinates = np.around((np.dot(primitive_cell_inverse.T, (coordinates_atom_j - coordinates_atom_i))))
projected_coordinates_atom_j = coordinates_atom_j - np.dot(self.get_primitive_cell().T, difference_in_cell_coordinates)
separation = pow(np.linalg.norm(projected_coordinates_atom_j - coordinates_atom_i),2)
if separation < tolerance and masses[i] == masses[j]:
self._atom_type_index[j] = self._atom_type_index[i]
self._atom_type_index = np.array(self._atom_type_index,dtype=int)
if supercell is None:
supercell = self.get_number_of_dimensions() * [1]
atom_type_index_supercell = []
for j in range(self.get_number_of_cell_atoms()):
atom_type_index_supercell += [self._atom_type_index[j] ] * np.prod(supercell)
return atom_type_index_supercell
def get_cell_parameters(self, supercell=None):
if supercell is None:
supercell = self.get_number_of_dimensions() * [1]
cell = self.get_cell(supercell=supercell)
a = np.linalg.norm(cell[0])
b = np.linalg.norm(cell[1])
c = np.linalg.norm(cell[2])
alpha = np.arccos(np.dot(cell[1], cell[2])/(c*b))
gamma = np.arccos(np.dot(cell[1], cell[0])/(a*b))
beta = np.arccos(np.dot(cell[2], cell[0])/(a*c))
return a, b, c, alpha, beta, gamma
def get_commensurate_points(self, supercell=None):
if supercell is None:
supercell = self.get_number_of_dimensions() * [1]
primitive_matrix = self.get_primitive_matrix()
commensurate_points = []
for k1 in np.arange(-0.5, 0.5, 1./(supercell[0]*2)):
for k2 in np.arange(-0.5, 0.5, 1./(supercell[1]*2)):
for k3 in np.arange(-0.5, 0.5, 1./(supercell[2]*2)):
q_point = [np.around(k1,decimals=5), np.around(k2,decimals=5), np.around(k3,decimals=5)]
q_point_unit_cell = np.dot(q_point, np.linalg.inv(primitive_matrix))
q_point_unit_cell = np.multiply(q_point_unit_cell, supercell) * 2
if np.all(np.equal(np.mod(q_point_unit_cell, 1), 0)):
commensurate_points.append(q_point)
return commensurate_points
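    # Note (editor's addition, not in the original source): the half-step grid
    # and the factor of 2 above admit q-points commensurate with the primitive
    # cell rather than only the unit cell; a q-point is kept when, expressed in
    # unit-cell coordinates and scaled by twice the supercell, every component
    # is an integer.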
def get_path_using_seek_path(self):
try:
import seekpath
cell = self.get_cell()
positions = self.get_scaled_positions()
numbers = np.unique(self.get_atomic_elements(), return_inverse=True)[1]
structure = (cell, positions, numbers)
path_data = seekpath.get_path(structure)
labels = path_data['point_coords']
band_ranges = []
            for segment in path_data['path']:
                band_ranges.append([labels[segment[0]], labels[segment[1]]])
return {'ranges': band_ranges,
'labels': path_data['path']}
except ImportError:
            print('Seekpath not installed. Autopath is deactivated')
band_ranges=([[[0.0, 0.0, 0.0], [0.5, 0.0, 0.5]]])
return {'ranges': band_ranges,
'labels': [['GAMMA', '1/2,0,1/2']]}
atom_data = [
[ 0, "X", "X", 0],
[ 1, "H", "Hydrogen", 1.00794],
[ 2, "He", "Helium", 4.002602],
[ 3, "Li", "Lithium", 6.941],
[ 4, "Be", "Beryllium", 9.012182],
[ 5, "B", "Boron", 10.811],
[ 6, "C", "Carbon", 12.0107],
[ 7, "N", "Nitrogen", 14.0067],
[ 8, "O", "Oxygen", 15.9994],
[ 9, "F", "Fluorine", 18.9984032],
[ 10, "Ne", "Neon", 20.1797],
[ 11, "Na", "Sodium", 22.98976928],
[ 12, "Mg", "Magnesium", 24.3050],
[ 13, "Al", "Aluminium", 26.9815386],
[ 14, "Si", "Silicon", 28.0855],
[ 15, "P", "Phosphorus", 30.973762],
[ 16, "S", "Sulfur", 32.065],
[ 17, "Cl", "Chlorine", 35.453],
[ 18, "Ar", "Argon", 39.948],
[ 19, "K", "Potassium", 39.0983],
[ 20, "Ca", "Calcium", 40.078],
[ 21, "Sc", "Scandium", 44.955912],
[ 22, "Ti", "Titanium", 47.867],
[ 23, "V", "Vanadium", 50.9415],
[ 24, "Cr", "Chromium", 51.9961],
[ 25, "Mn", "Manganese", 54.938045],
[ 26, "Fe", "Iron", 55.845],
[ 27, "Co", "Cobalt", 58.933195],
[ 28, "Ni", "Nickel", 58.6934],
[ 29, "Cu", "Copper", 63.546],
[ 30, "Zn", "Zinc", 65.38],
[ 31, "Ga", "Gallium", 69.723],
[ 32, "Ge", "Germanium", 72.64],
[ 33, "As", "Arsenic", 74.92160],
[ 34, "Se", "Selenium", 78.96],
[ 35, "Br", "Bromine", 79.904],
[ 36, "Kr", "Krypton", 83.798],
[ 37, "Rb", "Rubidium", 85.4678],
[ 38, "Sr", "Strontium", 87.62],
[ 39, "Y", "Yttrium", 88.90585],
[ 40, "Zr", "Zirconium", 91.224],
[ 41, "Nb", "Niobium", 92.90638],
[ 42, "Mo", "Molybdenum", 95.96],
[ 43, "Tc", "Technetium", 0],
[ 44, "Ru", "Ruthenium", 101.07],
[ 45, "Rh", "Rhodium", 102.90550],
[ 46, "Pd", "Palladium", 106.42],
[ 47, "Ag", "Silver", 107.8682],
[ 48, "Cd", "Cadmium", 112.411],
[ 49, "In", "Indium", 114.818],
[ 50, "Sn", "Tin", 118.710],
[ 51, "Sb", "Antimony", 121.760],
[ 52, "Te", "Tellurium", 127.60],
[ 53, "I", "Iodine", 126.90447],
[ 54, "Xe", "Xenon", 131.293],
[ 55, "Cs", "Caesium", 132.9054519],
[ 56, "Ba", "Barium", 137.327],
[ 57, "La", "Lanthanum", 138.90547],
[ 58, "Ce", "Cerium", 140.116],
[ 59, "Pr", "Praseodymium", 140.90765],
[ 60, "Nd", "Neodymium", 144.242],
[ 61, "Pm", "Promethium", 0],
[ 62, "Sm", "Samarium", 150.36],
[ 63, "Eu", "Europium", 151.964],
[ 64, "Gd", "Gadolinium", 157.25],
[ 65, "Tb", "Terbium", 158.92535],
[ 66, "Dy", "Dysprosium", 162.500],
[ 67, "Ho", "Holmium", 164.93032],
[ 68, "Er", "Erbium", 167.259],
[ 69, "Tm", "Thulium", 168.93421],
[ 70, "Yb", "Ytterbium", 173.054],
[ 71, "Lu", "Lutetium", 174.9668],
[ 72, "Hf", "Hafnium", 178.49],
[ 73, "Ta", "Tantalum", 180.94788],
[ 74, "W", "Tungsten", 183.84],
[ 75, "Re", "Rhenium", 186.207],
[ 76, "Os", "Osmium", 190.23],
[ 77, "Ir", "Iridium", 192.217],
[ 78, "Pt", "Platinum", 195.084],
[ 79, "Au", "Gold", 196.966569],
[ 80, "Hg", "Mercury", 200.59],
[ 81, "Tl", "Thallium", 204.3833],
[ 82, "Pb", "Lead", 207.2],
[ 83, "Bi", "Bismuth", 208.98040],
[ 84, "Po", "Polonium", 0],
[ 85, "At", "Astatine", 0],
[ 86, "Rn", "Radon", 0],
[ 87, "Fr", "Francium", 0],
[ 88, "Ra", "Radium", 0],
[ 89, "Ac", "Actinium", 0],
[ 90, "Th", "Thorium", 232.03806],
[ 91, "Pa", "Protactinium", 231.03588],
[ 92, "U", "Uranium", 238.02891],
[ 93, "Np", "Neptunium", 0],
[ 94, "Pu", "Plutonium", 0],
[ 95, "Am", "Americium", 0],
[ 96, "Cm", "Curium", 0],
[ 97, "Bk", "Berkelium", 0],
[ 98, "Cf", "Californium", 0],
[ 99, "Es", "Einsteinium", 0],
[100, "Fm", "Fermium", 0],
[101, "Md", "Mendelevium", 0],
[102, "No", "Nobelium", 0],
[103, "Lr", "Lawrencium", 0],
[104, "Rf", "Rutherfordium", 0],
[105, "Db", "Dubnium", 0],
[106, "Sg", "Seaborgium", 0],
[107, "Bh", "Bohrium", 0],
[108, "Hs", "Hassium", 0],
[109, "Mt", "Meitnerium", 0],
[110, "Ds", "Darmstadtium", 0],
[111, "Rg", "Roentgenium", 0],
[112, "Cn", "Copernicium", 0],
[113, "Uut", "Ununtrium", 0],
[114, "Uuq", "Ununquadium", 0],
[115, "Uup", "Ununpentium", 0],
[116, "Uuh", "Ununhexium", 0],
[117, "Uus", "Ununseptium", 0],
[118, "Uuo", "Ununoctium", 0],
]
symbol_map = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
"Na": 11,
"Mg": 12,
"Al": 13,
"Si": 14,
"P": 15,
"S": 16,
"Cl": 17,
"Ar": 18,
"K": 19,
"Ca": 20,
"Sc": 21,
"Ti": 22,
"V": 23,
"Cr": 24,
"Mn": 25,
"Fe": 26,
"Co": 27,
"Ni": 28,
"Cu": 29,
"Zn": 30,
"Ga": 31,
"Ge": 32,
"As": 33,
"Se": 34,
"Br": 35,
"Kr": 36,
"Rb": 37,
"Sr": 38,
"Y": 39,
"Zr": 40,
"Nb": 41,
"Mo": 42,
"Tc": 43,
"Ru": 44,
"Rh": 45,
"Pd": 46,
"Ag": 47,
"Cd": 48,
"In": 49,
"Sn": 50,
"Sb": 51,
"Te": 52,
"I": 53,
"Xe": 54,
"Cs": 55,
"Ba": 56,
"La": 57,
"Ce": 58,
"Pr": 59,
"Nd": 60,
"Pm": 61,
"Sm": 62,
"Eu": 63,
"Gd": 64,
"Tb": 65,
"Dy": 66,
"Ho": 67,
"Er": 68,
"Tm": 69,
"Yb": 70,
"Lu": 71,
"Hf": 72,
"Ta": 73,
"W": 74,
"Re": 75,
"Os": 76,
"Ir": 77,
"Pt": 78,
"Au": 79,
"Hg": 80,
"Tl": 81,
"Pb": 82,
"Bi": 83,
"Po": 84,
"At": 85,
"Rn": 86,
"Fr": 87,
"Ra": 88,
"Ac": 89,
"Th": 90,
"Pa": 91,
"U": 92,
"Np": 93,
"Pu": 94,
"Am": 95,
"Cm": 96,
"Bk": 97,
"Cf": 98,
"Es": 99,
"Fm": 100,
"Md": 101,
"No": 102,
"Lr": 103,
"Rf": 104,
"Db": 105,
"Sg": 106,
"Bh": 107,
"Hs": 108,
"Mt": 109,
"Ds": 110,
"Rg": 111,
"Cn": 112,
"Uut": 113,
"Uuq": 114,
"Uup": 115,
"Uuh": 116,
"Uus": 117,
"Uuo": 118,
}
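# Illustrative helper (editor's sketch, not part of the original module): the
# two lookup tables above are typically combined to go from a chemical symbol
# to a row of atom_data, e.g. to fetch a standard atomic mass.
def _example_mass_from_symbol(symbol):
    """Return the atomic mass for a chemical symbol, e.g. 'Fe' -> 55.845."""
    return atom_data[symbol_map[symbol]][3]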
|
abelcarreras/DynaPhoPy
|
dynaphopy/atoms.py
|
Python
|
mit
| 20,666
|
[
"CRYSTAL"
] |
52e3d03a7f8a4121ef013419a73ce024ce6a28dc380b7ab0cbbbadf9375fb269
|
"""Support for control of ElkM1 sensors."""
from elkm1_lib.const import (
SettingFormat,
ZoneLogicalStatus,
ZonePhysicalStatus,
ZoneType,
)
from elkm1_lib.util import pretty_const, username
import voluptuous as vol
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import ELECTRIC_POTENTIAL_VOLT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_platform
from . import ElkAttachedEntity, create_elk_entities
from .const import ATTR_VALUE, DOMAIN, ELK_USER_CODE_SERVICE_SCHEMA
SERVICE_SENSOR_COUNTER_REFRESH = "sensor_counter_refresh"
SERVICE_SENSOR_COUNTER_SET = "sensor_counter_set"
SERVICE_SENSOR_ZONE_BYPASS = "sensor_zone_bypass"
SERVICE_SENSOR_ZONE_TRIGGER = "sensor_zone_trigger"
UNDEFINED_TEMPERATURE = -40
ELK_SET_COUNTER_SERVICE_SCHEMA = {
vol.Required(ATTR_VALUE): vol.All(vol.Coerce(int), vol.Range(0, 65535))
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Create the Elk-M1 sensor platform."""
elk_data = hass.data[DOMAIN][config_entry.entry_id]
entities = []
elk = elk_data["elk"]
create_elk_entities(elk_data, elk.counters, "counter", ElkCounter, entities)
create_elk_entities(elk_data, elk.keypads, "keypad", ElkKeypad, entities)
create_elk_entities(elk_data, [elk.panel], "panel", ElkPanel, entities)
create_elk_entities(elk_data, elk.settings, "setting", ElkSetting, entities)
create_elk_entities(elk_data, elk.zones, "zone", ElkZone, entities)
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SENSOR_COUNTER_REFRESH,
{},
"async_counter_refresh",
)
platform.async_register_entity_service(
SERVICE_SENSOR_COUNTER_SET,
ELK_SET_COUNTER_SERVICE_SCHEMA,
"async_counter_set",
)
platform.async_register_entity_service(
SERVICE_SENSOR_ZONE_BYPASS,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_zone_bypass",
)
platform.async_register_entity_service(
SERVICE_SENSOR_ZONE_TRIGGER,
{},
"async_zone_trigger",
)
def temperature_to_state(temperature, undefined_temperature):
"""Convert temperature to a state."""
return temperature if temperature > undefined_temperature else None
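# Example (editor's note, not in the original source): ElkM1 panels appear to
# report -40 when no temperature probe is attached, so
# temperature_to_state(-40, UNDEFINED_TEMPERATURE) yields None and the sensor
# reports no value, while any reading above -40 passes through unchanged.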
class ElkSensor(ElkAttachedEntity, SensorEntity):
"""Base representation of Elk-M1 sensor."""
def __init__(self, element, elk, elk_data):
"""Initialize the base of all Elk sensors."""
super().__init__(element, elk, elk_data)
self._state = None
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
async def async_counter_refresh(self):
"""Refresh the value of a counter from the panel."""
if not isinstance(self, ElkCounter):
raise HomeAssistantError("supported only on ElkM1 Counter sensors")
self._element.get()
async def async_counter_set(self, value=None):
"""Set the value of a counter on the panel."""
if not isinstance(self, ElkCounter):
raise HomeAssistantError("supported only on ElkM1 Counter sensors")
self._element.set(value)
async def async_zone_bypass(self, code=None):
"""Bypass zone."""
if not isinstance(self, ElkZone):
raise HomeAssistantError("supported only on ElkM1 Zone sensors")
self._element.bypass(code)
async def async_zone_trigger(self):
"""Trigger zone."""
if not isinstance(self, ElkZone):
raise HomeAssistantError("supported only on ElkM1 Zone sensors")
self._element.trigger()
class ElkCounter(ElkSensor):
"""Representation of an Elk-M1 Counter."""
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:numeric"
def _element_changed(self, element, changeset):
self._state = self._element.value
class ElkKeypad(ElkSensor):
"""Representation of an Elk-M1 Keypad."""
@property
def temperature_unit(self):
"""Return the temperature unit."""
return self._temperature_unit
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return self._temperature_unit
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:thermometer-lines"
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
attrs = self.initial_attrs()
attrs["area"] = self._element.area + 1
attrs["temperature"] = self._state
attrs["last_user_time"] = self._element.last_user_time.isoformat()
attrs["last_user"] = self._element.last_user + 1
attrs["code"] = self._element.code
attrs["last_user_name"] = username(self._elk, self._element.last_user)
attrs["last_keypress"] = self._element.last_keypress
return attrs
def _element_changed(self, element, changeset):
self._state = temperature_to_state(
            self._element.temperature, UNDEFINED_TEMPERATURE
)
class ElkPanel(ElkSensor):
"""Representation of an Elk-M1 Panel."""
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:home"
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
attrs = self.initial_attrs()
attrs["system_trouble_status"] = self._element.system_trouble_status
return attrs
def _element_changed(self, element, changeset):
if self._elk.is_connected():
self._state = (
"Paused" if self._element.remote_programming_status else "Connected"
)
else:
self._state = "Disconnected"
class ElkSetting(ElkSensor):
"""Representation of an Elk-M1 Setting."""
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:numeric"
def _element_changed(self, element, changeset):
self._state = self._element.value
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
attrs = self.initial_attrs()
attrs["value_format"] = SettingFormat(self._element.value_format).name.lower()
return attrs
class ElkZone(ElkSensor):
"""Representation of an Elk-M1 Zone."""
@property
def icon(self):
"""Icon to use in the frontend."""
zone_icons = {
ZoneType.FIRE_ALARM.value: "fire",
ZoneType.FIRE_VERIFIED.value: "fire",
ZoneType.FIRE_SUPERVISORY.value: "fire",
ZoneType.KEYFOB.value: "key",
ZoneType.NON_ALARM.value: "alarm-off",
ZoneType.MEDICAL_ALARM.value: "medical-bag",
ZoneType.POLICE_ALARM.value: "alarm-light",
ZoneType.POLICE_NO_INDICATION.value: "alarm-light",
ZoneType.KEY_MOMENTARY_ARM_DISARM.value: "power",
ZoneType.KEY_MOMENTARY_ARM_AWAY.value: "power",
ZoneType.KEY_MOMENTARY_ARM_STAY.value: "power",
ZoneType.KEY_MOMENTARY_DISARM.value: "power",
ZoneType.KEY_ON_OFF.value: "toggle-switch",
ZoneType.MUTE_AUDIBLES.value: "volume-mute",
ZoneType.POWER_SUPERVISORY.value: "power-plug",
ZoneType.TEMPERATURE.value: "thermometer-lines",
ZoneType.ANALOG_ZONE.value: "speedometer",
ZoneType.PHONE_KEY.value: "phone-classic",
ZoneType.INTERCOM_KEY.value: "deskphone",
}
return f"mdi:{zone_icons.get(self._element.definition, 'alarm-bell')}"
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
attrs = self.initial_attrs()
attrs["physical_status"] = ZonePhysicalStatus(
self._element.physical_status
).name.lower()
attrs["logical_status"] = ZoneLogicalStatus(
self._element.logical_status
).name.lower()
attrs["definition"] = ZoneType(self._element.definition).name.lower()
attrs["area"] = self._element.area + 1
attrs["triggered_alarm"] = self._element.triggered_alarm
return attrs
@property
def temperature_unit(self):
"""Return the temperature unit."""
if self._element.definition == ZoneType.TEMPERATURE.value:
return self._temperature_unit
return None
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
if self._element.definition == ZoneType.TEMPERATURE.value:
return self._temperature_unit
if self._element.definition == ZoneType.ANALOG_ZONE.value:
return ELECTRIC_POTENTIAL_VOLT
return None
def _element_changed(self, element, changeset):
if self._element.definition == ZoneType.TEMPERATURE.value:
self._state = temperature_to_state(
                self._element.temperature, UNDEFINED_TEMPERATURE
)
elif self._element.definition == ZoneType.ANALOG_ZONE.value:
self._state = self._element.voltage
else:
self._state = pretty_const(
ZoneLogicalStatus(self._element.logical_status).name
)
|
sander76/home-assistant
|
homeassistant/components/elkm1/sensor.py
|
Python
|
apache-2.0
| 9,367
|
[
"Elk"
] |
33c3f7a4232e180a49a92eeb3e34c191cfa86987353af502e46d9a89d90172ec
|
#
# Bare-bones re-implementation of CMA-ES.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import numpy as np
import pints
import warnings
from numpy.linalg import norm
from scipy.special import gamma
class BareCMAES(pints.PopulationBasedOptimiser):
"""
Finds the best parameters using the CMA-ES method described in [1, 2],
using a bare bones re-implementation.
For general use, we recommend the :class:`pints.CMAES` optimiser, which
wraps around the ``cma`` module provided by the authors of CMA-ES. The
``cma`` module provides a battle-tested version of the optimiser.
    The role of this class is to provide a simpler implementation of only the
core algorithm of CMA-ES, which is easier to read and analyse, and which
can be used to compare with bare implementations of other methods.
Extends :class:`PopulationBasedOptimiser`.
References
----------
.. [1] The CMA Evolution Strategy: A Tutorial
           Nikolaus Hansen, arXiv
https://arxiv.org/abs/1604.00772
.. [2] Hansen, Mueller, Koumoutsakos (2003) "Reducing the time complexity
of the derandomized evolution strategy with covariance matrix
adaptation (CMA-ES)". Evolutionary Computation
https://doi.org/10.1162/106365603321828970
"""
def __init__(self, x0, sigma0=0.1, boundaries=None):
super(BareCMAES, self).__init__(x0, sigma0, boundaries)
# Set initial state
self._running = False
self._ready_for_tell = False
# Best solution found
self._xbest = pints.vector(x0)
self._fbest = float('inf')
# Number of iterations run
self._iterations = 0
# Mean of the proposal distribution
self._mu = np.copy(self._x0)
# Step size
self._eta = np.min(self._sigma0)
# Covariance matrix C and decomposition in rotation R and scaling S
# A decomposition C = R S S R.T can be made, such that R is the matrix
# of eigenvectors of C, and S is a diagonal matrix containing the
# square roots of the eigenvalues of C.
# Here, R and S can be interpreted as a rotation and a scaling matrix
# respectively.
# Note that only C is updated directly, while R and S are simply
# recalculated at every step.
self._C = np.identity(self._n_parameters)
self._R = np.identity(self._n_parameters)
self._S = np.identity(self._n_parameters)
# Constant used in tell()
self._e = (
np.sqrt(2)
* gamma((self._n_parameters + 1) / 2)
/ gamma(self._n_parameters / 2)
)
def ask(self):
""" See :meth:`Optimiser.ask()`. """
# Initialise on first call
if not self._running:
self._initialise()
# Ready for tell now
self._ready_for_tell = True
# Create new samples
# Normalised samples: centered at zero and no rotation or scaling
self._zs = np.array([np.random.normal(0, 1, self._n_parameters)
for i in range(self._population_size)])
# Centered samples: centered at zero, with rotation and scaling
self._ys = np.array([self._R.dot(self._S).dot(z) for z in self._zs])
# Samples from N(mu, eta**2 * C)
self._xs = np.array([self._mu + self._eta * y for y in self._ys])
# Apply boundaries; creating safe points for evaluation
# Rectangular boundaries? Then perform boundary transform
if self._boundary_transform is not None:
self._xs = self._boundary_transform(self._xs)
# Manual boundaries? Then pass only xs that are within bounds
if self._manual_boundaries:
self._user_ids = np.nonzero(
[self._boundaries.check(x) for x in self._xs])
self._user_xs = self._xs[self._user_ids]
if len(self._user_xs) == 0: # pragma: no cover
warnings.warn('All points requested by CMA-ES are outside the'
' boundaries.')
else:
self._user_xs = self._xs
# Set as read-only and return
self._user_xs.setflags(write=False)
return self._user_xs
def cov(self, decomposed=False):
"""
Returns the current covariance matrix ``C`` of the proposal
distribution.
If the optional argument ``decomposed`` is set to ``True``, a tuple
``(R, S)`` will be returned such that ``R`` contains the eigenvectors
        of ``C`` while ``S`` is a diagonal matrix containing the square roots
        of the eigenvalues of ``C``, such that ``C = R S S R.T``.
"""
if decomposed:
return self._R, self._S
else:
return np.copy(self._C)
def fbest(self):
""" See :meth:`Optimiser.fbest()`. """
return self._fbest
def mean(self):
"""
Returns the current mean of the proposal distribution.
"""
return np.copy(self._mu)
def _initialise(self):
"""
Initialises the optimiser for the first iteration.
"""
assert (not self._running)
# Create boundary transform, or use manual boundary checking
self._manual_boundaries = False
self._boundary_transform = None
if isinstance(self._boundaries, pints.RectangularBoundaries):
self._boundary_transform = pints.TriangleWaveTransform(
self._boundaries)
elif self._boundaries is not None:
self._manual_boundaries = True
# Parent generation population size
# The parameter parent_pop_size is the mu in the papers. It represents
        # the size of a parent population used to update our parameters.
self._parent_pop_size = self._population_size // 2
# Weights, all set equal for the moment
# Sum of all positive weights should be 1
self._W = 1 + np.arange(self._population_size)
self._W = np.log(0.5 * (self._population_size + 1)) - np.log(self._W)
        # Variance effective selection mass of the parent weights:
        # (sum of weights) ** 2 / (sum of squared weights)
self._muEff = (
np.sum(self._W[:self._parent_pop_size]) ** 2
/ np.sum(np.square(self._W[:self._parent_pop_size]))
)
        # The same quantity, computed for the remaining (negative) weights
self._muEffMinus = (
np.sum(self._W[self._parent_pop_size:]) ** 2
/ np.sum(np.square(self._W[self._parent_pop_size:]))
)
        # Cumulation: evolution paths, used to update the covariance matrix and sigma
self._pc = np.zeros(self._n_parameters)
self._psig = np.zeros(self._n_parameters)
# learning rate for the mean
self._cm = 1
# Decay rate of the evolution path for C
self._ccov = (4 + self._muEff / self._n_parameters) / (
self._n_parameters + 4 + 2 * self._muEff / self._n_parameters)
# Decay rate of the evolution path for sigma
self._csig = (2 + self._muEff) / (self._n_parameters + 5 + self._muEff)
# See rank-1 vs rank-mu updates
# Learning rate for rank-1 update
self._c1 = 2 / ((self._n_parameters + 1.3) ** 2 + self._muEff)
# Learning rate for rank-mu update
self._cmu = min(
2 * (self._muEff - 2 + 1 / self._muEff)
/ ((self._n_parameters + 2) ** 2 + self._muEff),
1 - self._c1
)
# Damping of the step-size (sigma0) update
self._dsig = 1 + self._csig + 2 * max(
0, np.sqrt((self._muEff - 1) / (self._n_parameters + 1)) - 1)
# Parameters from the Table 1 of [1]
alpha_mu = 1 + self._c1 / self._cmu
alpha_mueff = 1 + 2 * self._muEffMinus / (self._muEff + 2)
alpha_pos_def = \
(1 - self._c1 - self._cmu) / (self._n_parameters * self._cmu)
# Rescale the weights
sum_pos = np.sum(self._W[self._W > 0])
sum_neg = np.sum(self._W[self._W < 0])
scale_pos = 1 / sum_pos
scale_neg = min(alpha_mu, alpha_mueff, alpha_pos_def) / -sum_neg
self._W[self._W > 0] *= scale_pos
self._W[self._W < 0] *= scale_neg
# Update optimiser state
self._running = True
def name(self):
""" See :meth:`Optimiser.name()`. """
return 'Bare-bones CMA-ES'
def running(self):
""" See :meth:`Optimiser.running()`. """
return self._running
def stop(self):
""" See :meth:`Optimiser.stop()`. """
# We use the condition number defined in the pycma code at
# cma/evolution_strategy.py#L2965.
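        # Note (editor's addition): self._S holds the square roots of the
        # eigenvalues of C, so squaring the max/min ratio below yields the
        # condition number of C itself.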
cond = np.diagonal(self._S)
cond = (np.max(cond) / np.min(cond)) ** 2
if cond > 1e14: # pragma: no cover
return 'Ill-conditioned covariance matrix'
return False
def _suggested_population_size(self):
"""
        See :meth:`PopulationBasedOptimiser._suggested_population_size()`.
"""
return 4 + int(3 * np.log(self._n_parameters))
def tell(self, fx):
""" See :meth:`Optimiser.tell()`. """
# Check ask-tell pattern
if not self._ready_for_tell:
raise Exception('ask() not called before tell()')
self._ready_for_tell = False
# Update iteration count
self._iterations += 1
# Some aliases for readability
n = self._n_parameters
npo = self._population_size
npa = self._parent_pop_size
# Manual boundaries? Then reconstruct full fx vector
if self._manual_boundaries and len(fx) < npo:
user_fx = fx
fx = np.ones((npo, )) * float('inf')
fx[self._user_ids] = user_fx
# Order the points from best to worst score
order = np.argsort(fx)
xs = np.array(self._xs[order])
zs = np.array(self._zs[order])
ys = np.array(self._ys[order])
# Update the mean
self._mu += self._cm * np.sum(
((xs[:npa] - self._mu).T * self._W[:npa]).T, axis=0)
# Get the weighted means of y and z
zmeans = np.sum((zs[:npa].T * self._W[:npa]).T, 0)
ymeans = np.sum((ys[:npa].T * self._W[:npa]).T, 0)
# Evolution path of sigma (the step size)
# Note that self._R.dot(zmeans) =
# self._R.dot(np.linalg.inv(self._S)).dot(self._R.T).dot(ymeans)
        # Normalizing constant for the evolution path update
c = np.sqrt(self._csig * (2 - self._csig) * self._muEff)
self._psig = (1 - self._csig) * self._psig + c * self._R.dot(zmeans)
# In cma/sigma_adaptation.py#L71 they are NOT using exp_size_N0I, but
# instead use a term based on n_parameters.
        # The Heaviside function helps to stall the update of pc if the norm
        # ||psig|| is too large. This helps to prevent too fast increases in
        # the axes of C when the step size is too small.
cond = (
norm(self._psig)
/ np.sqrt(1 - (1 - self._csig) ** (2 * (self._iterations + 1)))
< (1.4 + 2 / (n + 1)) * self._e
)
h_sig = 1 if cond else 0
delta_sig = (1 - h_sig) * self._ccov * (2 - self._ccov)
# Evolution path for the rank-1 update
c = np.sqrt(self._ccov * (2 - self._ccov) * self._muEff)
self._pc = (1 - self._ccov) * self._pc + h_sig * c * ymeans
# Weight changes taken from the tutorial (no explanation is given for
# the change) these weights are used for the rank mu update only.
# They allow to keep positive definiteness according to
# cma/purecma.py#L419
temp_weights = np.copy(self._W)
for i, w in enumerate(self._W):
if w < 0:
temp_weights[i] = w * n / (norm(self._R * zs[i]) ** 2)
# Update the Covariance matrix:
# Calculate the rank 1 update using the evolution path
rank1 = self._c1 * np.outer(self._pc, self._pc)
# Calculate the rank-mu update
yy = np.array([np.outer(y, y) for y in ys]).T
rankmu = self._cmu * np.sum((yy * temp_weights).T, 0)
# Update C
self._C = rank1 + rankmu + self._C * (
1 + delta_sig * self._c1 - self._c1 - self._cmu * sum(self._W))
# Avoid numerical issues by forcing C to be symmetric
self._C = np.triu(self._C) + np.triu(self._C, 1).T
# Update the step size
# Here we are simply looking at the ratio of the length of the
        # evolution path vs its expected length (E|N(0, I)|)
# We use the difference of the ratio with 1 and scale using the
# learning rate and the damping parameters.
self._eta *= np.exp(
self._csig / self._dsig * (norm(self._psig) / self._e - 1))
# Update eigenvectors and eigenvalues of C
eig = np.linalg.eigh(self._C)
self._S = np.sqrt(np.diag(eig[0]))
self._R = eig[1]
# Update xbest and fbest
# Note: The stored values are based on particles, not on the mean of
# all particles! This has the advantage that we don't require an extra
# evaluation at mu to get a pair (mu, f(mu)). The downside is that
# xbest isn't the very best point. However, xbest and mu seem to
# converge quite quickly, so that this difference disappears.
if self._fbest > fx[order[0]]:
self._fbest = fx[order[0]]
self._xbest = xs[0]
def xbest(self):
""" See :meth:`Optimiser.xbest()`. """
return self._xbest
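# Minimal usage sketch (editor's illustration, not part of PINTS): drive the
# optimiser through its ask/tell interface on a 2-d quadratic. The objective
# and the iteration budget are arbitrary choices made for demonstration.
if __name__ == '__main__':
    opt = BareCMAES(x0=np.array([3.0, -2.0]), sigma0=0.5)
    for _ in range(100):
        points = opt.ask()                         # propose a population
        scores = [np.sum(p ** 2) for p in points]  # evaluate f(x) = ||x||^2
        opt.tell(scores)                           # update mean, C, step size
    print(opt.xbest(), opt.fbest())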
|
martinjrobins/hobo
|
pints/_optimisers/_cmaes_bare.py
|
Python
|
bsd-3-clause
| 13,798
|
[
"Gaussian"
] |
bafd401246e41b9bb4114fa5842b2e36bcef6132c6136f16b4f3fabc8a2bfd2e
|
import numpy as np
from ase import Atoms
from ase.constraints import FixAtoms
from ase.calculators.emt import EMT
from ase.neb import NEB
from ase.visualize import view
from ase.optimize.fire import FIRE as QuasiNewton
from ase.lattice.surface import surface
from ase.lattice.cubic import FaceCenteredCubic
#set the number of images you want
nimages = 5
#some algebra to determine surface normal and the plane of the surface
d3=[2,1,1]
a1=np.array([0,1,1])
d1=np.cross(a1,d3)
a2=np.array([0,-1,1])
d2=np.cross(a2,d3)
#create your slab
slab =FaceCenteredCubic(directions=[d1,d2,d3],
size=(2,1,2),
symbol=('Pt'),
latticeconstant=3.9)
#add some vacuum to your slab
uc = slab.get_cell()
print(uc)
uc[2] += [0,0,10] # add 10 Angstrom of vacuum along the surface normal
slab.set_cell(uc,scale_atoms=False)
#view the slab to make sure it is how you expect
view(slab)
#some positions needed to place the atom in the correct place
x1 = 1.379
x2 = 4.137
x3 = 2.759
y1 = 0.0
y2 = 2.238
z1 = 7.165
z2 = 6.439
#Add the adatom to the list of atoms and set constraints of surface atoms.
slab += Atoms('N', [ ((x2+x1)/2,y1,z1+1.5)])
mask = [atom.symbol == 'Pt' for atom in slab]
slab.set_constraint(FixAtoms(mask=mask))
#optimise the initial state
# Atom below step
initial = slab.copy()
initial.set_calculator(EMT())
relax = QuasiNewton(initial)
relax.run(fmax=0.05)
view(initial)
#optimise the final state
# Atom above step
slab[-1].position = (x3,y2+1,z2+3.5)
final = slab.copy()
final.set_calculator(EMT())
relax = QuasiNewton(final)
relax.run(fmax=0.05)
view(final)
#create a list of images for interpolation
images = [initial]
for i in range(nimages):
images.append(initial.copy())
for image in images:
image.set_calculator(EMT())
images.append(final)
view(images)
#carry out interpolation (linear here; uncomment the idpp line to use idpp)
neb = NEB(images)
#neb.interpolate('idpp')
neb.interpolate()
#Run NEB calculation
qn = QuasiNewton(neb, trajectory='N_diffusion_lin.traj', logfile='N_diffusion_lin.log')
qn.run(fmax=0.05)
|
misdoro/python-ase
|
doc/tutorials/neb/idpp4.py
|
Python
|
gpl-2.0
| 2,077
|
[
"ASE"
] |
05fcfabfe30c128884367f2b017e5fcc1fdbf247bc488ba8e46cf6bcfd9453b5
|
from PyQt4.Qt import *
from numpy import *
import Avogadro
import sys
import unittest
from util import *
class TestCamera(unittest.TestCase):
def setUp(self):
# create the GLWidget and load the default engines
self.glwidget = Avogadro.GLWidget()
self.glwidget.loadDefaultEngines()
self.molecule = Avogadro.molecules.addMolecule()
self.molecule.addAtom()
self.glwidget.molecule = self.molecule
self.assertNotEqual(self.glwidget.camera, None)
def tearDown(self):
        # nothing to clean up
        pass
def test_parent(self):
self.assertNotEqual(self.glwidget.camera.parent, None)
def test_angleOfViewY(self):
self.assert_(self.glwidget.camera.angleOfViewY)
testReadWriteProperty(self, self.glwidget.camera.angleOfViewY, 40.0, 60.0)
def test_modelview(self):
self.glwidget.camera.modelview
m = self.glwidget.camera.modelview
self.glwidget.camera.modelview = m
def test_various(self):
self.glwidget.camera.applyPerspective()
self.glwidget.camera.applyModelview()
self.glwidget.camera.initializeViewPoint()
dist = self.glwidget.camera.distance(array([0., 0., 0.]))
self.glwidget.camera.translate(array([0., 0., 0.]))
self.glwidget.camera.pretranslate(array([0., 0., 0.]))
self.glwidget.camera.rotate(3.14, array([0., 0., 0.]))
self.glwidget.camera.prerotate(3.14, array([0., 0., 0.]))
self.glwidget.camera.normalize()
def test_axes(self):
self.glwidget.camera.transformedXAxis
self.glwidget.camera.transformedYAxis
self.glwidget.camera.transformedZAxis
self.glwidget.camera.backTransformedXAxis
self.glwidget.camera.backTransformedYAxis
self.glwidget.camera.backTransformedZAxis
def test_project(self):
point = QPoint(10,20)
self.assertEqual(len(self.glwidget.camera.unProject(point)), 3)
self.assertEqual(len(self.glwidget.camera.unProject(point, array([1., 0., 0.]))), 3)
# added to fix name conflict WithZ
self.assertEqual(len(self.glwidget.camera.unProjectWithZ(array([1., 2., 0.]))), 3)
self.assertEqual(len(self.glwidget.camera.project(array([1., 2., 3.]))), 3)
if __name__ == "__main__":
# create a new application
# (must be done before creating a GLWidget)
app = QApplication(sys.argv)
unittest.main()
sys.exit(app.exec_())
|
rcplane/periodicdisplay
|
reference/avogadro/libavogadro/src/python/unittest/camera.py
|
Python
|
gpl-2.0
| 2,353
|
[
"Avogadro"
] |
e436acf2ff7a2dd60f5e59fd8db3372b6ed1569fac369a6905825ba0db9de3ff
|
# -*- coding: utf-8 -*-
"""
Climate Data Module
@author: Mike Amy
"""
from calendar import isleap
from collections import OrderedDict
from datetime import date, timedelta
from math import floor
from gluon import current
# @ToDo: Nasty!
db = current.db
same = lambda x: x
# keyed off the units field in the sample_table_spec table
standard_unit = {
"in": same,
"out": same
}
# be careful to use floats for all of these numbers
units_in_out = {
"Celsius": {
"in": lambda celsius: celsius + 273.15,
"out": lambda kelvin: kelvin - 273.15
},
"Fahreinheit": {
"in": lambda fahreinheit: (fahreinheit + 459.67) + (5.0/9.0),
"out": lambda kelvin: (kelvin * (9.0/5.0)) - 459.67
},
"Kelvin": standard_unit,
"hPa": standard_unit,
"Pa": {
"in": lambda pascals: pascals / 100.0,
"out": lambda hectopascals: hectopascals * 100.0
},
"mm": standard_unit,
"kg m-2 s-1": {
"in": lambda precipitation_rate: precipitation_rate * 2592000.0,
"out": lambda mm: mm / 2592000.0
},
"%": {
"in": lambda x: x / 100.0,
"out": lambda x: x * 100.0
},
"ratio": standard_unit,
"m/s": standard_unit,
}
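# Sanity check (editor's addition, not in the original module): 32 F is the
# freezing point of water, so the "in"/"out" pair must map it to 273.15 K and
# back. The dictionary key keeps the module's original spelling.
assert abs(units_in_out["Fahreinheit"]["in"](32.0) - 273.15) < 1e-9
assert abs(units_in_out["Fahreinheit"]["out"](273.15) - 32.0) < 1e-9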
# date handling
start_year = 2011
start_month_0_indexed = 10
start_date = date(start_year, start_month_0_indexed+1, 11)
start_day_number = start_date.toordinal()
class DateMapping(object):
def __init__(date_mapper, from_year_month_day, from_date, to_date):
date_mapper.year_month_day_to_time_period = from_year_month_day
date_mapper.date_to_time_period = from_date
        date_mapper.to_date = to_date
def date_to_month_number(date):
"""This function converts a date to a month number.
See also year_month_to_month_number(year, month)
"""
return year_month_to_month_number(date.year, date.month)
def year_month_to_month_number(year, month, day=None):
"""Time periods are integers representing months in years,
from 1960 onwards.
e.g. 0 = Jan 1960, 1 = Feb 1960, 12 = Jan 1961
This function converts a year and month to a month number.
"""
return ((year-start_year) * 12) + (month-1) - start_month_0_indexed
def month_number_to_year_month(month_number):
month_number += start_month_0_indexed
return (month_number / 12)+start_year, ((month_number % 12) + 1)
def month_number_to_date(month_number):
year, month = month_number_to_year_month(month_number)
return date(year, month, 1)
def rounded_date_to_month_number(date):
"""This function converts a date to a month number by rounding
to the nearest 12th of a year.
See also date_to_month_number(year, month)
"""
timetuple = date.timetuple()
year = timetuple.tm_year
day_of_year = timetuple.tm_yday
month0 = floor(((day_of_year / (isleap(year) and 366.0 or 365.0)) * 12) + 0.5)
return ((year-start_year) * 12) + (month0) - start_month_0_indexed
def floored_twelfth_of_a_year(date):
"""This function converts a date to a month number by flooring
to the nearest 12th of a year.
"""
timetuple = date.timetuple()
year = timetuple.tm_year
day_of_year = timetuple.tm_yday
month0 = floor((day_of_year / (isleap(year) and 366.0 or 365.0)) * 12)
return ((year-start_year) * 12) + (month0) - start_month_0_indexed
def floored_twelfth_of_a_360_day_year(date):
"""This function converts a date to a month number by flooring
to the nearest 12th of a 360 day year. Used by PRECIS projection.
"""
timetuple = date.timetuple()
year = timetuple.tm_year
day_of_year = timetuple.tm_yday
    month0 = floor((day_of_year / 360.0) * 12)
return ((year-start_year) * 12) + (month0) - start_month_0_indexed
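# Worked example (editor's note): with the float divisor above, day 100 of a
# year in the 360-day calendar gives month0 = floor((100 / 360.0) * 12) = 3,
# i.e. the fourth month slot, before the start-date offset is applied.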
#import logging
#logging.warn("NetCDF imports using unusual date semantics might not work")
monthly = DateMapping(
from_year_month_day = year_month_to_month_number,
from_date = date_to_month_number, # Different for different data sets!
to_date = month_number_to_date
)
def date_to_day_number(date):
return date.toordinal() - start_day_number
def year_month_day_to_day_number(year, month, day):
return date_to_day_number(date(year, month, day))
def day_number_to_date(day_number):
return start_date + timedelta(days=day_number)
daily = DateMapping(
from_year_month_day = year_month_day_to_day_number,
from_date = date_to_day_number,
to_date = day_number_to_date
)
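# Round-trip sketch (editor's addition): both mappings invert each other on
# their natural grain, e.g. the first day of a month maps to a time period and
# back to the same date (the integer division above relies on Python 2).
assert monthly.to_date(monthly.date_to_time_period(date(2012, 3, 1))) == date(2012, 3, 1)
assert daily.to_date(daily.date_to_time_period(date(2012, 3, 1))) == date(2012, 3, 1)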
class Observed(object):
code = "O"
Observed.__name__ = "Observed Station"
class Gridded(object):
code = "G"
Gridded.__name__ = "Observed Gridded"
class Projected(object):
code = "P"
sample_table_types = (Observed, Gridded, Projected)
sample_table_types_by_code = OrderedDict()
for SampleTableType in sample_table_types:
sample_table_types_by_code[SampleTableType.code] = SampleTableType
class SampleTable(object):
# Samples always have places and time (periods)
# This format is used for daily data and monthly aggregated data.
# Performance matters, and we have lots of data,
# so unnecessary bytes are shaved as follows:
# 1. Sample tables don't need an id - the time and place is the key
# 2. The smallest interval is one day, so time_period as smallint (65536)
# instead of int, allows a 179 year range, from 1950 to 2129.
# Normally we'll be dealing with months however, where this is
# even less of an issue.
# 3. The value field type can be real, int, smallint, decimal etc.
# Double is overkill for climate data.
# Take care with decimal though - calculations may be slower.
# These tables are not web2py tables as we don't want web2py messing with
# them. The database IO is done directly to postgres for speed.
# We don't want web2py messing with or complaining about the schemas.
# It is likely we will need spatial database extensions i.e. PostGIS.
# May be better to cluster places by region.
__date_mapper = {
"daily": daily,
"monthly": monthly
}
__objects = {}
__names = OrderedDict()
@staticmethod
def with_name(name):
return SampleTable.__names[name]
__by_ids = {}
@staticmethod
def with_id(id):
SampleTable_by_ids = SampleTable.__by_ids
return SampleTable_by_ids[id]
@staticmethod
def name_exists(name, error):
if name in SampleTable.__names:
return True
else:
error(
"Available data sets are: %s" % SampleTable.__names.keys()
)
return False
@staticmethod
def matching(
parameter_name,
sample_type_code
):
try:
return SampleTable.__objects[(parameter_name, sample_type_code)]
except KeyError:
pass
#print SampleTable.__objects.keys()
@staticmethod
def add_to_client_config_dict(config_dict):
data_type_option_names = []
for SampleTableType in sample_table_types:
data_type_option_names.append(SampleTableType.__name__)
parameter_names = []
for name, sample_table in SampleTable.__names.iteritems():
if sample_table.date_mapping_name == "monthly":
parameter_names.append(name)
config_dict.update(
data_type_option_names = data_type_option_names,
parameter_names = parameter_names
)
def __init__(
sample_table,
db,
name, # please change to parameter_name
date_mapping_name,
field_type,
units_name,
grid_size,
sample_type = None,
sample_type_code = None,
id = None
):
parameter_name = name
assert units_name in units_in_out.keys(), \
"units must be one of %s" % units_in_out.keys()
assert sample_type is None or sample_type in sample_table_types
assert (sample_type is not None) ^ (sample_type_code is not None), \
"either parameters sample_type or sample_type_code must be set"
sample_table_type = sample_type or sample_table_types_by_code[sample_type_code]
if id is not None:
if id in SampleTable.__by_ids:
# other code shouldn't be creating SampleTables that already
# exist. Or, worse, different ones with the same id.
raise Exception(
"SampleTable %i already exists. "
"Use SampleTable.with_id(%i) instead." % (id, id)
)
#return SampleTable.__by_ids[id]
else:
sample_table.set_id(id)
SampleTable.__by_ids[id] = sample_table
sample_table.type = sample_table_type
sample_table.units_name = units_name
sample_table.parameter_name = parameter_name
sample_table.date_mapping_name = date_mapping_name
sample_table.date_mapper = SampleTable.__date_mapper[date_mapping_name]
sample_table.field_type = field_type
sample_table.grid_size = grid_size
sample_table.db = db
SampleTable.__objects[
(parameter_name, sample_table.type.code)
] = sample_table
SampleTable.__names["%s %s" % (
sample_table.type.__name__,
parameter_name
)] = sample_table
def __repr__(sample_table):
return '%s %s' % (
sample_table.type.__name__,
sample_table.parameter_name
)
def __str__(sample_table):
return '"%s"' % repr(sample_table)
@staticmethod
def table_name(id):
return "climate_sample_table_%i" % id
def set_id(sample_table,id):
sample_table.id = id
sample_table.table_name = SampleTable.table_name(id)
def find(
sample_table,
found,
not_found
):
db = sample_table.db
existing_table_query = db(
(db.climate_sample_table_spec.name == sample_table.parameter_name) &
(db.climate_sample_table_spec.sample_type_code == sample_table.type.code)
)
existing_table = existing_table_query.select().first()
if existing_table is None:
not_found()
else:
found(
existing_table_query,
SampleTable.table_name(existing_table.id),
)
def create(sample_table, use_table_name):
def create_table():
db = sample_table.db
sample_table.set_id(
db.climate_sample_table_spec.insert(
sample_type_code = sample_table.type.code,
name = sample_table.parameter_name,
units = sample_table.units_name,
field_type = sample_table.field_type,
date_mapping = sample_table.date_mapping_name,
grid_size = sample_table.grid_size
)
)
db.executesql(
"""
CREATE TABLE %(table_name)s
(
place_id integer NOT NULL,
time_period smallint NOT NULL,
value %(field_type)s NOT NULL,
CONSTRAINT %(table_name)s_primary_key
PRIMARY KEY (place_id, time_period),
CONSTRAINT %(table_name)s_place_id_fkey
FOREIGN KEY (place_id)
REFERENCES climate_place (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE CASCADE
);
""" % sample_table.__dict__
)
use_table_name(sample_table.table_name)
def complain_that_table_already_exists(
query,
existing_table_name
):
raise Exception(
"Table for %s %s already exists as '%s'" % (
sample_table.type.__name__,
sample_table.parameter_name,
existing_table_name
)
)
return sample_table.find(
not_found = create_table,
found = complain_that_table_already_exists
)
def create_indices(sample_table):
db = sample_table.db
for field in (
"time_period",
"place_id",
"value"
):
db.executesql(
"CREATE INDEX %(table_name)s_%(field)s__idx "
"on %(table_name)s(%(field)s);" % dict(
sample_table.__dict__,
field = field
)
)
def drop(sample_table, use_table_name):
db = sample_table.db
def complain_that_table_does_not_exist():
raise Exception(
"%s %s table not found" % (
                    sample_table.type.__name__,
sample_table.parameter_name,
)
)
def delete_table(
existing_table_query,
existing_table_name,
):
existing_table_query.delete()
db.executesql(
"DROP TABLE %s;" % existing_table_name
)
db.commit()
use_table_name(existing_table_name)
return sample_table.find(
not_found = complain_that_table_does_not_exist,
found = delete_table
)
def clear(sample_table):
sample_table.db.executesql(
"TRUNCATE TABLE %s;" % sample_table.table_name
)
def insert_values(sample_table, values):
sql = "INSERT INTO %s (time_period, place_id, value) VALUES %s;" % (
sample_table.table_name,
",".join(values)
)
try:
sample_table.db.executesql(sql)
except:
            print(sql)
raise
def pull_real_time_data(sample_table):
import_sql = (
"SELECT AVG(value), station_id, obstime "
"FROM weather_data_nepal "
"WHERE parameter = 'T' "
"GROUP BY station_id, obstime"
"ORDER BY station_id, obstime;"
)
        sample_table.db.executesql(
import_sql
)
def csv_data(
sample_table,
place_id,
date_from,
date_to
):
sample_table_id = sample_table.id
date_mapper = sample_table.date_mapper
start_date_number = date_mapper.date_to_time_period(date_from)
end_date_number = date_mapper.date_to_time_period(date_to)
data = [
"date,"+sample_table.units_name
]
for record in db.executesql(
"SELECT * "
"FROM climate_sample_table_%(sample_table_id)i "
"WHERE time_period >= %(start_date_number)i "
"AND place_id = %(place_id)i "
"AND time_period <= %(end_date_number)i"
"ORDER BY time_period ASC;" % locals()
):
place_id, time_period, value = record
date_format = {
monthly: "%Y-%m",
daily: "%Y-%m-%d"
}[date_mapper]
data.append(
",".join((
date_mapper.to_date(time_period).strftime(date_format),
str(value)
))
)
data.append("")
return "\n".join(data)
def get_available_years(
sample_table
):
years = []
for (year,) in db.executesql(
"SELECT sub.year FROM ("
"SELECT (((time_period + %(start_month_0_indexed)i) / 12) + %(start_year)i)"
" AS year "
"FROM climate_sample_table_%(sample_table_id)i "
") as sub GROUP BY sub.year;" % dict(
start_year = start_year,
start_month_0_indexed = start_month_0_indexed,
sample_table_id = sample_table.id
)
):
years.append(year)
return years
def init_SampleTable():
"""
"""
table = current.s3db.climate_sample_table_spec
for SampleTableType in sample_table_types:
query = (table.sample_type_code == SampleTableType.code)
rows = db(query).select(orderby=table.name)
for sample_table_spec in rows:
sample_type_code = sample_table_spec.sample_type_code
parameter_name = sample_table_spec.name
sample_type = sample_table_types_by_code[sample_table_spec.sample_type_code]
date_mapper = SampleTable._SampleTable__date_mapper
SampleTable(
name = sample_table_spec.name,
id = sample_table_spec.id,
units_name = sample_table_spec.units,
field_type = sample_table_spec.field_type,
date_mapping_name = sample_table_spec.date_mapping,
sample_type = sample_type,
grid_size = sample_table_spec.grid_size,
db = db
)
init_SampleTable()
from MapPlugin import MapPlugin
|
flavour/ifrc_qa
|
modules/ClimateDataPortal/__init__.py
|
Python
|
mit
| 17,081
|
[
"NetCDF"
] |
914cd75e0f5a90eeb70ae3060261a20e87bc0e6bde4d1403b4eae8417413cf50
|
class Node(object):
_fields = tuple()
def __init__(self):
self.__info__ = {}
def is_ancestor_of(self, other):
if self is other:
return True
for value in self.__dict__.values():
if isinstance(value, list):
for item in value:
if isinstance(item, Node) and item.is_ancestor_of(other):
return True
elif isinstance(value, Node) and value.is_ancestor_of(other):
return True
return False
def to_dict(self):
data = {}
for attr, value in self.__dict__.items():
if isinstance(value, list):
values = []
for item in value:
if isinstance(item, Node):
values.append(item.to_dict())
else:
values.append(item)
data[attr] = values
elif isinstance(value, Node):
data[attr] = value.to_dict()
else:
data[attr] = value
return {self.__class__.__name__: data}
class ModuleDecl(Node):
_fields = ('name', 'body',)
def __init__(self, name, body):
super().__init__()
self.name = name
self.body = body
def __str__(self):
return '\n'.join(map(str, self.body.statements))
class Block(Node):
_fields = ('statements',)
def __init__(self, statements=None):
super().__init__()
self.statements = statements or []
def __str__(self):
result = '{\n'
for statement in self.statements:
result += '\n'.join(' ' + line for line in str(statement).split('\n'))
result += '\n'
return result + '}'
class PropertyDecl(Node):
_fields = ('name', 'attributes', 'type_annotation', 'initializer', 'getter', 'setter',)
def __init__(
self, name, attributes=None, type_annotation=None, initializer=None,
getter=None, setter=None):
super().__init__()
self.name = name
self.attributes = attributes or []
self.type_annotation = type_annotation
self.initializer = initializer
self.getter = getter
self.setter = setter
def __str__(self):
if 'shared' in self.attributes:
result = 'shd ' + self.name
attributes = sorted(filter(lambda attr: attr != 'shared', self.attributes))
elif 'mutable' in self.attributes:
result = 'mut ' + self.name
attributes = sorted(filter(lambda attr: attr != 'mutable', self.attributes))
else:
result = 'cst ' + self.name
attributes = sorted(self.attributes)
if self.attributes:
result = ' '.join('@' + str(attr) for attr in attributes) + ' ' + result
if self.type_annotation:
result += ': ' + str(self.type_annotation)
if self.initializer:
result += ' = ' + str(self.initializer)
if self.getter or self.setter:
result += ' {\n'
if self.getter:
result += '\n'.join(' ' + line for line in str(self.getter).split('\n'))
result += '\n'
if self.setter:
result += '\n'.join(' ' + line for line in str(self.setter).split('\n'))
result += '\n'
result += '}'
return result
class FunctionDecl(Node):
_fields = ('name', 'signature', 'body', 'generic_parameters', 'where_clause',)
def __init__(
self, name, signature, body=None, attributes=None,
generic_parameters=None, where_clause=None):
super().__init__()
self.name = name
self.signature = signature
self.body = body
self.attributes = attributes or []
self.generic_parameters = generic_parameters or []
self.where_clause = where_clause
def __str__(self):
result = 'fun {}'.format(self.name)
if self.attributes:
result = ' '.join('@' + str(attr) for attr in self.attributes) + ' ' + result
if self.generic_parameters:
result += '<{}>'.format(', '.join(map(str, self.generic_parameters)))
result += str(self.signature)
if self.where_clause:
result += ' where ' + str(self.where_clause)
if self.body:
result += ' ' + str(self.body)
return result
class FunctionParameterDecl(Node):
_fields = ('name', 'label', 'type_annotation', 'attributes', 'default_value',)
def __init__(self, name, label, type_annotation, attributes=None, default_value=None):
super().__init__()
self.name = name
self.label = label
self.type_annotation = type_annotation
self.attributes = attributes or []
self.default_value = default_value
def __str__(self):
if 'shared' in self.attributes:
result = 'shd'
elif 'mutable' in self.attributes:
result = 'mut'
else:
result = 'cst'
if self.name != self.label:
result += ' ' + str(self.label or '_')
result += ' ' + self.name
if self.type_annotation:
result += ': ' + str(self.type_annotation)
if self.default_value:
result += ' = ' + str(self.default_value)
return result
class StructDecl(Node):
_fields = (
'name', 'body', 'attributes', 'generic_parameters',
'conformance_list', 'import_list', 'where_clause',)
def __init__(
self, name, body, attributes=None, generic_parameters=None,
conformance_list=None, import_list=None, where_clause=None):
super().__init__()
self.name = name
self.body = body
self.attributes = attributes or []
self.generic_parameters = generic_parameters or []
self.conformance_list = conformance_list or []
self.import_list = import_list or []
self.where_clause = where_clause
def __str__(self):
result = 'struct ' + self.name
if self.attributes:
result = ' '.join('@' + str(attr) for attr in self.attributes) + ' ' + result
if self.generic_parameters:
result += '<{}>'.format(', '.join(map(str, self.generic_parameters)))
if self.conformance_list:
result += ': ' + ', '.join(map(str, self.conformance_list))
if self.import_list:
result += ' import ' + ', '.join(map(str, self.import_list))
if self.where_clause:
result += ' where ' + str(self.where_clause)
return result + ' ' + str(self.body)
class EnumCaseParameterDecl(Node):
_fields = ('label', 'type_annotation',)
def __init__(self, label, type_annotation):
super().__init__()
self.label = label
self.type_annotation = type_annotation
def __str__(self):
return '{}: {}'.format(self.label or '_', self.type_annotation)
class EnumCaseDecl(Node):
_fields = ('name', 'parameters', 'attributes',)
def __init__(self, name, parameters=None, attributes=None):
super().__init__()
self.name = name
self.parameters = parameters or []
self.attributes = attributes or []
def __str__(self):
result = 'case ' + self.name
if self.attributes:
result = ' '.join('@' + str(attr) for attr in self.attributes) + ' ' + result
if self.parameters:
result += '({})'.format(', '.join(map(str, self.parameters)))
return result
class EnumDecl(Node):
_fields = (
'name', 'body', 'attributes', 'generic_parameters',
'conformance_list', 'import_list', 'where_clause',)
def __init__(
self, name, body, attributes=None, generic_parameters=None,
conformance_list=None, import_list=None, where_clause=None):
super().__init__()
self.name = name
self.body = body
self.attributes = attributes or []
self.generic_parameters = generic_parameters or []
self.conformance_list = conformance_list or []
self.import_list = import_list or []
self.where_clause = where_clause
def __str__(self):
result = 'enum ' + self.name
if self.attributes:
result = ' '.join('@' + str(attr) for attr in self.attributes) + ' ' + result
if self.generic_parameters:
result += '<{}>'.format(', '.join(map(str, self.generic_parameters)))
if self.conformance_list:
result += ': ' + ', '.join(map(str, self.conformance_list))
if self.import_list:
result += ' import ' + ', '.join(map(str, self.import_list))
if self.where_clause:
result += ' where ' + str(self.where_clause)
return result + ' ' + str(self.body)
class ProtocolDecl(Node):
_fields = ('name', 'body', 'attributes', 'conformance_list', 'import_list')
def __init__(self, name, body, attributes=None, conformance_list=None, import_list=None):
super().__init__()
self.name = name
self.body = body
self.attributes = attributes
self.conformance_list = conformance_list or []
self.import_list = import_list or []
def __str__(self):
result = 'protocol ' + self.name
if self.attributes:
result = ' '.join('@' + str(attr) for attr in self.attributes) + ' ' + result
if self.conformance_list:
            result += ': ' + ', '.join(map(str, self.conformance_list))
if self.import_list:
result += ' import ' + ', '.join(map(str, self.import_list))
return result + ' ' + str(self.body)
class AbstractTypeDecl(Node):
_fields = ('name', 'conformance_list', 'value', 'attributes',)
def __init__(self, name, conformance_list=None, value=None, attributes=None):
super().__init__()
self.name = name
self.conformance_list = conformance_list or []
self.value = value
self.attributes = attributes or []
def __str__(self):
result = 'abs ' + self.name
if self.attributes:
result = ' '.join('@' + str(attr) for attr in self.attributes) + ' ' + result
if self.conformance_list:
            result += ': ' + ', '.join(map(str, self.conformance_list))
if self.value:
result += ' = ' + str(self.value)
return result
class ExtensionDecl(Node):
_fields = ('subject', 'declaration', 'where_clause',)
def __init__(self, subject, declaration, where_clause=None):
super().__init__()
self.subject = subject
self.declaration = declaration
self.where_clause = where_clause
def __str__(self):
        result = 'extension ' + str(self.subject)
        if self.where_clause:
            result += ' where ' + str(self.where_clause)
return result + ' -> ' + str(self.declaration)
class SignatureParameter(Node):
_fields = ('label', 'type_annotation', 'attributes',)
def __init__(self, label, type_annotation, attributes=None,):
super().__init__()
self.label = label
self.type_annotation = type_annotation
self.attributes = attributes or []
def __str__(self):
        if 'shared' in self.attributes:
            result = 'shd'
            attributes = sorted(filter(lambda attr: attr != 'shared', self.attributes))
elif 'mutable' in self.attributes:
result = 'mut'
attributes = sorted(filter(lambda attr: attr != 'mutable', self.attributes))
else:
result = 'cst'
attributes = sorted(self.attributes)
result += ' ' + (self.label or '_')
if self.type_annotation:
result += ': ' + str(self.type_annotation)
return result
class FunctionSignature(Node):
_fields = ('parameters', 'return_type',)
def __init__(self, return_type, parameters=None):
super().__init__()
self.parameters = parameters
self.return_type = return_type
def __str__(self):
return '({}) -> {}'.format(', '.join(map(str, self.parameters)), self.return_type)
class TupleSignature(Node):
_fields = ('parameters',)
def __init__(self, parameters):
super().__init__()
self.parameters = parameters
def __str__(self):
return '({})'.format(', '.join(map(str, self.parameters)))
class Identifier(Node):
_fields = ('name', 'specializations',)
def __init__(self, name, specializations=None):
super().__init__()
self.name = name
self.specializations = specializations or []
@property
def qualname(self):
if 'scope' in self.__info__:
return '{}.{}'.format(self.__info__['scope'].name, self.name)
return self.name
def __str__(self):
if self.specializations:
return '{}<{}>'.format(self.name, ', '.join(map(str, self.specializations)))
return self.name
class SpecializationArgument(Node):
_fields = ('name', 'value',)
def __init__(self, name, value):
super().__init__()
self.name = name
self.value = value
def __str__(self):
return '{} = {}'.format(self.name, self.value)
class Literal(Node):
_fields = ('value',)
def __init__(self, value):
super().__init__()
self.value = value
def __str__(self):
return str(self.value)
class ArrayLiteral(Node):
_fields = ('items',)
def __init__(self, items=None):
super().__init__()
self.items = items or []
def __str__(self):
return '[{}]'.format(', '.join(map(str, self.items)))
class DictionaryLiteralItem(Node):
_fields = ('key', 'value',)
def __init__(self, key, value):
super().__init__()
self.key = key
self.value = value
def __str__(self):
return '{}: {}'.format(self.key, self.value)
class DictionaryLiteral(Node):
_fields = ('items',)
def __init__(self, items=None):
super().__init__()
self.items = items or []
def __str__(self):
if self.items:
return '[{}]'.format(', '.join(map(str, self.items)))
return '[:]'
class TupleItemDecl(Node):
_fields = ('label', 'attributes', 'type_signature', 'initializer')
def __init__(self, label, initializer, attributes=None, type_annotation=None):
super().__init__()
self.label = label
self.initializer = initializer
self.attributes = attributes or []
self.type_annotation = type_annotation
def __str__(self):
if 'shared' in self.attributes:
result = 'shd ' + (self.label or '_')
attributes = sorted(filter(lambda attr: attr != 'shared', self.attributes))
elif 'mutable' in self.attributes:
result = 'mut ' + (self.label or '_')
attributes = sorted(filter(lambda attr: attr != 'mutable', self.attributes))
else:
result = 'cst ' + (self.label or '_')
attributes = sorted(self.attributes)
if self.attributes:
result = ' '.join('@' + str(attr) for attr in attributes) + ' ' + result
if self.type_annotation:
result += ': ' + str(self.type_annotation)
return result + ' = ' + str(self.initializer)
class Tuple(Node):
_fields = ('items',)
def __init__(self, items):
super().__init__()
self.items = items
def __str__(self):
return '({})'.format(', '.join(map(str, self.items)))
class Closure(Node):
_fields = ('statements', 'parameters',)
def __init__(self, statements, parameters=None):
super().__init__()
self.statements = statements
self.parameters = parameters or []
def __str__(self):
statements = ''
for statement in self.statements:
statements += '\n'.join(' ' + line for line in str(statement).split('\n'))
statements += '\n'
if self.parameters:
return '{{ {} in\n{}}}'.format(', '.join(map(str, self.parameters)), statements)
return '{{\n{}}}'.format(statements)
class CallArgument(Node):
_fields = ('value', 'label', 'attributes',)
def __init__(self, value, label=None, attributes=None):
super().__init__()
self.value = value
self.label = label
self.attributes = attributes or []
def __str__(self):
if self.label:
result = self.label + ' = '
else:
result = ''
if self.attributes:
result += ' '.join('@' + str(attr) for attr in self.attributes) + ' '
return result + str(self.value)
class Call(Node):
_fields = ('callee', 'arguments',)
def __init__(self, callee, arguments=None):
super().__init__()
self.callee = callee
self.arguments = arguments or []
def __str__(self):
return '{}({})'.format(self.callee, ', '.join(map(str, self.arguments)))
class Subscript(Node):
_fields = ('callee', 'arguments',)
def __init__(self, callee, arguments=None):
super().__init__()
self.callee = callee
self.arguments = arguments or []
def __str__(self):
return '{}[{}]'.format(self.callee, ', '.join(map(str, self.arguments)))
class Select(Node):
_fields = ('owner', 'member',)
def __init__(self, owner, member):
super().__init__()
self.owner = owner
self.member = member
def __str__(self):
return '{}.{}'.format(self.owner, self.member)
class ImplicitSelect(Node):
_fields = ('member',)
def __init__(self, member):
super().__init__()
self.member = member
def __str__(self):
return '.' + str(self.member)
class PrefixExpression(Node):
_fields = ('operator', 'operand',)
def __init__(self, operator, operand):
super().__init__()
self.operator = operator
self.operand = operand
def __str__(self):
return '{} {}'.format(self.operator, self.operand)
class PostfixExpression(Node):
_fields = ('operator', 'operand',)
def __init__(self, operator, operand):
super().__init__()
self.operator = operator
self.operand = operand
def __str__(self):
return '{} {}'.format(self.operand, self.operator)
class BinaryExpression(Node):
_fields = ('operator', 'left', 'right',)
def __init__(self, operator, left, right):
super().__init__()
self.operator = operator
self.left = left
self.right = right
def __str__(self):
return '{} {} {}'.format(self.left, self.operator, self.right)
class ValueBindingPattern(Node):
_fields = ('name', 'type_annotation', 'attributes',)
def __init__(self, name, type_annotation=None, attributes=None):
super().__init__()
self.name = name
self.type_annotation = type_annotation
self.attributes = attributes or []
def __str__(self):
if 'shared' in self.attributes:
result = 'shd ' + self.name
attributes = sorted(filter(lambda attr: attr != 'shared', self.attributes))
elif 'mutable' in self.attributes:
result = 'mut ' + self.name
attributes = sorted(filter(lambda attr: attr != 'mutable', self.attributes))
else:
result = 'cst ' + self.name
attributes = sorted(self.attributes)
if self.type_annotation:
result += ': ' + str(self.type_annotation)
return result
class PatternArgument(Node):
_fields = ('label', 'value',)
def __init__(self, value, label=None):
super().__init__()
self.label = label
self.value = value
def __str__(self):
if self.label:
return '{} = {}'.format(self.label, self.value)
return str(self.value)
class TuplePattern(Node):
_fields = ('items',)
def __init__(self, items=None):
super().__init__()
self.items = items or []
def __str__(self):
return '({})'.format(', '.join(map(str, self.items)))
class EnumCasePattern(Node):
_fields = ('case', 'arguments',)
def __init__(self, case, arguments=None):
super().__init__()
self.case = case
self.arguments = arguments or []
def __str__(self):
if self.arguments:
return '{}({})'.format(self.case, ', '.join(map(str, self.arguments)))
return str(self.case)
class WildcardPattern(Node):
def __str__(self):
return '_'
class Pattern(Node):
_fields = ('expression', 'where_clause',)
def __init__(self, expression, where_clause=None):
super().__init__()
self.expression = expression
self.where_clause = where_clause
def __str__(self):
if self.where_clause:
return '{} where {}'.format(self.expression, self.where_clause)
return str(self.expression)
class MatchingPattern(Node):
_fields = ('value', 'pattern',)
def __init__(self, value, pattern):
super().__init__()
self.value = value
self.pattern = pattern
def __str__(self):
return '{} ~= {}'.format(self.value, self.pattern)
class If(Node):
_fields = ('condition', 'body', 'else_clause',)
def __init__(self, condition, body, else_clause=None):
super().__init__()
self.condition = condition
self.body = body
self.else_clause = else_clause
def __str__(self):
if self.else_clause:
return 'if {} {} else {}'.format(self.condition, self.body, self.else_clause)
return 'if {} {}'.format(self.condition, self.body)
class SwitchCaseClause(Node):
_fields = ('pattern', 'body',)
def __init__(self, pattern, body):
super().__init__()
self.pattern = pattern
self.body = body
def __str__(self):
return 'case {} {}'.format(self.pattern, self.body)
class Switch(Node):
_fields = ('expression', 'clauses',)
def __init__(self, expression, clauses=None):
super().__init__()
self.expression = expression
self.clauses = clauses or []
def __str__(self):
clauses = ''
for clause in self.clauses:
clauses += '\n'.join(' ' + line for line in str(clause).split('\n'))
clauses += '\n'
return 'switch {} {{\n{}}}'.format(self.expression, clauses)
class Assignment(Node):
_fields = ('lvalue', 'rvalue',)
def __init__(self, lvalue, rvalue):
super().__init__()
self.lvalue = lvalue
self.rvalue = rvalue
def __str__(self):
return '{} = {}'.format(self.lvalue, self.rvalue)
class SelfAssignement(Node):
_fields = ('operator', 'lvalue', 'rvalue',)
def __init__(self, operator, lvalue, rvalue):
super().__init__()
self.operator = operator
self.lvalue = lvalue
self.rvalue = rvalue
def __str__(self):
return '{} {} {}'.format(self.lvalue, self.operator, self.rvalue)
class Return(Node):
_fields = ('value',)
def __init__(self, value):
super().__init__()
self.value = value
def __str__(self):
return 'return ' + str(self.value)
class Break(Node):
_fields = ('label',)
def __init__(self, label=None):
super().__init__()
self.label = label
def __str__(self):
if self.label:
return 'break ' + self.label
return 'break'
class Continue(Node):
_fields = ('label',)
def __init__(self, label=None):
super().__init__()
self.label = label
def __str__(self):
if self.label:
return 'continue ' + self.label
return 'continue'
class For(Node):
_fields = ('iterator', 'sequence', 'body', 'label',)
def __init__(self, iterator, sequence, body, label=None):
super().__init__()
self.iterator = iterator
self.sequence = sequence
self.body = body
self.label = label
def __str__(self):
result = 'for {} in {} {}'.format(self.iterator, self.sequence, self.body)
if self.label:
return self.label + ': ' + result
return result
class While(Node):
_fields = ('condition', 'body', 'label',)
def __init__(self, condition, body, label=None):
super().__init__()
self.condition = condition
self.body = body
self.label = label
def __str__(self):
result = 'while {} {}'.format(self.condition, self.body)
if self.label:
return self.label + ': ' + result
return result
class Visitor(object):
def visit(self, node):
method_name = 'visit_' + node.__class__.__name__
return getattr(self, method_name, self.generic_visit)(node)
def generic_visit(self, node):
for attr in node._fields:
value = getattr(node, attr)
if isinstance(value, list):
for item in value:
if isinstance(item, Node):
self.visit(item)
elif isinstance(value, Node):
self.visit(value)
class Transformer(Visitor):
def generic_visit(self, node):
for attr in node._fields:
value = getattr(node, attr)
if isinstance(value, list):
new_values = []
for item in value:
if isinstance(item, Node):
new_value = self.visit(item)
if isinstance(new_value, list):
new_values.extend(new_value)
elif isinstance(new_value, Node):
new_values.append(new_value)
else:
new_values.append(item)
setattr(node, attr, new_values)
elif isinstance(value, Node):
new_node = self.visit(value)
setattr(node, attr, new_node)
return node
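# Illustrative sketch (not part of the original module): a Visitor subclass
# that collects the name of every Identifier node in a tree; `module` stands
# for a hypothetical, already-parsed AST root.
#
#   class IdentifierCollector(Visitor):
#       def __init__(self):
#           self.names = []
#       def visit_Identifier(self, node):
#           self.names.append(node.name)
#           self.generic_visit(node)
#
#   collector = IdentifierCollector()
#   collector.visit(module)
#   print(collector.names)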
|
kyouko-taiga/tango
|
tango/ast.py
|
Python
|
apache-2.0
| 26,571
|
[
"VisIt"
] |
88e90e6d46b78c4196ca871c491be4f23a1503b096c4886937e85956e6c35696
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import hs_core.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('pages', '__first__'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
('hs_core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='NetcdfMetaData',
fields=[
('coremetadata_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='hs_core.CoreMetaData')),
],
options={
},
bases=('hs_core.coremetadata',),
),
migrations.CreateModel(
name='NetcdfResource',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
('comments_count', models.IntegerField(default=0, editable=False)),
('rating_count', models.IntegerField(default=0, editable=False)),
('rating_sum', models.IntegerField(default=0, editable=False)),
('rating_average', models.FloatField(default=0, editable=False)),
('public', models.BooleanField(default=True, help_text=b'If this is true, the resource is viewable and downloadable by anyone')),
('frozen', models.BooleanField(default=False, help_text=b'If this is true, the resource should not be modified')),
('do_not_distribute', models.BooleanField(default=False, help_text=b'If this is true, the resource owner has to designate viewers')),
('discoverable', models.BooleanField(default=True, help_text=b'If this is true, it will turn up in searches.')),
('published_and_frozen', models.BooleanField(default=False, help_text=b'Once this is true, no changes can be made to the resource')),
('content', models.TextField()),
('short_id', models.CharField(default=hs_core.models.short_id, max_length=32, db_index=True)),
('doi', models.CharField(help_text=b"Permanent identifier. Never changes once it's been set.", max_length=1024, null=True, db_index=True, blank=True)),
('object_id', models.PositiveIntegerField(null=True, blank=True)),
('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
('creator', models.ForeignKey(related_name='creator_of_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL, help_text=b'This is the person who first uploaded the resource')),
('edit_groups', models.ManyToManyField(help_text=b'This is the set of Hydroshare Groups who can edit the resource', related_name='group_editable_hs_app_netcdf_netcdfresource', null=True, to='auth.Group', blank=True)),
('edit_users', models.ManyToManyField(help_text=b'This is the set of Hydroshare Users who can edit the resource', related_name='user_editable_hs_app_netcdf_netcdfresource', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('last_changed_by', models.ForeignKey(related_name='last_changed_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL, help_text=b'The person who last changed the resource', null=True)),
('owners', models.ManyToManyField(help_text=b'The person who has total ownership of the resource', related_name='owns_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(related_name='netcdfresources', verbose_name='Author', to=settings.AUTH_USER_MODEL)),
('view_groups', models.ManyToManyField(help_text=b'This is the set of Hydroshare Groups who can view the resource', related_name='group_viewable_hs_app_netcdf_netcdfresource', null=True, to='auth.Group', blank=True)),
('view_users', models.ManyToManyField(help_text=b'This is the set of Hydroshare Users who can view the resource', related_name='user_viewable_hs_app_netcdf_netcdfresource', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Multidimensional (NetCDF)',
},
bases=('pages.page', models.Model),
),
migrations.CreateModel(
name='OriginalCoverage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('_value', models.CharField(max_length=1024, null=True)),
('projection_string_type', models.CharField(max_length=20, null=True, choices=[(b'', b'---------'), (b'EPSG Code', b'EPSG Code'), (b'OGC WKT Projection', b'OGC WKT Projection'), (b'Proj4 String', b'Proj4 String')])),
('projection_string_text', models.TextField(null=True, blank=True)),
('content_type', models.ForeignKey(related_name='hs_app_netcdf_originalcoverage_related', to='contenttypes.ContentType')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('name', models.CharField(max_length=100)),
('unit', models.CharField(max_length=100)),
('type', models.CharField(max_length=100, choices=[(b'Char', b'Char'), (b'Byte', b'Byte'), (b'Short', b'Short'), (b'Int', b'Int'), (b'Float', b'Float'), (b'Double', b'Double'), (b'Int64', b'Int64'), (b'Unsigned Byte', b'Unsigned Byte'), (b'Unsigned Short', b'Unsigned Short'), (b'Unsigned Int', b'Unsigned Int'), (b'Unsigned Int64', b'Unsigned Int64'), (b'String', b'String'), (b'User Defined Type', b'User Defined Type'), (b'Unknown', b'Unknown')])),
('shape', models.CharField(max_length=100)),
('descriptive_name', models.CharField(max_length=100, null=True, verbose_name=b'long name', blank=True)),
('method', models.TextField(null=True, verbose_name=b'comment', blank=True)),
('missing_value', models.CharField(max_length=100, null=True, blank=True)),
('content_type', models.ForeignKey(related_name='hs_app_netcdf_variable_related', to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='originalcoverage',
unique_together=set([('content_type', 'object_id')]),
),
]
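# (Sketch) A generated migration like this is applied with Django's standard
# management command; the app label is assumed from the package name:
#   python manage.py migrate hs_app_netcdf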
|
FescueFungiShare/hydroshare
|
hs_app_netCDF/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 7,033
|
[
"NetCDF"
] |
24f64360901edef9e5939758c339e58164059902178f6e8f883da4ebea9d05ed
|
#!/usr/bin/python3
# Copyright (C) 2007-2010 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
import os
from io import BytesIO
from itertools import cycle
from urllib.request import urlopen
from PIL import Image
from PIL import ImageDraw
from PIL import ImageEnhance
from PIL import ImageOps, ImageChops, ImageFilter
ALL_PALETTE_INDICES = set(range(256))
CHECKBOARD = {}
COLOR_MAP = [255] * 128 + [0] * 128
WWW_CACHE = {}
EXT_BY_FORMATS = {
'JPEG': ['JPG', 'JPEG', 'JPE'],
'TIFF': ['TIF', 'TIFF'],
'SVG': ['SVG', 'SVGZ'],
}
FORMATS_BY_EXT = {}
for format, exts in EXT_BY_FORMATS.items():
for ext in exts:
FORMATS_BY_EXT[ext] = format
CROSS = 'Cross'
ROUNDED = 'Rounded'
SQUARE = 'Square'
CORNERS = [ROUNDED, SQUARE, CROSS]
CORNER_ID = 'rounded_corner_r%d_f%d'
CROSS_POS = (CROSS, CROSS, CROSS, CROSS)
ROUNDED_POS = (ROUNDED, ROUNDED, ROUNDED, ROUNDED)
ROUNDED_RECTANGLE_ID = 'rounded_rectangle_r%d_f%d_s%s_p%s'
class InvalidWriteFormatError(Exception):
pass
def drop_shadow(image, horizontal_offset=5, vertical_offset=5,
background_color=(255, 255, 255, 0), shadow_color=0x444444,
border=8, shadow_blur=3, force_background_color=False, cache=None):
"""Add a gaussian blur drop shadow to an image.
:param image: The image to overlay on top of the shadow.
:type image: PIL Image
:param horizontal_offset: horizontal offset of the shadow from the image;
can be positive or negative.
:type horizontal_offset: int
:param vertical_offset: vertical offset of the shadow from the image;
can be positive or negative.
:type vertical_offset: int
:param background_color: Background color behind the image.
:param shadow_color: Shadow color (darkness).
:param border:
Width of the border around the image. This must be wide
enough to account for the blurring of the shadow.
:param shadow_blur:
Number of times to apply the filter. More shadow_blur
produce a more blurred shadow, but increase processing time.
"""
if cache is None:
cache = {}
if has_transparency(image) and image.mode != 'RGBA':
# Make sure 'LA' and 'P' with transparency are handled
image = image.convert('RGBA')
#get info
size = image.size
mode = image.mode
back = None
#assert image is RGBA
if mode != 'RGBA':
if mode != 'RGB':
image = image.convert('RGB')
mode = 'RGB'
#create cache id
id = ''.join([str(x) for x in ['shadow_', size,
horizontal_offset, vertical_offset, border, shadow_blur,
background_color, shadow_color]])
#look up in cache
if id in cache:
#retrieve from cache
back, back_size = cache[id]
if back is None:
#size of backdrop
back_size = (size[0] + abs(horizontal_offset) + 2 * border,
size[1] + abs(vertical_offset) + 2 * border)
#create shadow mask
if mode == 'RGBA':
image_mask = get_alpha(image)
shadow = Image.new('L', back_size, 0)
else:
image_mask = Image.new(mode, size, shadow_color)
shadow = Image.new(mode, back_size, background_color)
shadow_left = border + max(horizontal_offset, 0)
shadow_top = border + max(vertical_offset, 0)
paste(shadow, image_mask, (shadow_left, shadow_top,
shadow_left + size[0], shadow_top + size[1]))
del image_mask # free up memory
#blur shadow mask
#Apply the filter to blur the edges of the shadow. Since a small
#kernel is used, the filter must be applied repeatedly to get a decent
#blur.
n = 0
while n < shadow_blur:
shadow = shadow.filter(ImageFilter.BLUR)
n += 1
#create back
if mode == 'RGBA':
back = Image.new('RGBA', back_size, shadow_color)
back.putalpha(shadow)
del shadow # free up memory
else:
back = shadow
cache[id] = back, back_size
#Paste the input image onto the shadow backdrop
image_left = border - min(horizontal_offset, 0)
image_top = border - min(vertical_offset, 0)
if mode == 'RGBA':
paste(back, image, (image_left, image_top), image)
if force_background_color:
mask = get_alpha(back)
paste(back, Image.new('RGB', back.size, background_color),
(0, 0), ImageChops.invert(mask))
back.putalpha(mask)
else:
paste(back, image, (image_left, image_top))
return back
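# Minimal usage sketch for drop_shadow ('photo.jpg' is a hypothetical file):
#   im = Image.open('photo.jpg')
#   shadowed = drop_shadow(im, horizontal_offset=8, vertical_offset=8,
#                          shadow_blur=4)
#   shadowed.save('photo_shadow.png')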
def round_image(image, cache={}, round_all=True, rounding_type=None,
radius=100, opacity=255, pos=ROUNDED_POS, back_color='#FFFFFF'):
if image.mode != 'RGBA':
image = image.convert('RGBA')
if round_all:
pos = 4 * (rounding_type, )
mask = create_rounded_rectangle(image.size, cache, radius, opacity, pos)
paste(image, Image.new('RGB', image.size, back_color), (0, 0),
ImageChops.invert(mask))
image.putalpha(mask)
return image
def create_rounded_rectangle(size=(600, 400), cache={}, radius=100,
opacity=255, pos=ROUNDED_POS):
#rounded_rectangle
im_x, im_y = size
rounded_rectangle_id = ROUNDED_RECTANGLE_ID % (radius, opacity, size, pos)
if rounded_rectangle_id in cache:
return cache[rounded_rectangle_id]
else:
#cross
cross_id = ROUNDED_RECTANGLE_ID % (radius, opacity, size, CROSS_POS)
if cross_id in cache:
cross = cache[cross_id]
else:
cross = cache[cross_id] = Image.new('L', size, 0)
draw = ImageDraw.Draw(cross)
draw.rectangle((radius, 0, im_x - radius, im_y), fill=opacity)
draw.rectangle((0, radius, im_x, im_y - radius), fill=opacity)
if pos == CROSS_POS:
return cross
#corner
corner_id = CORNER_ID % (radius, opacity)
if corner_id in cache:
corner = cache[corner_id]
else:
corner = cache[corner_id] = create_corner(radius, opacity)
#rounded rectangle
rectangle = Image.new('L', (radius, radius), 255)
rounded_rectangle = cross.copy()
for index, angle in enumerate(pos):
if angle == CROSS:
continue
if angle == ROUNDED:
element = corner
else:
element = rectangle
if index % 2:
x = im_x - radius
element = element.transpose(Image.FLIP_LEFT_RIGHT)
else:
x = 0
if index < 2:
y = 0
else:
y = im_y - radius
element = element.transpose(Image.FLIP_TOP_BOTTOM)
paste(rounded_rectangle, element, (x, y))
cache[rounded_rectangle_id] = rounded_rectangle
return rounded_rectangle
def create_corner(radius=100, opacity=255, factor=2):
corner = Image.new('L', (factor * radius, factor * radius), 0)
draw = ImageDraw.Draw(corner)
draw.pieslice((0, 0, 2 * factor * radius, 2 * factor * radius),
180, 270, fill=opacity)
corner = corner.resize((radius, radius), Image.ANTIALIAS)
return corner
def get_format(ext):
"""Guess the image format by the file extension.
:param ext: file extension
:type ext: string
:returns: image format
:rtype: string
.. warning::
This is only meant to check before saving files. For existing files
open the image with PIL and check its format attribute.
>>> get_format('jpg')
'JPEG'
"""
ext = ext.lstrip('.').upper()
return FORMATS_BY_EXT.get(ext, ext)
def open_image_data(data):
"""Open image from format data.
:param data: image format data
:type data: bytes
:returns: image
:rtype: pil.Image
"""
return Image.open(BytesIO(data))
def open_image_exif(uri):
"""Open local files or remote files over http and transpose the
image to its exif orientation.
:param uri: image location
:type uri: string
:returns: image
:rtype: pil.Image
"""
return transpose_exif(open_image(uri))
class _ByteCounter:
"""Helper class to count how many bytes are written to a file.
.. see also:: :func:`get_size`
>>> bc = _ByteCounter()
>>> bc.write('12345')
>>> bc.bytes
5
"""
def __init__(self):
self.bytes = 0
def write(self, data):
self.bytes += len(data)
def get_size(im, format, **options):
"""Gets the size in bytes if the image would be written to a file.
:param format: image file format (e.g. ``'JPEG'``)
:type format: string
:returns: the file size in bytes
:rtype: int
"""
try:
out = _ByteCounter()
im.save(out, format, **options)
return out.bytes
except AttributeError:
# fall back on full in-memory compression
out = BytesIO()
im.save(out, format, **options)
return len(out.getvalue())
def get_quality(im, size, format, down=0, up=100, delta=1000, options=None):
"""Figure out recursively the quality save parameter to obtain a
certain image size. This mostly used for ``JPEG`` images.
:param im: image
:type im: pil.Image
:param format: image file format (e.g. ``'JPEG'``)
:type format: string
:param down: minimum file size in bytes
:type down: int
:param up: maximum file size in bytes
:type up: int
:param delta: fault tolerance in bytes
:type delta: int
:param options: image save options
:type options: dict
:returns: save quality
:rtype: int
Example::
filename = '/home/stani/sync/Desktop/IMGA3345.JPG'
im = Image.open(filename)
q = get_quality(im, 300000, "JPEG")
im.save(filename.replace('.JPG', '_sized.jpg'), quality=q)
"""
if options is None:
options = {}
q = options['quality'] = (down + up) // 2
if q == down or q == up:
return max(q, 1)
s = get_size(im, format, **options)
if abs(s - size) < delta:
return q
elif s > size:
return get_quality(im, size, format, down, up=q, options=options)
else:
return get_quality(im, size, format, down=q, up=up, options=options)
def fill_background_color(image, color):
"""Fills given image with background color.
:param image: source image
:type image: pil.Image
:param color: background color
:type color: tuple of int
:returns: filled image
:rtype: pil.Image
"""
if image.mode == 'LA':
image = image.convert('RGBA')
elif image.mode != 'RGBA' and\
not (image.mode == 'P' and 'transparency' in image.info):
return image
if len(color) == 4 and color[-1] != 255:
mode = 'RGBA'
else:
mode = 'RGB'
back = Image.new(mode, image.size, color)
if (image.mode == 'P' and mode == 'RGBA'):
image = image.convert('RGBA')
if has_alpha(image):
paste(back, image, mask=image)
elif image.mode == 'P':
palette = image.getpalette()
index = image.info['transparency']
palette[index * 3: index * 3 + 3] = color[:3]
image.putpalette(palette)
del image.info['transparency']
back = image
else:
paste(back, image)
return back
def generate_layer(image_size, mark, method,
horizontal_offset, vertical_offset,
horizontal_justification, vertical_justification,
orientation, opacity):
"""Generate new layer for backgrounds or watermarks on which a given
image ``mark`` can be positioned, scaled or repeated.
:param image_size: size of the reference image
:type image_size: tuple of int
:param mark: image mark
:type mark: pil.Image
:param method: ``'Tile'``, ``'Scale'``, ``'By Offset'``
:type method: string
:param horizontal_offset: horizontal offset
:type horizontal_offset: int
:param vertical_offset: vertical offset
:type vertical_offset: int
:param horizontal_justification: ``'Left'``, ``'Middle'``, ``'Right'``
:type horizontal_justification: string
:param vertical_justification: ``'Top'``, ``'Middle'``, ``'Bottom'``
:type vertical_justification: string
:param orientation: mark orientation (e.g. ``'ROTATE_270'``)
:type orientation: string
:param opacity: opacity within ``[0, 1]``
:type opacity: float
:returns: generated layer
:rtype: pil.Image
.. see also:: :func:`reduce_opacity`
"""
mark = convert_safe_mode(open_image(mark))
opacity /= 100.0
mark = reduce_opacity(mark, opacity)
layer = Image.new('RGBA', image_size, (0, 0, 0, 0))
if method == 'Tile':
for y in range(0, image_size[1], mark.size[1]):
for x in range(0, image_size[0], mark.size[0]):
paste(layer, mark, (x, y))
elif method == 'Scale':
# scale, but preserve the aspect ratio
ratio = min(float(image_size[0]) / mark.size[0],
float(image_size[1]) / mark.size[1])
w = int(mark.size[0] * ratio)
h = int(mark.size[1] * ratio)
mark = mark.resize((w, h))
paste(layer, mark, ((image_size[0] - w) // 2,
(image_size[1] - h) // 2))
elif method == 'By Offset':
location = calculate_location(
horizontal_offset, vertical_offset,
horizontal_justification, vertical_justification,
image_size, mark.size)
if orientation:
orientation_value = getattr(Image, orientation)
mark = mark.transpose(orientation_value)
paste(layer, mark, location, force=True)
else:
raise ValueError('Unknown method "%s" for generate_layer.' % method)
return layer
def identity_color(image, value=0):
"""Get a color with same color component values.
>>> im = Image.new('RGB', (1,1))
>>> identity_color(im, 2)
(2, 2, 2)
>>> im = Image.new('L', (1,1))
>>> identity_color(im, 7)
7
"""
bands = image.getbands()
if len(bands) == 1:
return value
return tuple([value for band in bands])
def blend(im1, im2, amount, color=None):
"""Blend two images with each other. If the images differ in size
the color will be used for undefined pixels.
:param im1: first image
:type im1: pil.Image
:param im2: second image
:type im2: pil.Image
:param amount: amount of blending, within ``[0, 1]``
:type amount: float
:param color: color of undefined pixels
:type color: tuple
:returns: blended image
:rtype: pil.Image
"""
im2 = convert_safe_mode(im2)
if im1.size == im2.size:
im1 = convert(im1, im2.mode)
else:
if color is None:
expanded = Image.new(im2.mode, im2.size)
elif im2.mode in ('1', 'L') and type(color) != int:
expanded = Image.new(im2.mode, im2.size, color[0])
else:
expanded = Image.new(im2.mode, im2.size, color)
im1 = im1.convert(expanded.mode)
we, he = expanded.size
wi, hi = im1.size
paste(expanded, im1, ((we - wi) // 2, (he - hi) // 2),
im1.convert('RGBA'))
im1 = expanded
return Image.blend(im1, im2, amount)
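# Sketch: cross-fade two images halfway; if their sizes differ, undefined
# pixels are filled with the given color:
#   mixed = blend(im1, im2, 0.5, color=(255, 255, 255))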
def reduce_opacity(im, opacity):
"""Returns an image with reduced opacity if opacity is
within ``[0, 1]``.
:param im: source image
:type im: pil.Image
:param opacity: opacity within ``[0, 1]``
:type opacity: float
:returns im: image
:rtype: pil.Image
>>> im = Image.new('RGBA', (1, 1), (255, 255, 255))
>>> im = reduce_opacity(im, 0.5)
>>> im.getpixel((0,0))
(255, 255, 255, 127)
"""
if opacity < 0 or opacity > 1:
return im
alpha = get_alpha(im)
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
put_alpha(im, alpha)
return im
def calculate_location(horizontal_offset, vertical_offset,
horizontal_justification, vertical_justification,
canvas_size, image_size):
"""Calculate location based on offset and justification. Offsets
can be positive and negative.
:param horizontal_offset: horizontal offset
:type horizontal_offset: int
:param vertical_offset: vertical offset
:type vertical_offset: int
:param horizontal_justification: ``'Left'``, ``'Middle'``, ``'Right'``
:type horizontal_justification: string
:param vertical_justification: ``'Top'``, ``'Middle'``, ``'Bottom'``
:type vertical_justification: string
:param canvas_size: size of the total canvas
:type canvas_size: tuple of int
:param image_size: size of the image/text which needs to be placed
:type image_size: tuple of int
:returns: location
:rtype: tuple of int
.. see also:: :func:`generate layer`
>>> calculate_location(50, 50, 'Left', 'Middle', (100,100), (10,10))
(50, 45)
"""
canvas_width, canvas_height = canvas_size
image_width, image_height = image_size
# check offsets
if horizontal_offset < 0:
horizontal_offset += canvas_width
if vertical_offset < 0:
vertical_offset += canvas_height
# check justifications
if horizontal_justification == 'Left':
horizontal_delta = 0
elif horizontal_justification == 'Middle':
horizontal_delta = -image_width // 2
elif horizontal_justification == 'Right':
horizontal_delta = -image_width
if vertical_justification == 'Top':
vertical_delta = 0
elif vertical_justification == 'Middle':
vertical_delta = -image_height // 2
elif vertical_justification == 'Bottom':
vertical_delta = -image_height
return horizontal_offset + horizontal_delta, \
vertical_offset + vertical_delta
####################################
#### PIL helper functions ####
####################################
def flatten(l):
"""Flatten a list.
:param l: list to be flattened
:type l: list
:returns: flattened list
:rtype: list
>>> flatten([[1, 2], [3]])
[1, 2, 3]
"""
return [item for sublist in l for item in sublist]
def has_alpha(image):
"""Checks if the image has an alpha band.
i.e. the image mode is either RGBA or LA.
The transparency in the P mode doesn't count as an alpha band
:param image: the image to check
:type image: PIL image object
:returns: True or False
:rtype: boolean
"""
return image.mode.endswith('A')
def has_transparency(image):
"""Checks if the image has transparency.
The image has an alpha band or a P mode with transparency.
:param image: the image to check
:type image: PIL image object
:returns: True or False
:rtype: boolean
"""
return (image.mode == 'P' and 'transparency' in image.info) or\
has_alpha(image)
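# Sketch: flatten an image before saving to a format without alpha support,
# using fill_background_color defined above:
#   if has_transparency(im):
#       im = fill_background_color(im, (255, 255, 255))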
def get_alpha(image):
"""Gets the image alpha band. Can handles P mode images with transpareny.
Returns a band with all values set to 255 if no alpha band exists.
:param image: input image
:type image: PIL image object
:returns: alpha as a band
:rtype: single band image object
"""
if has_alpha(image):
return image.split()[-1]
if image.mode == 'P' and 'transparency' in image.info:
return image.convert('RGBA').split()[-1]
# No alpha layer, create one.
return Image.new('L', image.size, 255)
def get_format_data(image, format):
"""Convert the image in the file bytes of the image. By consequence
this byte data is different for the chosen format (``JPEG``,
``TIFF``, ...).
.. see also:: :func:`thumbnail.get_format_data`
:param image: source image
:type image: pil.Image
:param format: image file type format
:type format: string
:returns: byte data of the image
"""
f = BytesIO()
convert_save_mode_by_format(image, format).save(f, format)
return f.getvalue()
def get_palette(image):
"""Gets the palette of an image as a sequence of (r, g, b) tuples.
:param image: image with a palette
:type image: pil.Image
:returns: palette colors
:rtype: a sequence of (r, g, b) tuples
"""
palette = image.resize((256, 1))
palette.putdata(range(256))
return list(palette.convert("RGB").getdata())
def get_used_palette_indices(image):
"""Get used color indices in an image palette.
:param image: image with a palette
:type image: pil.Image
:returns: used colors of the palette
:rtype: set of integers (0-255)
"""
return set(image.getdata())
def get_used_palette_colors(image):
"""Get used colors in an image palette as a sequence of (r, g, b) tuples.
:param image: image with a palette
:type image: pil.Image
:returns: used colors of the palette
:rtype: sequence of (r, g, b) tuples
"""
used_indices = get_used_palette_indices(image)
if 'transparency' in image.info:
used_indices -= set([image.info['transparency']])
n = len(used_indices)
palette = image.resize((n, 1))
palette.putdata(used_indices)
return palette.convert("RGB").getdata()
def get_unused_palette_indices(image):
"""Get unused color indices in an image palette.
:param image: image with a palette
:type image: pil.Image
:returns: unused color indices of the palette
:rtype: set of 0-255
"""
return ALL_PALETTE_INDICES - get_used_palette_indices(image)
def fit_color_in_palette(image, color):
"""Fit a color into a palette. If the color exists already in the palette
return its current index, otherwise add the color to the palette if
possible. Returns -1 for color index if all colors are used already.
:param image: image with a palette
:type image: pil.Image
:param color: color to fit
:type color: (r, g, b) tuple
:returns: color index, (new) palette
:rtype: (r, g, b) tuple, sequence of (r, g, b) tuples
"""
palette = get_palette(image)
try:
index = palette.index(color)
except ValueError:
index = -1
if index > -1:
# Check if it is not the transparent index, as that doesn't qualify.
try:
transparent = index == image.info['transparency']
except KeyError:
transparent = False
# If transparent, look further
if transparent:
try:
index = palette[index + 1:].index(color) + index + 1
except ValueError:
index = -1
if index == -1:
unused = list(get_unused_palette_indices(image))
if unused:
index = unused[0]
palette[index] = color # add color to palette
else:
palette = None # palette is full
return index, palette
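# Sketch: make sure pure red is available in a paletted image before drawing
# with it; an index of -1 means all 256 palette slots are already taken:
#   index, palette = fit_color_in_palette(im, (255, 0, 0))
#   if index != -1:
#       im.putpalette(flatten(palette))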
def put_palette(image_to, image_from, palette=None):
"""Copies the palette and transparency of one image to another.
:param image_to: image with a palette
:type image_to: pil.Image
:param image_from: image with a palette
:type image_from: pil.Image
:param palette: image palette
:type palette: sequence of (r, g, b) tuples or None
"""
if palette is None:
palette = get_palette(image_from)
image_to.putpalette(flatten(palette))
if 'transparency' in image_from.info:
image_to.info['transparency'] = image_from.info['transparency']
def put_alpha(image, alpha):
"""Copies the given band to the alpha layer of the given image.
:param image: input image
:type image: PIL image object
:param alpha: the alpha band to copy
:type alpha: single band image object
"""
if image.mode in ['CMYK', 'YCbCr', 'P']:
image = image.convert('RGBA')
elif image.mode in ['1', 'F']:
image = image.convert('RGBA')
image.putalpha(alpha)
def remove_alpha(image):
"""Returns a copy of the image after removing the alpha band or
transparency
:param image: input image
:type image: PIL image object
:returns: the input image after removing the alpha band or transparency
:rtype: PIL image object
"""
if image.mode == 'RGBA':
return image.convert('RGB')
if image.mode == 'LA':
return image.convert('L')
if image.mode == 'P' and 'transparency' in image.info:
img = image.convert('RGB')
del img.info['transparency']
return img
return image
def paste(destination, source, box=(0, 0), mask=None, force=False):
""""Pastes the source image into the destination image while using an
alpha channel if available.
:param destination: destination image
:type destination: PIL image object
:param source: source image
:type source: PIL image object
:param box:
The box argument is either a 2-tuple giving the upper left corner,
a 4-tuple defining the left, upper, right, and lower pixel coordinate,
or None (same as (0, 0)). If a 4-tuple is given, the size of the
pasted image must match the size of the region.
:type box: tuple
:param mask: mask or None
:type mask: bool or PIL image object
:param force:
With mask: force the inverted-alpha paste or not.
Without mask:
- If ``True``, the alpha channel of the destination is overwritten
with the alpha channel of the source image, so the affected
pixels of the destination layer are abandoned and replaced by
the corresponding pixels of the source image. This is usually
what you want when pasting onto a transparent canvas.
- If ``False``, a mask is used when the image has an alpha
channel, so pixels of the destination image show through
where the source image is transparent.
:type force: bool
"""
# Paste on top
if mask and source == mask:
if has_alpha(source):
# invert_alpha = the transparent pixels of the destination
if has_alpha(destination) and (destination.size == source.size
or force):
invert_alpha = ImageOps.invert(get_alpha(destination))
if invert_alpha.size != source.size:
# if sizes are not the same be careful!
# check the results visually
if len(box) == 2:
w, h = source.size
box = (box[0], box[1], box[0] + w, box[1] + h)
invert_alpha = invert_alpha.crop(box)
else:
invert_alpha = None
# we don't want composite of the two alpha channels
source_without_alpha = remove_alpha(source)
# paste on top of the opaque destination pixels
destination.paste(source_without_alpha, box, source)
if invert_alpha is not None:
# the alpha channel is ok now, so save it
destination_alpha = get_alpha(destination)
# paste on top of the transparent destination pixels
# the transparent pixels of the destination should
# be filled with the color information from the source
destination.paste(source_without_alpha, box, invert_alpha)
# restore the correct alpha channel
destination.putalpha(destination_alpha)
else:
destination.paste(source, box)
elif mask:
destination.paste(source, box, mask)
else:
destination.paste(source, box)
if force and has_alpha(source):
destination_alpha = get_alpha(destination)
source_alpha = get_alpha(source)
destination_alpha.paste(source_alpha, box)
destination.putalpha(destination_alpha)
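# Sketch: composite a transparent logo onto an opaque background while
# keeping its soft alpha edges ('logo.png' is a hypothetical RGBA file):
#   background = Image.new('RGB', (200, 200), (255, 255, 255))
#   logo = Image.open('logo.png')
#   paste(background, logo, (10, 10), mask=logo)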
def auto_crop(image):
"""Crops all transparent or black background from the image
:param image: input image
:type image: PIL image object
:returns: the cropped image
:rtype: PIL image object
"""
alpha = get_alpha(image)
box = alpha.getbbox()
return convert_safe_mode(image).crop(box)
def convert(image, mode, *args, **keyw):
"""Returns a converted copy of an image
:param image: input image
:type image: PIL image object
:param mode: the new mode
:type mode: string
:param args: extra options
:type args: tuple of values
:param keyw: extra keyword options
:type keyw: dictionary of options
:returns: the converted image
:rtype: PIL image object
"""
if mode == 'P':
if image.mode == 'P':
return image
if image.mode in ['1', 'F']:
return image.convert('L').convert(mode, *args, **keyw)
if image.mode in ['RGBA', 'LA']:
alpha = get_alpha(image)
output = image.convert('RGB').convert(
mode, colors=255, *args, **keyw)
# fill the transparent areas with palette index 255; the paste helper
# above falls through to PIL's abbreviated paste(value, mask) form here
paste(output,
255, alpha.point(COLOR_MAP))
output.info['transparency'] = 255
return output
return image.convert('RGB').convert(mode, *args, **keyw)
if image.mode == 'P' and mode == 'LA':
# A workaround for a PIL bug.
# Converting from P to LA directly doesn't work.
return image.convert('RGBA').convert('LA', *args, **keyw)
if has_transparency(image) and mode not in ['RGBA', 'LA']:
if image.mode == 'P':
image = image.convert('RGBA')
del image.info['transparency']
#image = fill_background_color(image, (255, 255, 255, 255))
image = image.convert(mode, *args, **keyw)
return image
return image.convert(mode, *args, **keyw)
def convert_safe_mode(image):
"""Converts image into a processing-safe mode.
:param image: input image
:type image: PIL image object
:returns: the converted image
:rtype: PIL image object
"""
if image.mode in ['1', 'F']:
return image.convert('L')
if image.mode == 'P' and 'transparency' in image.info:
img = image.convert('RGBA')
del img.info['transparency']
return img
if image.mode in ['P', 'YCbCr', 'CMYK', 'RGBX']:
return image.convert('RGB')
return image
def convert_save_mode_by_format(image, format):
"""Converts image into a saving-safe mode.
:param image: input image
:type image: PIL image object
:param format: target format
:type format: string
:returns: the converted image
:rtype: PIL image object
"""
#TODO: Extend this helper function to support other formats as well
if image.mode == 'P':
# Make sure P is handled correctly
if format not in ['GIF', 'PNG', 'TIFF', 'IM', 'PCX']:
image = remove_alpha(image)
if format == 'JPEG':
if image.mode in ['RGBA', 'P']:
return image.convert('RGB')
if image.mode in ['LA']:
return image.convert('L')
elif format == 'BMP':
if image.mode in ['LA']:
return image.convert('L')
if image.mode in ['P', 'RGBA', 'YCbCr', 'CMYK']:
return image.convert('RGB')
elif format == 'DIB':
if image.mode in ['YCbCr', 'CMYK']:
return image.convert('RGB')
elif format == 'EPS':
if image.mode in ['1', 'LA']:
return image.convert('L')
if image.mode in ['P', 'RGBA', 'YCbCr']:
return image.convert('RGB')
elif format == 'GIF':
return convert(image, 'P', palette=Image.ADAPTIVE)
elif format == 'PBM':
if image.mode != '1':
return image.convert('1')
elif format == 'PCX':
if image.mode in ['RGBA', 'CMYK', 'YCbCr']:
return image.convert('RGB')
if image.mode in ['LA', '1']:
return image.convert('L')
elif format == 'PDF':
if image.mode in ['LA']:
return image.convert('L')
if image.mode in ['RGBA', 'YCbCr']:
return image.convert('RGB')
elif format == 'PGM':
if image.mode != 'L':
return image.convert('L')
elif format == 'PPM':
if image.mode in ['P', 'CMYK', 'YCbCr']:
return image.convert('RGB')
if image.mode in ['LA']:
return image.convert('L')
elif format == 'PS':
if image.mode in ['1', 'LA']:
return image.convert('L')
if image.mode in ['P', 'RGBA', 'YCbCr']:
return image.convert('RGB')
elif format == 'XBM':
if image.mode != '1':
return image.convert('1')
elif format == 'TIFF':
if image.mode in ['YCbCr']:
return image.convert('RGB')
elif format == 'PNG':
if image.mode in ['CMYK', 'YCbCr']:
return image.convert('RGB')
#for consistency return a copy! (thumbnail.py depends on it)
return image.copy()
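# Sketch: prepare an RGBA image for JPEG output; JPEG has no alpha channel,
# so the helper converts it to RGB first:
#   safe = convert_save_mode_by_format(im, 'JPEG')
#   safe.save('out.jpg')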
def save_check_mode(image, filename, **options):
#save image with pil
save(image, filename, **options)
#verify saved file
try:
image_file = Image.open(filename)
image_file.verify()
except IOError:
# We can't verify the image mode with PIL, so issue no warnings.
return ''
if image.mode != image_file.mode:
return image_file.mode
return ''
def save_safely(image, filename):
"""Saves an image with a filename and raise the specific
``InvalidWriteFormatError`` in case of an error instead of a
``KeyError``. It can also save IM files with unicode.
:param image: image
:type image: pil.Image
:param filename: image filename
:type filename: string
"""
ext = os.path.splitext(filename)[-1]
format = get_format(ext[1:])
image = convert_save_mode_by_format(image, format)
save(image, filename)
def get_reverse_transposition(transposition):
"""Get the reverse transposition method.
:param transposition: transpostion, e.g. ``Image.ROTATE_90``
:returns: inverse transpostion, e.g. ``Image.ROTATE_270``
"""
if transposition == Image.ROTATE_90:
return Image.ROTATE_270
elif transposition == Image.ROTATE_270:
return Image.ROTATE_90
return transposition
def get_exif_transposition(orientation):
"""Get the transposition methods necessary to aling the image to
its exif orientation.
:param orientation: exif orientation
:type orientation: int
:returns: (transposition methods, reverse transpostion methods)
:rtype: tuple
"""
#see EXIF.py
if orientation == 1:
transposition = transposition_reverse = ()
elif orientation == 2:
transposition = Image.FLIP_LEFT_RIGHT,
transposition_reverse = Image.FLIP_LEFT_RIGHT,
elif orientation == 3:
transposition = Image.ROTATE_180,
transposition_reverse = Image.ROTATE_180,
elif orientation == 4:
transposition = Image.FLIP_TOP_BOTTOM,
transposition_reverse = Image.FLIP_TOP_BOTTOM,
elif orientation == 5:
transposition = Image.FLIP_LEFT_RIGHT, \
Image.ROTATE_90
transposition_reverse = Image.ROTATE_270, \
Image.FLIP_LEFT_RIGHT
elif orientation == 6:
transposition = Image.ROTATE_270,
transposition_reverse = Image.ROTATE_90,
elif orientation == 7:
transposition = Image.FLIP_LEFT_RIGHT, \
Image.ROTATE_270
transposition_reverse = Image.ROTATE_90, \
Image.FLIP_LEFT_RIGHT
elif orientation == 8:
transposition = Image.ROTATE_90,
transposition_reverse = Image.ROTATE_270,
else:
transposition = transposition_reverse = ()
return transposition, transposition_reverse
def get_exif_orientation(image):
"""Gets the exif orientation of an image.
:param image: image
:type image: pil.Image
:returns: orientation
:rtype: int
"""
if not hasattr(image, '_getexif'):
return 1
try:
_exif = image._getexif()
if not _exif:
return 1
return _exif[0x0112]
except KeyError:
return 1
def transpose(image, methods):
"""Transpose with a sequence of transformations, mainly useful
for exif.
:param image: image
:type image: pil.Image
:param methods: transposition methods
:type methods: list
:returns: transposed image
:rtype: pil.Image
"""
for method in methods:
image = image.transpose(method)
return image
def transpose_exif(image, reverse=False):
"""Transpose an image to its exif orientation.
:param image: image
:type image: pil.Image
:param reverse: False when opening, True when saving
:type reverse: bool
:returns: transposed image
:rtype: pil.Image
"""
orientation = get_exif_orientation(image)
transposition = get_exif_transposition(orientation)[int(reverse)]
if transposition:
return transpose(image, transposition)
return image
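# Sketch: load a camera photo and rotate it upright according to its EXIF
# orientation tag ('dsc0001.jpg' is a hypothetical file):
#   im = Image.open('dsc0001.jpg')
#   upright = transpose_exif(im)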
def checkboard(size, delta=8, fg=(128, 128, 128), bg=(204, 204, 204)):
"""Draw an n x n checkboard, which is often used as background
for transparent images. The checkboards are stored in the
``CHECKBOARD`` cache.
:param delta: dimension of one square
:type delta: int
:param fg: foreground color
:type fg: tuple of int
:param bg: background color
:type bg: tuple of int
:returns: checkboard image
:rtype: pil.Image
"""
if size not in CHECKBOARD:
dim = max(size)
n = int(dim / delta) + 1 # FIXME: now acts like square->nx, ny
def sq_start(i):
"Return the x/y start coord of the square at column/row i."
return i * delta
def square(i, j):
"Return the square corners"
return list(map(sq_start, [i, j, i + 1, j + 1]))
image = Image.new("RGB", size, bg)
draw_square = ImageDraw.Draw(image).rectangle
squares = (square(i, j)
for i_start, j in zip(cycle((0, 1)), range(n))
for i in range(i_start, n, 2))
for sq in squares:
draw_square(sq, fill=fg)
CHECKBOARD[size] = image
return CHECKBOARD[size].copy()
def add_checkboard(image):
""""If the image has a transparent mask, a RGB checkerboard will be
drawn in the background.
.. note::
In case of a thumbnail, the resulting image can not be used for
the cache, as it replaces the transparency layer with a non
transparent checkboard.
:param image: image
:type image: pil.Image
:returns: image, with checkerboard if transparent
:rtype: pil.Image
"""
if (image.mode == 'P' and 'transparency' in image.info) or\
image.mode.endswith('A'):
# transparent image
image = image.convert('RGBA')
image_bg = checkboard(image.size)
paste(image_bg, image, (0, 0), image)
return image_bg
else:
return image
|
Curly060/Cinnamon
|
files/usr/share/cinnamon/cinnamon-settings/bin/imtools.py
|
Python
|
gpl-2.0
| 39,628
|
[
"Gaussian"
] |
688240100ff8e9fc9e7dde02158c351b366bdeeecf4091bbd07c8993e7af17ca
|
import os.path
import pysam
__program__ = 'prepare'
__author__ = 'Soh Ishiguro <yukke@g-language.org>'
__license__ = ''
__status__ = 'development'
class AlignmentPreparation(object):
def __init__(self):
# TODO: AlignmentStream.__init__ move into here.
pass
def alignment_prepare(self):
raise NotImplementedError
def __sort(self):
if not os.path.isfile(self.samfile + 'sorted.bam'):  # pysam.sort below writes this path
try:
pysam.sort(self.samfile, self.samfile + 'sorted')
sort_log = pysam.sort.getMessage()
return True
except:
raise RuntimeError()
else:
print "already sorted"
return False
def __index(self):
if not os.path.isfile(self.samfile + '.bai'):  # pysam.index writes <bam>.bai
try:
pysam.index(self.samfile)
return True
except:
raise RuntimeError()
else:
print "already indexed"
return False
def __faidx(self):
if not os.path.isfile(self.fafile + '.fai'):
try:
pysam.faidx(self.fafile)
return True
except:
raise RuntimeError()
else:
print "already exist"
return False
def __merge_bams(self, bams=[]):
for _ in bams:
if not os.path.isfile(_):
raise RuntimeError()
try:
pysam.merge('merged.bam', *bams)  # samtools merge needs an output path; 'merged.bam' is an assumed name
return True
except:
raise RuntimeError()
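# Illustrative sketch (hypothetical subclass): the private helpers above rely
# on `self.samfile` and `self.fafile`, so a concrete subclass must set them.
# Note Python's name mangling for the double-underscore methods.
#
# class BamPreparation(AlignmentPreparation):
#     def __init__(self, samfile, fafile):
#         self.samfile = samfile
#         self.fafile = fafile
#     def alignment_prepare(self):
#         self._AlignmentPreparation__sort()
#         self._AlignmentPreparation__index()
#         self._AlignmentPreparation__faidx()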
|
soh-i/Ivy
|
src/Ivy/alignment/prepare.py
|
Python
|
gpl-2.0
| 1,568
|
[
"pysam"
] |
a59d4e4537a82e43b48633176759a6efa3dce70b66a7d8689f51cf4d5209969d
|
"""Class for making requests to a ComponentMonitoring Service."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Base.Client import Client, createClient
@createClient('Framework/ComponentMonitoring')
class ComponentMonitoringClient(Client):
def __init__(self, **kwargs):
"""
Constructor function
"""
super(ComponentMonitoringClient, self).__init__(**kwargs)
self.setServer('Framework/ComponentMonitoring')
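# Minimal usage sketch (assumes a configured DIRAC installation and a valid
# proxy; DIRAC RPC calls return S_OK/S_ERROR dictionaries):
#   client = ComponentMonitoringClient()
#   result = client.ping()
#   if result['OK']:
#       print(result['Value'])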
|
yujikato/DIRAC
|
src/DIRAC/FrameworkSystem/Client/ComponentMonitoringClient.py
|
Python
|
gpl-3.0
| 531
|
[
"DIRAC"
] |
8e1de74d37832428329acc5d683be8de5e4b1d8f9fc09458de4f16262feb9c6e
|
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each included distribution is an instance of the class rv_continuous:
For each given name the following methods are available:
.. autosummary::
:toctree: generated/
rv_continuous
rv_continuous.pdf
rv_continuous.logpdf
rv_continuous.cdf
rv_continuous.logcdf
rv_continuous.sf
rv_continuous.logsf
rv_continuous.ppf
rv_continuous.isf
rv_continuous.moment
rv_continuous.stats
rv_continuous.entropy
rv_continuous.fit
rv_continuous.expect
Calling the instance as a function returns a frozen pdf whose shape,
location, and scale parameters are fixed.
Similarly, each discrete distribution is an instance of the class
rv_discrete:
.. autosummary::
:toctree: generated/
rv_discrete
rv_discrete.rvs
rv_discrete.pmf
rv_discrete.logpmf
rv_discrete.cdf
rv_discrete.logcdf
rv_discrete.sf
rv_discrete.logsf
rv_discrete.ppf
rv_discrete.isf
rv_discrete.stats
rv_discrete.moment
rv_discrete.entropy
rv_discrete.expect
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
t -- Student's T
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which work for masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest --
mode -- Modal value
moment -- Central moment
normaltest --
skew -- Skewness
skewtest --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
nanmean -- Mean, ignoring NaN values
nanstd -- Standard deviation, ignoring NaN values
nanmedian -- Median, ignoring NaN values
variation -- Coefficient of variation
.. autosummary::
:toctree: generated/
cumfreq
histogram2
histogram
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
sem
zmap
zscore
.. autosummary::
:toctree: generated/
sigmaclip
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more stat-related functions install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .rv import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
#remove vonmises_cython from __all__, I don't know why it is included
__all__ = [s for s in dir() if not (s.startswith('_') or s.endswith('cython'))]
from numpy.testing import Tester
test = Tester().test
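# Illustrative usage of the kernel density estimator exported above (assumes
# only the public numpy/scipy.stats API):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> samples = np.random.normal(loc=0.0, scale=1.0, size=200)
#     >>> kde = stats.gaussian_kde(samples)
#     >>> density_at_zero = kde.evaluate([0.0])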
|
maciejkula/scipy
|
scipy/stats/__init__.py
|
Python
|
bsd-3-clause
| 9,213
|
[
"Gaussian"
] |
f1bcc13dd272a8a65021cb8f82ff725715dae470aa3e5eb741abf9eec3685127
|
#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Physical property estimation API.
Authors
-------
* John D. Chodera <john.chodera@choderalab.org>
TODO
----
* Implement methods
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
import sys
import time
import copy
import numpy as np
from simtk import openmm, unit
#=============================================================================================
# COMPUTED PROPERTY RESULT
#=============================================================================================
class PropertyComputationSimulation(object):
"""Container for information about a simulation that was run.
Properties
----------
length : simtk.unit.Quantity with units compatible with nanoseconds
The length of the simulation.
thermodynamic_state : ThermodynamicState
The thermodynamic state at which the simulation was run.
substance : Substance
The substance that was simulated.
system : simtk.openmm.System
The system that was simulated.
"""
def __init__(self):
pass
class ComputedProperty(object):
"""Computed physical property result.
Properties
----------
value : simtk.unit.Quantity (possibly wrapping a numpy array)
The estimated (computed) value of the property
uncertainty : simtk.unit.Quantity with same dimension and units as 'value'
The estimated uncertainty (standard error) of the computed 'value'
parameters : ParameterSet
The parameter set used to compute the provided property
simulations : set of Simulation objects
The simulations that contributed to this property estimate
"""
def __init__(self):
self.simulations = set()
class ComputedPropertySet(list):
"""Set of computed physical properties.
"""
def __init__(self):
pass
#=============================================================================================
# PROPERTY ESTIMATOR
#=============================================================================================
class PropertyEstimator(object):
"""Physical property estimation interface.
This is a generic interface.
Multiple backends will be supported in the future, and computation is not guaranteed to happen anywhere specific.
Intermediate files are not guaranteed to be stored; intermediate simulation data may be generated as needed.
Examples
--------
>>> estimator = PropertyEstimator(nworkers=10) # NOTE: multiple backends will be supported in the future
>>> computed_properties = estimator.computeProperties(dataset, parameter_sets)
"""
def __init__(self, **kwargs):
"""Create a physical property estimator interface.
"""
pass
def computeProperties(self, dataset, parametersets, target_relative_uncertainty=0.1):
"""Compute physical properties for the specified dataset given one or more parameter sets.
Parameters
----------
dataset : PhysicalPropertyDataset
The dataset for which physical properties are to be computed.
parametersets : ParameterSet or iterable of Parameterset
Parameter set(s) for which physical properties are to be computed.
        target_relative_uncertainty : float, optional, default=0.1
Target computational uncertainty in the computed property, relative to experimental uncertainty.
Returns
-------
properties : ComputedPropertySet object or list of ComputedPropertySet objects
The computed physical properties.
"""
# Attempt to estimate all simulated properties by reweighting
simulations_to_run = list() # list of simulations that need to be rerun
computed_properties_sets = list()
        for parameters in parametersets:
computed_property_set = ComputedPropertySet()
for measured_property in dataset:
# Estimate property via reweighting.
computed_property = self._estimateProperty(measured_property)
if (computed_property is None) or (computed_property.uncertainty > target_relative_uncertainty * measured_property.uncertainty):
# Uncertainty threshold exceeded; queue for simulation
                    simulation = Simulation(thermodynamic_state=measured_property.thermodynamic_state, composition=measured_property.thermodynamic_state.composition)
simulations_to_run.append( (simulation, measured_property, target_relative_uncertainty) )
computed_property = ComputedProperty()
computed_property_set.append(computed_property)
computed_properties_sets.append(computed_property_set)
# Run queued simulations
# TODO: Parallelize
for (simulation, target_property, target_uncertainty) in simulations_to_run:
simulation.run(target_property=target_property, target_uncertainty=target_uncertainty)
# Return computed physical property dataset(s)
return computed_properties_sets
|
bmanubay/open-forcefield-tools
|
openforcefield/propertyestimator.py
|
Python
|
mit
| 5,436
|
[
"OpenMM"
] |
ac7287cb069dc9dbde70e989cda4fcd3bf694334c5de86356f89ddc87757aff4
|
################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Black-box variational inference
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import bayespy.plot as myplt
from bayespy.utils import misc
from bayespy.utils import random
from bayespy.nodes import GaussianARD, LogPDF, Dot
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
from bayespy.demos import pca
def run(M=10, N=100, D=5, seed=42, maxiter=100, plot=True):
"""
    Run black-box variational inference demo on a simple matrix factorization model.
"""
raise NotImplementedError("Black box variational inference not yet implemented, sorry")
if seed is not None:
np.random.seed(seed)
# Generate data
data = np.dot(np.random.randn(M,D),
np.random.randn(D,N))
# Construct model
C = GaussianARD(0, 1, shape=(2,), plates=(M,1), name='C')
X = GaussianARD(0, 1, shape=(2,), plates=(1,N), name='X')
F = Dot(C, X)
# Some arbitrary log likelihood
def logpdf(y, f):
"""
exp(f) / (1 + exp(f)) = 1/(1+exp(-f))
-log(1+exp(-f)) = -log(exp(0)+exp(-f))
also:
1 - exp(f) / (1 + exp(f)) = (1 + exp(f) - exp(f)) / (1 + exp(f))
= 1 / (1 + exp(f))
= -log(1+exp(f)) = -log(exp(0)+exp(f))
"""
return -np.logaddexp(0, -f * np.where(y, -1, +1))
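    # Note on the identity above: -np.logaddexp(0, x) computes -log(1 + exp(x))
    # in a numerically stable way, so the return value is the Bernoulli
    # log-likelihood log(sigmoid(f)) or log(1 - sigmoid(f)), depending on the
    # label y.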
Y = LogPDF(logpdf, F, samples=10, shape=())
#Y = GaussianARD(F, 1)
Y.observe(data)
Q = VB(Y, C, X)
Q.ignore_bound_checks = True
delay = 1
forgetting_rate = 0.7
for n in range(maxiter):
# Observe a mini-batch
#subset = np.random.choice(N, N_batch)
#Y.observe(data[subset,:])
# Learn intermediate variables
#Q.update(Z)
# Set step length
step = (n + delay) ** (-forgetting_rate)
# Stochastic gradient for the global variables
Q.gradient_step(C, X, scale=step)
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')
bpplt.pyplot.xlabel('CPU time (in seconds)')
bpplt.pyplot.ylabel('VB lower bound')
return
if __name__ == '__main__':
import sys, getopt, os
try:
opts, args = getopt.getopt(sys.argv[1:],
"",
["n=",
"batch=",
"seed=",
"maxiter="])
except getopt.GetoptError:
        print('python black_box.py <options>')
print('--n=<INT> Number of data points')
print('--batch=<INT> Mini-batch size')
print('--maxiter=<INT> Maximum number of VB iterations')
print('--seed=<INT> Seed (integer) for the random number generator')
sys.exit(2)
kwargs = {}
for opt, arg in opts:
if opt == "--maxiter":
kwargs["maxiter"] = int(arg)
elif opt == "--seed":
kwargs["seed"] = int(arg)
elif opt in ("--n",):
kwargs["N"] = int(arg)
elif opt in ("--batch",):
kwargs["N_batch"] = int(arg)
run(**kwargs)
plt.show()
|
SalemAmeen/bayespy
|
bayespy/demos/black_box.py
|
Python
|
mit
| 3,429
|
[
"Gaussian"
] |
d46ab69e4bc85a813895465a93540fa10fe727cb3636001d9f7a4f742ea0e06d
|
#!/usr/bin/python3
"""
Local Laplacian, see e.g. Aubry et al 2011, "Fast and Robust Pyramid-based Image Processing".
"""
from __future__ import division # if running with python2
from halide import *
import numpy as np
from scipy.misc import imread, imsave
import os.path
int_t = Int(32)
float_t = Float(32)
def get_local_laplacian(input, levels, alpha, beta, J=8):
downsample_counter=[0]
upsample_counter=[0]
x = Var('x')
y = Var('y')
def downsample(f):
downx, downy = Func('downx%d'%downsample_counter[0]), Func('downy%d'%downsample_counter[0])
downsample_counter[0] += 1
downx[x,y,c] = (f[2*x-1,y,c] + 3.0*(f[2*x,y,c]+f[2*x+1,y,c]) + f[2*x+2,y,c])/8.0
downy[x,y,c] = (downx[x,2*y-1,c] + 3.0*(downx[x,2*y,c]+downx[x,2*y+1,c]) + downx[x,2*y+2,c])/8.0
return downy
def upsample(f):
upx, upy = Func('upx%d'%upsample_counter[0]), Func('upy%d'%upsample_counter[0])
upsample_counter[0] += 1
upx[x,y,c] = 0.25 * f[(x//2) - 1 + 2*(x%2),y,c] + 0.75 * f[x//2,y,c]
upy[x,y,c] = 0.25 * upx[x, (y//2) - 1 + 2*(y%2),c] + 0.75 * upx[x,y//2,c]
return upy
def downsample2D(f):
downx, downy = Func('downx%d'%downsample_counter[0]), Func('downy%d'%downsample_counter[0])
downsample_counter[0] += 1
downx[x,y] = (f[2*x-1,y] + 3.0*(f[2*x,y]+f[2*x+1,y]) + f[2*x+2,y])/8.0
downy[x,y] = (downx[x,2*y-1] + 3.0*(downx[x,2*y]+downx[x,2*y+1]) + downx[x,2*y+2])/8.0
return downy
def upsample2D(f):
upx, upy = Func('upx%d'%upsample_counter[0]), Func('upy%d'%upsample_counter[0])
upsample_counter[0] += 1
upx[x,y] = 0.25 * f[(x//2) - 1 + 2*(x%2),y] + 0.75 * f[x//2,y]
upy[x,y] = 0.25 * upx[x, (y//2) - 1 + 2*(y%2)] + 0.75 * upx[x,y//2]
return upy
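    # The four helpers above apply a separable binomial [1, 3, 3, 1] / 8 kernel:
    # downsampling computes out[x] = (f[2x-1] + 3*f[2x] + 3*f[2x+1] + f[2x+2]) / 8
    # along each axis, and upsampling linearly interpolates with 1/4 and 3/4
    # weights (a restatement of the Halide expressions, no extra functionality).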
# THE ALGORITHM
# loop variables
c = Var('c')
k = Var('k')
# Make the remapping function as a lookup table.
remap = Func('remap')
fx = cast(float_t, x/256.0)
#remap[x] = alpha*fx*exp(-fx*fx/2.0)
remap[x] = alpha*fx*exp(-fx*fx/2.0)
# Convert to floating point
floating = Func('floating')
floating[x,y,c] = cast(float_t, input[x,y,c]) / 65535.0
# Set a boundary condition
clamped = Func('clamped')
clamped[x,y,c] = floating[clamp(x, 0, input.width()-1), clamp(y, 0, input.height()-1), c]
# Get the luminance channel
gray = Func('gray')
gray[x,y] = 0.299*clamped[x,y,0] + 0.587*clamped[x,y,1] + 0.114*clamped[x,y,2]
# Make the processed Gaussian pyramid.
gPyramid = [Func('gPyramid%d'%i) for i in range(J)]
    # Do a lookup into a lut with 256 entries per intensity level
level = k / (levels - 1)
idx = gray[x,y]*cast(float_t, levels-1)*256.0
idx = clamp(cast(int_t, idx), 0, (levels-1)*256)
gPyramid[0][x,y,k] = beta*(gray[x, y] - level) + level + remap[idx - 256*k]
for j in range(1,J):
gPyramid[j][x,y,k] = downsample(gPyramid[j-1])[x,y,k]
# Get its laplacian pyramid
lPyramid = [Func('lPyramid%d'%i) for i in range(J)]
lPyramid[J-1] = gPyramid[J-1]
for j in range(J-1)[::-1]:
lPyramid[j][x,y,k] = gPyramid[j][x,y,k] - upsample(gPyramid[j+1])[x,y,k]
# Make the Gaussian pyramid of the input
inGPyramid = [Func('inGPyramid%d'%i) for i in range(J)]
inGPyramid[0] = gray
for j in range(1,J):
inGPyramid[j][x,y] = downsample2D(inGPyramid[j-1])[x,y]
# Make the laplacian pyramid of the output
outLPyramid = [Func('outLPyramid%d'%i) for i in range(J)]
for j in range(J):
# Split input pyramid value into integer and floating parts
level = inGPyramid[j][x,y]*cast(float_t, levels-1)
li = clamp(cast(int_t, level), 0, levels-2)
lf = level - cast(float_t, li)
# Linearly interpolate between the nearest processed pyramid levels
outLPyramid[j][x,y] = (1.0-lf)*lPyramid[j][x,y,li] + lf*lPyramid[j][x,y,li+1]
# Make the Gaussian pyramid of the output
outGPyramid = [Func('outGPyramid%d'%i) for i in range(J)]
outGPyramid[J-1] = outLPyramid[J-1]
for j in range(J-1)[::-1]:
outGPyramid[j][x,y] = upsample2D(outGPyramid[j+1])[x,y] + outLPyramid[j][x,y]
# Reintroduce color (Connelly: use eps to avoid scaling up noise w/ apollo3.png input)
color = Func('color')
eps = 0.01
color[x,y,c] = outGPyramid[0][x,y] * (clamped[x,y,c] + eps) / (gray[x,y] + eps)
output = Func('local_laplacian')
# Convert back to 16-bit
output[x,y,c] = cast(UInt(16), clamp(color[x,y,c], 0.0, 1.0) * 65535.0)
# THE SCHEDULE
remap.compute_root()
target = get_target_from_environment()
if target.has_gpu_feature():
# GPU Schedule
print ("Compiling for GPU")
output.compute_root().gpu_tile(x, y, 32, 32, GPU_Default)
for j in range(J):
blockw = 32
blockh = 16
if j > 3:
blockw = 2
blockh = 2
if j > 0:
inGPyramid[j].compute_root().gpu_tile(x, y, blockw, blockh, GPU_Default)
if j > 0:
gPyramid[j].compute_root().reorder(k, x, y).gpu_tile(x, y, blockw, blockh, GPU_Default)
outGPyramid[j].compute_root().gpu_tile(x, y, blockw, blockh, GPU_Default)
else:
# CPU schedule
print ("Compiling for CPU")
output.parallel(y, 4).vectorize(x, 4);
gray.compute_root().parallel(y, 4).vectorize(x, 4);
for j in range(4):
if j > 0:
inGPyramid[j].compute_root().parallel(y, 4).vectorize(x, 4)
if j > 0:
gPyramid[j].compute_root().parallel(y, 4).vectorize(x, 4)
outGPyramid[j].compute_root().parallel(y).vectorize(x, 4)
for j in range(4,J):
inGPyramid[j].compute_root().parallel(y)
gPyramid[j].compute_root().parallel(k)
outGPyramid[j].compute_root().parallel(y)
return output
def generate_compiled_file(local_laplacian):
# Need to copy the process executable from the C++ apps/local_laplacian folder to run this.
# (after making it of course)
arguments = ArgumentsVector()
arguments.append(Argument('levels', False, int_t))
arguments.append(Argument('alpha', False, float_t))
arguments.append(Argument('beta', False, float_t))
arguments.append(Argument('input', True, UInt(16)))
target = get_target_from_environment()
local_laplacian.compile_to_file("local_laplacian", arguments, "local_laplacian", target)
print("Generated compiled file for local_laplacian function.")
return
def get_input_data():
image_path = os.path.join(os.path.dirname(__file__), "../../apps/images/rgb.png")
assert os.path.exists(image_path), \
"Could not find %s" % image_path
rgb_data = imread(image_path)
#print("rgb_data", type(rgb_data), rgb_data.shape, rgb_data.dtype)
input_data = np.copy(rgb_data.astype(np.uint16), order="F") << 8
# input data is in range [0, 256*256]
#print("input_data", type(input_data), input_data.shape, input_data.dtype)
return input_data
def filter_test_image(local_laplacian, input):
local_laplacian.compile_jit()
# preparing input and output memory buffers (numpy ndarrays)
input_data = get_input_data()
input_image = Buffer(input_data)
input.set(input_image)
output_data = np.empty(input_data.shape, dtype=input_data.dtype, order="F")
output_image = Buffer(output_data)
if False:
print("input_image", input_image)
print("output_image", output_image)
# do the actual computation
local_laplacian.realize(output_image)
# save results
input_path = "local_laplacian_input.png"
output_path = "local_laplacian.png"
imsave(input_path, input_data)
imsave(output_path, output_data)
print("\nlocal_laplacian realized on output_image.")
print("Result saved at '", output_path,
"' ( input data copy at '", input_path, "' ).", sep="")
return
def main():
input = ImageParam(UInt(16), 3, 'input')
# number of intensity levels
levels = Param(int_t, 'levels', 8)
#Parameters controlling the filter
alpha = Param(float_t, 'alpha', 1.0/7.0)
beta = Param(float_t, 'beta', 1.0)
local_laplacian = get_local_laplacian(input, levels, alpha, beta)
generate = False # Set to False to run the jit immediately and get instant gratification.
if generate:
generate_compiled_file(local_laplacian)
else:
filter_test_image(local_laplacian, input)
return
if __name__ == '__main__':
main()
|
ronen/Halide
|
python_bindings/apps/local_laplacian.py
|
Python
|
mit
| 8,634
|
[
"Gaussian"
] |
9b86c182161c54c83a58f6ab6bfdf8b9c4b6a5b033bdd9cff6f6b62858df75c1
|
#!/usr/bin/env python
import argparse
import sys
import time
from contact_utils import *
from traj_utils import load_Trajs_generator
parser = argparse.ArgumentParser(usage="""{} Trajs*.nc Topology.prmtop""".
format(sys.argv[0]),
epilog="""Load up a list of AMBER NetCDF
trajectories and their corresponding topology
with MDtraj. Calculate a heatmap between the
two specified masks""")
parser.add_argument("Trajectories", help="""An indefinite amount of AMBER
trajectories""", nargs="+")
parser.add_argument("Topology", help="""The topology .prmtop file that matches
the trajectories""")
parser.add_argument("-s1", "--start1", help="""0-indexed value for the first
residue of Mask1""", type=int)
parser.add_argument("-e1", "--end1", help="""0-indexed value for the final
residue of Mask1""", type=int)
parser.add_argument("-s2", "--start2", help="""0-indexed value for the first
residue of Mask2""", type=int)
parser.add_argument("-e2", "--end2", help="""0-indexed value for the final
residue of Mask2""", type=int)
parser.add_argument("Map_type", help="""Type of calculation for the contact map.
Can be either mdtraj or cheng style.""", choices=['mdtraj',
'cheng'])
parser.add_argument("-s", "--save", help="Save the plots as .png images",
action="store_true")
parser.add_argument("-st", "--stride", help="""Stride for the loading of the
trajectory. Must be a divisor of the chunk.
Default value is 1.""", default=1, type=int)
parser.add_argument("-ch", "--chunk", help="""Number of frames that will be
used by md.iterload to load up the trajectories. Must be
                       a multiple of the stride.
Default is 100 frames.""", default=100, type=int)
parser.add_argument("-t", "--title", help="""Name of the image where
plot is stored. Default is PCA.""", default="PCA.png")
args = parser.parse_args()
def main():
if args:
print('\n', args, '\n')
start = time.time()
mask1, mask2, pairs = get_residuepairs(args.start1, args.end1,
args.start2, args.end2)
trjs = load_Trajs_generator(sorted(args.Trajectories),
prmtop_file=args.Topology,
stride=args.stride,
chunk=args.chunk)
print("Trajectories have been loaded after %.2f s.\n" %
(time.time() - start))
if args.Map_type == 'mdtraj':
cmap = cmap_MDtraj(trjs, mask1, mask2, pairs)
print("MDtraj style cmap has been calculated after %.2f s.\n" %
(time.time() - start))
else:
cmap = cmap_Cheng(trjs, mask1, mask2, pairs)
print("Cheng style cmap has been calculated after %.2f s.\n" %
(time.time() - start))
plot_heatmap(cmap, mask1, mask2)
print("Total execution time: %.2f s." % (time.time() - start))
if __name__ == "__main__":
main()
|
jeiros/Scripts
|
AnalysisMDTraj/contacts.py
|
Python
|
mit
| 3,435
|
[
"Amber",
"MDTraj",
"NetCDF"
] |
0116fa01829c61c3b52163f9db4a0156760f823cf0b2d652c1ac108cd8c7ce19
|
import time
import splinter
from lettuce import step,before,world,after
from lettuce.django import django_url
from django.contrib.auth.models import User
from frontend.models import UserProfile, Feature
from nose.tools import assert_equals
# Steps used in more than one feature's steps file
# Human readable CSS selectors
SELECTORS = {
'need help box': "div[@id='help']",
}
# Features
@step(u'(?:Given|And) the "([^"]*)" feature exists')
def and_the_feature_exists(step, feature):
Feature.objects.filter(name=feature).delete()
Feature.objects.create(name=feature, public=True)
@step(u'(?:Given|And) I have the "([^"]*)" feature enabled')
def and_i_have_a_feature_enabled(step, feature):
u = User.objects.filter(username='test')[0]
feature = Feature.objects.filter(name=feature)[0]
profile = u.get_profile();
profile.features.add(feature)
assert profile.has_feature(feature)
@step(u'And I do not have the "([^"]*)" feature enabled$')
def feature_not_enabled(step, feature):
u = User.objects.filter(username='test')[0]
feature = Feature.objects.filter(name=feature)[0]
profile = u.get_profile();
try:
profile.features.remove(feature)
except ValueError:
# Expected when the user already does not have the
# feature in question.
pass
assert not profile.has_feature(feature)
# Payment plan
@step(u'(?:Given|And) I am an? "([^"]*)" user')
def given_i_am_a_plan_user(step, plan):
plan = plan.replace(' ', '').lower()
step.behave_as("""
Given user "test" with password "pass" is logged in
And I am on the "%s" plan
""" % plan)
@step(u'And I am on the "([^"]*)" plan')
def and_i_am_on_the_plan(step, plan):
user = User.objects.get(username='test')
profile = user.get_profile()
profile.change_plan(plan)
# Scrapers
@step(u"(?:When|And) I visit my scraper's overview page$")
def i_am_on_the_scraper_overview_page(step):
world.browser.visit(django_url('/scrapers/test_scraper'))
# Seeing matchers
@step(u'(?:Then|And) I should (not )?see "([^"]*)"')
def and_i_should_not_see_text(step, negative, text):
x = world.browser.is_text_present(text)
if not negative:
assert x
else:
assert not x
@step(u'(?:Then|And) I should (not )?see (?:the|a|an) "([^"]*)" (?:link|button)$')
def i_should_see_the_button(step, negative, text):
x = world.browser.find_link_by_partial_text(text)
if not negative:
assert x
else:
assert x == []
@step(u'(?:Then|And) I should see (?:the|a|an) "([^"]*)" (?:link|button) in the (.+)')
def i_should_see_the_button_in_parent(step, text, parent_name):
xpath = ".//%s//a[contains(.,'%s')]" % (
SELECTORS[parent_name], text)
assert world.browser.find_by_xpath(xpath)
# Clicking
@step(u'(?:And|When) I click "([^"]*)"')
def and_i_click(step, text):
# :todo: Make it not wrong. so wrong.
world.browser.find_by_tag("button").first.click()
@step(u'(?:When|And) I click the "([^"]*)" (?:link|button)$')
def i_click_the_button(step, text):
try:
world.browser.find_link_by_partial_text(text).first.click()
except splinter.exceptions.ElementDoesNotExist:
# Sometimes we have an actual button, in which case we end up here.
world.browser.find_by_value(text).first.click()
# Alerts
@step(u'(?:Then|And) I close the alert')
def and_i_close_the_alert(step):
world.browser.find_by_css('#alert_close').first.click()
|
rossjones/ScraperWikiX
|
web/frontend/features/common_steps.py
|
Python
|
agpl-3.0
| 3,493
|
[
"VisIt"
] |
ca5a88344818e23c75eae1eb5ebc9d516ca3443d7667a9f036309f416c37be99
|
#
# Attempting to replicate lane detection results described in this tutorial by Naoki Shibuya:
# https://medium.com/towards-data-science/finding-lane-lines-on-the-road-30cf016a1165
# For more see: https://github.com/naokishibuya/car-finding-lane-lines
#
# This 2nd version does a much better job of processing images.
#
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import sys
import subprocess
import os
import shutil
def convert_hls(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
def select_white_yellow(image):
converted = convert_hls(image)
lower = np.uint8([ 0, 200, 0])
upper = np.uint8([255, 255, 255])
white_mask = cv2.inRange(converted, lower, upper)
lower = np.uint8([ 10, 0, 100])
upper = np.uint8([ 40, 255, 255])
yellow_mask = cv2.inRange(converted, lower, upper)
mask = cv2.bitwise_or(white_mask, yellow_mask)
return cv2.bitwise_and(image, image, mask = mask)
def convert_gray_scale(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
def apply_smoothing(image, kernel_size=15):
return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
def detect_edges(image, low_threshold=50, high_threshold=150):
return cv2.Canny(image, low_threshold, high_threshold)
def filter_region(image, vertices):
mask = np.zeros_like(image)
if len(mask.shape)==2:
cv2.fillPoly(mask, vertices, 255)
else:
cv2.fillPoly(mask, vertices, (255,)*mask.shape[2])
return cv2.bitwise_and(image, mask)
def select_region(image):
rows, cols = image.shape[:2]
bottom_left = [cols*0.1, rows*0.95]
top_left = [cols*0.4, rows*0.6]
bottom_right = [cols*0.9, rows*0.95]
top_right = [cols*0.6, rows*0.6]
vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
return filter_region(image, vertices)
def hough_lines(image):
return cv2.HoughLinesP(image, rho=1, theta=np.pi/180, threshold=20, minLineLength=20, maxLineGap=300)
def average_slope_intercept(lines):
left_lines = []
left_weights = []
right_lines = []
right_weights = []
for line in lines:
for x1, y1, x2, y2 in line:
if x2==x1:
continue
slope = (y2-y1)/(x2-x1)
intercept = y1 - slope*x1
length = np.sqrt((y2-y1)**2+(x2-x1)**2)
if slope < 0:
left_lines.append((slope, intercept))
left_weights.append((length))
else:
right_lines.append((slope, intercept))
right_weights.append((length))
left_lane = np.dot(left_weights, left_lines) /np.sum(left_weights) if len(left_weights) >0 else None
right_lane = np.dot(right_weights, right_lines)/np.sum(right_weights) if len(right_weights)>0 else None
return left_lane, right_lane
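# Illustrative example of the weighted fit above: two left-lane segments with
# (slope, intercept) = (-0.7, 400) of length 10 and (-0.5, 380) of length 30
# average to np.dot([10, 30], [(-0.7, 400), (-0.5, 380)]) / 40 = (-0.55, 385),
# i.e. longer segments dominate the lane estimate.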
def make_line_points(y1, y2, line):
if line is None:
return None
slope, intercept = line
x1 = int((y1 - intercept)/slope)
x2 = int((y2 - intercept)/slope)
y1 = int(y1)
y2 = int(y2)
return ((x1, y1), (x2, y2))
def lane_lines(image, lines):
left_lane, right_lane = average_slope_intercept(lines)
y1 = image.shape[0]
y2 = y1*0.6
left_line = make_line_points(y1, y2, left_lane)
right_line = make_line_points(y1, y2, right_lane)
return left_line, right_line
def draw_lane_lines(image, lines, color=[255, 0, 0], thickness=20):
line_image = np.zeros_like(image)
for line in lines:
if line is not None:
cv2.line(line_image, *line, color, thickness)
return cv2.addWeighted(image, 1.0, line_image, 0.95, 0.0)
def mark_failed(image):
font = cv2.FONT_HERSHEY_SIMPLEX
text = "DETECT FAILED!"
textsize = cv2.getTextSize(text, font, 2, 5)[0]
textX = int((image.shape[1] - textsize[0]) / 2)
textY = int((image.shape[0] + textsize[1]) / 2)
cv2.putText(image, text, (textX, textY), font, 2, (255, 0, 0), 5)
return image
def process_image(dirpath, image_file):
if not os.path.exists('tmp'):
os.mkdir('tmp')
if not os.path.exists('output'):
os.makedirs('output')
image_name = os.path.splitext(image_file)[0]
# First load and show the sample image
image = mpimg.imread("{0}/{1}".format(dirpath, image_file))
im = plt.imshow(image)
plt.savefig('tmp/1.png')
# Now select the white and yellow lines
white_yellow = select_white_yellow(image)
im = plt.imshow(white_yellow, cmap='gray')
plt.savefig('tmp/2.png')
# Now convert to grayscale
gray_scale = convert_gray_scale(white_yellow)
im = plt.imshow(gray_scale, cmap='gray')
plt.savefig('tmp/3.png')
# Then apply a Gaussian blur
blurred_image = apply_smoothing(gray_scale)
im = plt.imshow(blurred_image, cmap='gray')
plt.savefig('tmp/4.png')
# Detect line edges
edged_image = detect_edges(blurred_image)
im = plt.imshow(edged_image, cmap='gray')
plt.savefig('tmp/5.png')
# Now ignore all but the area of interest
masked_image = select_region(edged_image)
im = plt.imshow(masked_image, cmap='gray')
plt.savefig('tmp/6.png')
# Apply Houghed lines algorithm
houghed_lines = hough_lines(masked_image)
if houghed_lines is not None:
houghed_image = draw_lane_lines(image, lane_lines(image, houghed_lines))
im = plt.imshow(houghed_image, cmap='gray')
output_name = "output/{0}_passed.gif".format(image_name)
print("Detected lanes in '{0}/{1}'. See result in '{2}'.".format(dirpath, image_file, output_name))
else:
im = plt.imshow(mark_failed(image), cmap='gray')
output_name = "output/{0}_failed.gif".format(image_name)
print("Failed detection in '{0}/{1}'. See result in '{2}'.".format(dirpath, image_file, output_name))
plt.savefig('tmp/7.png')
# Repeat last image in the loop a couple of times.
plt.savefig('tmp/8.png')
plt.savefig('tmp/9.png')
# Now generate an animated gif of the image stages
subprocess.call( ['convert', '-delay', '100', '-loop', '0', 'tmp/*.png', output_name] )
shutil.rmtree('tmp')
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Usage: python3 ./lane_detect.py images/*")
else:
for arg in sys.argv[1:]:
if not os.path.isfile(arg):
print("Not a file: {0}".format(arg))
else:
dirpath,filename = os.path.split(arg)
process_image(dirpath, filename)
|
guydavis/lane-detect
|
older/lane_detect.v2.py
|
Python
|
mit
| 6,587
|
[
"Gaussian"
] |
358b0fc8d024ec40d2481e97297b8cf92abb76cbadbe763d8e1a7b3a04f2bacb
|
#!/usr/bin/env python
'''
Run the charged system with Makov-Payne correction.
'''
import numpy
from pyscf.pbc import gto, scf
cell = gto.M(atom='Al 0 0 0', basis='lanl2dz', ecp='lanl2dz',
spin=0, a=numpy.eye(3)*6, charge=1, dimension=3)
mf = scf.RHF(cell)
mf.run()
|
gkc1000/pyscf
|
examples/pbc/28-charged_system.py
|
Python
|
apache-2.0
| 281
|
[
"PySCF"
] |
03324096b895fff0669e6475575243e39ab12354540d216f3f5d6abc9df5b40c
|
"""
A simple VTK widget for PyQt or PySide.
See http://www.trolltech.com for Qt documentation,
http://www.riverbankcomputing.co.uk for PyQt, and
http://pyside.github.io for PySide.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
Changes by Phil Thompson, Nov. 2006
Ported to PyQt v4.
Added support for wheel events.
Changes by Phil Thompson, Oct. 2007
Bug fixes.
Changes by Phil Thompson, Mar. 2008
Added cursor support.
Changes by Rodrigo Mologni, Sep. 2013 (Credit to Daniele Esposti)
Bug fix to PySide: Converts PyCObject to void pointer.
Changes by Greg Schussman, Aug. 2014
The keyPressEvent function now passes keysym instead of None.
Changes by Alex Tsui, Apr. 2015
Port from PyQt4 to PyQt5.
Changes by Fabian Wenzel, Jan. 2016
Support for Python3
"""
import vtk
# Check whether a specific PyQt implementation was chosen
PyQtImpl = None
try:
    import vtk.qt
    PyQtImpl = vtk.qt.PyQtImpl
except ImportError:
    pass
if PyQtImpl is None:
# Autodetect the PyQt implementation to use
try:
import PyQt5
PyQtImpl = "PyQt5"
except ImportError:
try:
import PyQt4
PyQtImpl = "PyQt4"
except ImportError:
try:
import PySide
PyQtImpl = "PySide"
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
if PyQtImpl == "PyQt5":
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QSize
from PyQt5.QtCore import QEvent
elif PyQtImpl == "PyQt4":
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QSizePolicy
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import Qt
from PyQt4.QtCore import QTimer
from PyQt4.QtCore import QObject
from PyQt4.QtCore import QSize
from PyQt4.QtCore import QEvent
elif PyQtImpl == "PySide":
from PySide.QtGui import QWidget
from PySide.QtGui import QSizePolicy
from PySide.QtGui import QApplication
from PySide.QtCore import Qt
from PySide.QtCore import QTimer
from PySide.QtCore import QObject
from PySide.QtCore import QSize
from PySide.QtCore import QEvent
else:
raise ImportError("Unknown PyQt implementation " + repr(PyQtImpl))
class QVTKRenderWindowInteractor(QWidget):
""" A QVTKRenderWindowInteractor for Python and Qt. Uses a
vtkGenericRenderWindowInteractor to handle the interactions. Use
GetRenderWindow() to get the vtkRenderWindow. Create with the
keyword stereo=1 in order to generate a stereo-capable window.
The user interface is summarized in vtkInteractorStyle.h:
- Keypress j / Keypress t: toggle between joystick (position
sensitive) and trackball (motion sensitive) styles. In joystick
style, motion occurs continuously as long as a mouse button is
pressed. In trackball style, motion occurs when the mouse button
is pressed and the mouse pointer moves.
- Keypress c / Keypress o: toggle between camera and object
(actor) modes. In camera mode, mouse events affect the camera
position and focal point. In object mode, mouse events affect
the actor that is under the mouse pointer.
- Button 1: rotate the camera around its focal point (if camera
mode) or rotate the actor around its origin (if actor mode). The
rotation is in the direction defined from the center of the
renderer's viewport towards the mouse position. In joystick mode,
the magnitude of the rotation is determined by the distance the
mouse is from the center of the render window.
- Button 2: pan the camera (if camera mode) or translate the actor
(if object mode). In joystick mode, the direction of pan or
translation is from the center of the viewport towards the mouse
position. In trackball mode, the direction of motion is the
direction the mouse moves. (Note: with 2-button mice, pan is
defined as <Shift>-Button 1.)
- Button 3: zoom the camera (if camera mode) or scale the actor
(if object mode). Zoom in/increase scale if the mouse position is
in the top half of the viewport; zoom out/decrease scale if the
mouse position is in the bottom half. In joystick mode, the amount
of zoom is controlled by the distance of the mouse pointer from
the horizontal centerline of the window.
- Keypress 3: toggle the render window into and out of stereo
mode. By default, red-blue stereo pairs are created. Some systems
support Crystal Eyes LCD stereo glasses; you have to invoke
SetStereoTypeToCrystalEyes() on the rendering window. Note: to
use stereo you also need to pass a stereo=1 keyword argument to
the constructor.
- Keypress e: exit the application.
- Keypress f: fly to the picked point
- Keypress p: perform a pick operation. The render window interactor
has an internal instance of vtkCellPicker that it uses to pick.
- Keypress r: reset the camera view along the current view
direction. Centers the actors and moves the camera so that all actors
are visible.
- Keypress s: modify the representation of all actors so that they
are surfaces.
- Keypress u: invoke the user-defined function. Typically, this
keypress will bring up an interactor that you can type commands in.
- Keypress w: modify the representation of all actors so that they
are wireframe.
"""
# Map between VTK and Qt cursors.
_CURSOR_MAP = {
0: Qt.ArrowCursor, # VTK_CURSOR_DEFAULT
1: Qt.ArrowCursor, # VTK_CURSOR_ARROW
2: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZENE
3: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZENWSE
4: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZESW
5: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZESE
6: Qt.SizeVerCursor, # VTK_CURSOR_SIZENS
7: Qt.SizeHorCursor, # VTK_CURSOR_SIZEWE
8: Qt.SizeAllCursor, # VTK_CURSOR_SIZEALL
9: Qt.PointingHandCursor, # VTK_CURSOR_HAND
10: Qt.CrossCursor, # VTK_CURSOR_CROSSHAIR
}
def __init__(self, parent=None, wflags=Qt.WindowFlags(), **kw):
# the current button
self._ActiveButton = Qt.NoButton
# private attributes
self.__saveX = 0
self.__saveY = 0
self.__saveModifiers = Qt.NoModifier
self.__saveButtons = Qt.NoButton
self.__wheelDelta = 0
# do special handling of some keywords:
# stereo, rw
try:
stereo = bool(kw['stereo'])
except KeyError:
stereo = False
try:
rw = kw['rw']
except KeyError:
rw = None
# create qt-level widget
QWidget.__init__(self, parent, wflags|Qt.MSWindowsOwnDC)
if rw: # user-supplied render window
self._RenderWindow = rw
else:
self._RenderWindow = vtk.vtkRenderWindow()
WId = self.winId()
# Python2
if type(WId).__name__ == 'PyCObject':
from ctypes import pythonapi, c_void_p, py_object
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
WId = pythonapi.PyCObject_AsVoidPtr(WId)
# Python3
elif type(WId).__name__ == 'PyCapsule':
from ctypes import pythonapi, c_void_p, py_object, c_char_p
pythonapi.PyCapsule_GetName.restype = c_char_p
pythonapi.PyCapsule_GetName.argtypes = [py_object]
name = pythonapi.PyCapsule_GetName(WId)
pythonapi.PyCapsule_GetPointer.restype = c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
WId = pythonapi.PyCapsule_GetPointer(WId, name)
self._RenderWindow.SetWindowInfo(str(int(WId)))
if stereo: # stereo mode
self._RenderWindow.StereoCapableWindowOn()
self._RenderWindow.SetStereoTypeToCrystalEyes()
try:
self._Iren = kw['iren']
except KeyError:
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow(self._RenderWindow)
# do all the necessary qt setup
self.setAttribute(Qt.WA_OpaquePaintEvent)
self.setAttribute(Qt.WA_PaintOnScreen)
self.setMouseTracking(True) # get all mouse events
self.setFocusPolicy(Qt.WheelFocus)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self._Timer = QTimer(self)
self._Timer.timeout.connect(self.TimerEvent)
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
self.CursorChangedEvent)
#Create a hidden child widget and connect its destroyed signal to its
#parent ``Finalize`` slot. The hidden children will be destroyed before
#its parent thus allowing cleanup of VTK elements.
self._hidden = QWidget(self)
self._hidden.hide()
self._hidden.destroyed.connect(self.Finalize)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ +
" has no attribute named " + attr)
def Finalize(self):
'''
Call internal cleanup method on VTK objects
'''
self._RenderWindow.Finalize()
def CreateTimer(self, obj, evt):
self._Timer.start(10)
def DestroyTimer(self, obj, evt):
self._Timer.stop()
return 1
def TimerEvent(self):
self._Iren.TimerEvent()
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render window."""
# This indirection is needed since when the event fires, the current
# cursor is not yet set so we defer this by which time the current
# cursor should have been set.
QTimer.singleShot(0, self.ShowCursor)
def HideCursor(self):
"""Hides the cursor."""
self.setCursor(Qt.BlankCursor)
def ShowCursor(self):
"""Shows the cursor."""
vtk_cursor = self._Iren.GetRenderWindow().GetCurrentCursor()
qt_cursor = self._CURSOR_MAP.get(vtk_cursor, Qt.ArrowCursor)
self.setCursor(qt_cursor)
def closeEvent(self, evt):
self.Finalize()
def sizeHint(self):
return QSize(400, 400)
def paintEngine(self):
return None
def paintEvent(self, ev):
self._Iren.Render()
def resizeEvent(self, ev):
w = self.width()
h = self.height()
vtk.vtkRenderWindow.SetSize(self._RenderWindow, w, h)
self._Iren.SetSize(w, h)
self._Iren.ConfigureEvent()
self.update()
def _GetCtrlShift(self, ev):
ctrl = shift = False
if hasattr(ev, 'modifiers'):
if ev.modifiers() & Qt.ShiftModifier:
shift = True
if ev.modifiers() & Qt.ControlModifier:
ctrl = True
else:
if self.__saveModifiers & Qt.ShiftModifier:
shift = True
if self.__saveModifiers & Qt.ControlModifier:
ctrl = True
return ctrl, shift
def enterEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, chr(0), 0, None)
self._Iren.EnterEvent()
def leaveEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, chr(0), 0, None)
self._Iren.LeaveEvent()
def mousePressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
repeat = 0
if ev.type() == QEvent.MouseButtonDblClick:
repeat = 1
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), repeat, None)
self._ActiveButton = ev.button()
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonPressEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonPressEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonPressEvent()
def mouseReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonReleaseEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonReleaseEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonReleaseEvent()
def mouseMoveEvent(self, ev):
self.__saveModifiers = ev.modifiers()
self.__saveButtons = ev.buttons()
self.__saveX = ev.x()
self.__saveY = ev.y()
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
self._Iren.MouseMoveEvent()
def keyPressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
if ev.key() < 256:
key = str(ev.text())
else:
key = chr(0)
keySym = _qt_key_to_key_sym(ev.key())
if shift and len(keySym) == 1 and keySym.isalpha():
keySym = keySym.upper()
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, keySym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def keyReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
if ev.key() < 256:
key = chr(ev.key())
else:
key = chr(0)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, None)
self._Iren.KeyReleaseEvent()
def wheelEvent(self, ev):
if hasattr(ev, 'delta'):
self.__wheelDelta += ev.delta()
else:
self.__wheelDelta += ev.angleDelta().y()
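        # Qt reports wheel deltas in eighths of a degree; 120 units equal one
        # standard 15-degree notch, so accumulate and fire one VTK wheel event
        # per notch.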
if self.__wheelDelta >= 120:
self._Iren.MouseWheelForwardEvent()
self.__wheelDelta = 0
elif self.__wheelDelta <= -120:
self._Iren.MouseWheelBackwardEvent()
self.__wheelDelta = 0
def GetRenderWindow(self):
return self._RenderWindow
def Render(self):
self.update()
def QVTKRenderWidgetConeExample():
"""A simple example that uses the QVTKRenderWindowInteractor class."""
# every QT app needs an app
app = QApplication(['QVTKRenderWindowInteractor'])
# create the widget
widget = QVTKRenderWindowInteractor()
widget.Initialize()
widget.Start()
# if you dont want the 'q' key to exit comment this.
widget.AddObserver("ExitEvent", lambda o, e, a=app: a.quit())
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the widget
widget.show()
# start event processing
app.exec_()
_keysyms = {
Qt.Key_Backspace: 'BackSpace',
Qt.Key_Tab: 'Tab',
Qt.Key_Backtab: 'Tab',
# Qt.Key_Clear : 'Clear',
Qt.Key_Return: 'Return',
Qt.Key_Enter: 'Return',
Qt.Key_Shift: 'Shift_L',
Qt.Key_Control: 'Control_L',
Qt.Key_Alt: 'Alt_L',
Qt.Key_Pause: 'Pause',
Qt.Key_CapsLock: 'Caps_Lock',
Qt.Key_Escape: 'Escape',
Qt.Key_Space: 'space',
# Qt.Key_Prior : 'Prior',
# Qt.Key_Next : 'Next',
Qt.Key_End: 'End',
Qt.Key_Home: 'Home',
Qt.Key_Left: 'Left',
Qt.Key_Up: 'Up',
Qt.Key_Right: 'Right',
Qt.Key_Down: 'Down',
Qt.Key_SysReq: 'Snapshot',
Qt.Key_Insert: 'Insert',
Qt.Key_Delete: 'Delete',
Qt.Key_Help: 'Help',
Qt.Key_0: '0',
Qt.Key_1: '1',
Qt.Key_2: '2',
Qt.Key_3: '3',
Qt.Key_4: '4',
Qt.Key_5: '5',
Qt.Key_6: '6',
Qt.Key_7: '7',
Qt.Key_8: '8',
Qt.Key_9: '9',
Qt.Key_A: 'a',
Qt.Key_B: 'b',
Qt.Key_C: 'c',
Qt.Key_D: 'd',
Qt.Key_E: 'e',
Qt.Key_F: 'f',
Qt.Key_G: 'g',
Qt.Key_H: 'h',
Qt.Key_I: 'i',
Qt.Key_J: 'j',
Qt.Key_K: 'k',
Qt.Key_L: 'l',
Qt.Key_M: 'm',
Qt.Key_N: 'n',
Qt.Key_O: 'o',
Qt.Key_P: 'p',
Qt.Key_Q: 'q',
Qt.Key_R: 'r',
Qt.Key_S: 's',
Qt.Key_T: 't',
Qt.Key_U: 'u',
Qt.Key_V: 'v',
Qt.Key_W: 'w',
Qt.Key_X: 'x',
Qt.Key_Y: 'y',
Qt.Key_Z: 'z',
Qt.Key_Asterisk: 'asterisk',
Qt.Key_Plus: 'plus',
Qt.Key_Minus: 'minus',
Qt.Key_Period: 'period',
Qt.Key_Slash: 'slash',
Qt.Key_F1: 'F1',
Qt.Key_F2: 'F2',
Qt.Key_F3: 'F3',
Qt.Key_F4: 'F4',
Qt.Key_F5: 'F5',
Qt.Key_F6: 'F6',
Qt.Key_F7: 'F7',
Qt.Key_F8: 'F8',
Qt.Key_F9: 'F9',
Qt.Key_F10: 'F10',
Qt.Key_F11: 'F11',
Qt.Key_F12: 'F12',
Qt.Key_F13: 'F13',
Qt.Key_F14: 'F14',
Qt.Key_F15: 'F15',
Qt.Key_F16: 'F16',
Qt.Key_F17: 'F17',
Qt.Key_F18: 'F18',
Qt.Key_F19: 'F19',
Qt.Key_F20: 'F20',
Qt.Key_F21: 'F21',
Qt.Key_F22: 'F22',
Qt.Key_F23: 'F23',
Qt.Key_F24: 'F24',
Qt.Key_NumLock: 'Num_Lock',
Qt.Key_ScrollLock: 'Scroll_Lock',
}
def _qt_key_to_key_sym(key):
""" Convert a Qt key into a vtk keysym.
This is essentially copied from the c++ implementation in
GUISupport/Qt/QVTKInteractorAdapter.cxx.
"""
if key not in _keysyms:
return None
return _keysyms[key]
if __name__ == "__main__":
print(PyQtImpl)
QVTKRenderWidgetConeExample()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Wrapping/Python/vtk/qt/QVTKRenderWindowInteractor.py
|
Python
|
gpl-3.0
| 18,922
|
[
"CRYSTAL",
"VTK"
] |
fb4b7d494860d4ee09488b624deb0d58fa76368f3243890eb20b57882ad54832
|
from FactorySystem import InputParameters
import os, sys, shutil
# Get the real path of cluster_launcher
if(os.path.islink(sys.argv[0])):
pathname = os.path.dirname(os.path.realpath(sys.argv[0]))
else:
pathname = os.path.dirname(sys.argv[0])
pathname = os.path.abspath(pathname)
# Add the utilities/python_getpot directory
MOOSE_DIR = os.path.abspath(os.path.join(pathname, '../../'))
FRAMEWORK_DIR = os.path.abspath(os.path.join(pathname, '../../', 'framework'))
#### See if MOOSE_DIR is already in the environment instead
if os.environ.has_key("MOOSE_DIR"):
MOOSE_DIR = os.environ['MOOSE_DIR']
FRAMEWORK_DIR = os.path.join(MOOSE_DIR, 'framework')
if os.environ.has_key("FRAMEWORK_DIR"):
FRAMEWORK_DIR = os.environ['FRAMEWORK_DIR']
# Import the TestHarness and Helper functions from the MOOSE toolkit
sys.path.append(os.path.join(MOOSE_DIR, 'python'))
import path_tool
path_tool.activate_module('TestHarness')
path_tool.activate_module('FactorySystem')
class Job(object):
def validParams():
params = InputParameters()
params.addRequiredParam('type', "The type of test of Tester to create for this test.")
params.addParam('template_script', MOOSE_DIR + '/python/ClusterLauncher/pbs_submit.sh', "The template job script to use.")
params.addParam('job_name', 'The name of the job')
params.addParam('test_name', 'The name of the test')
return params
validParams = staticmethod(validParams)
def __init__(self, name, params):
self.specs = params
# Called from the current directory to copy files (usually from the parent)
def copyFiles(self, job_file):
for file in os.listdir('../'):
if os.path.isfile('../' + file) and file != job_file:
shutil.copy('../' + file, '.')
# Called to prepare a job script if necessary
def prepareJobScript(self):
return
# Called to launch the job
def launch(self):
return
|
xy515258/moose
|
python/ClusterLauncher/Job.py
|
Python
|
lgpl-2.1
| 1,889
|
[
"MOOSE"
] |
85d4abc266feab0e0137dba06955d00d39368f69f960e3f886600c1c6cd28c6e
|
import os
import warnings
# cmr calls all available methods in ase.atoms detected by the module inspect.
# Therefore also deprecated methods are called - and we choose to silence those warnings.
warnings.filterwarnings('ignore', 'ase.atoms.*deprecated',)
from ase.test import NotAvailable
# if CMR_SETTINGS_FILE is missing, cmr raises simply
# Exception("CMR is not configured properly. Please create the settings file with cmr --create-settings.")
try:
import cmr
except (Exception, ImportError):
raise NotAvailable('CMR is required')
from ase.calculators.emt import EMT
from ase.structure import molecule
from ase.io import write
# project id: must uniquely identify the project!
project_id = 'simple reaction energies'
reaction = [('N2', -1), ('N', 2)]
calculator = EMT()
for (formula, coef) in reaction:
m = molecule(formula)
m.set_calculator(calculator)
m.get_potential_energy()
cmr_params = {
"db_keywords": [project_id],
# add project_id also as a field to support search across projects
"project_id": project_id,
"formula": formula,
"calculator": calculator.name,
}
write(filename=('reactions_xsimple.%s.db' % formula),
images=m, format='db', cmr_params=cmr_params)
# analyse the results with CMR
from cmr.ui import DirectoryReader
reader = DirectoryReader('.')
# read all compounds in the project calculated with EMT
all = reader.find(name_value_list=[('calculator', 'EMT')],
keyword_list=[project_id])
all.print_table(0, columns=["formula", "ase_potential_energy"])
print
group = cmr.create_group()
group_vars = {"reaction":reaction, "output":"group.db"}
sum = 0.0
for (formula, coef) in reaction:
data = all.get("formula", formula)
if data is None:
print "%s is missing"%formula
sum = None
break
sum += coef*data["ase_potential_energy"]
group.add(data["db_hash"])
group_vars["result"] = sum
group.write(group_vars)
print "Energy: ",sum
group.dump()
# clean
for (formula, coef) in reaction:
filename=('reactions_xsimple.%s.db' % formula)
if os.path.exists(filename): os.unlink(filename)
filename = "group.db"
if os.path.exists(filename): os.unlink(filename)
|
grhawk/ASE
|
tools/ase/test/cmr/reactions_xsimple.py
|
Python
|
gpl-2.0
| 2,261
|
[
"ASE"
] |
ff0bad4be811a65418bba59be42506a864460029be94e2fc531aad890af9aa43
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Bigquery Client library for Python."""
import abc
import collections
import datetime
import hashlib
import itertools
import json
import logging
import os
import pkgutil
import random
import re
import string
import sys
import textwrap
import time
import apiclient
from apiclient import discovery
from apiclient import http as http_request
from apiclient import model
import httplib2
# To configure apiclient logging.
import gflags as flags
# A unique non-None default, for use in kwargs that need to
# distinguish default from None.
_DEFAULT = object()
def _Typecheck(obj, types, message=None, method=None):
if not isinstance(obj, types):
if not message:
if method:
message = 'Invalid reference for %s: %r' % (method, obj)
else:
message = 'Type of %r is not one of %s' % (obj, types)
raise TypeError(message)
def _ToLowerCamel(name):
"""Convert a name with underscores to camelcase."""
return re.sub('_[a-z]', lambda match: match.group(0)[1].upper(), name)
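# For example, _ToLowerCamel('write_disposition') returns 'writeDisposition'.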
def _ToFilename(url):
"""Converts a url to a filename."""
return ''.join([c for c in url if c in string.ascii_lowercase])
def _ApplyParameters(config, **kwds):
"""Adds all kwds to config dict, adjusting keys to camelcase.
Note this does not remove entries that are set to None, however.
kwds: A dict of keys and values to set in the config.
Args:
config: A configuration dict.
"""
config.update((_ToLowerCamel(k), v) for k, v in kwds.iteritems()
if v is not None)
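# For example, _ApplyParameters(config, allow_large_results=True) sets
# config['allowLargeResults'] = True; keyword values of None are skipped.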
def ConfigurePythonLogger(apilog=None):
"""Sets up Python logger, which BigqueryClient logs with.
Applications can configure logging however they want, but this
captures one pattern of logging which seems useful when dealing with
a single command line option for determining logging.
Args:
apilog: To log to sys.stdout, specify '', '-', '1', 'true', or
'stdout'. To log to sys.stderr, specify 'stderr'. To log to a
file, specify the file path. Specify None to disable logging.
"""
if apilog is None:
# Effectively turn off logging.
logging.disable(logging.CRITICAL)
else:
if apilog in ('', '-', '1', 'true', 'stdout'):
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
elif apilog == 'stderr':
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
elif apilog:
logging.basicConfig(filename=apilog, level=logging.INFO)
else:
logging.basicConfig(level=logging.INFO)
# Turn on apiclient logging of http requests and responses. (Here
# we handle both the flags interface from apiclient < 1.2 and the
# module global in apiclient >= 1.2.)
if hasattr(flags.FLAGS, 'dump_request_response'):
flags.FLAGS.dump_request_response = True
else:
model.dump_request_response = True
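# Typical usage (illustrative): ConfigurePythonLogger('stdout') mirrors API
# requests and responses on standard output, while ConfigurePythonLogger(None)
# disables logging entirely.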
InsertEntry = collections.namedtuple('InsertEntry',
['insert_id', 'record'])
def JsonToInsertEntry(insert_id, json_string):
"""Parses a JSON encoded record and returns an InsertEntry.
Arguments:
insert_id: Id for the insert, can be None.
json_string: The JSON encoded data to be converted.
Returns:
InsertEntry object for adding to a table.
"""
try:
row = json.loads(json_string)
if not isinstance(row, dict):
raise BigqueryClientError('Value is not a JSON object')
return InsertEntry(insert_id, row)
except ValueError, e:
raise BigqueryClientError('Could not parse object: %s' % (str(e),))
class BigqueryError(Exception):
@staticmethod
def Create(error, server_error, error_ls, job_ref=None):
"""Returns a BigqueryError for json error embedded in server_error.
If error_ls contains any errors other than the given one, those
are also included in the returned message.
Args:
error: The primary error to convert.
server_error: The error returned by the server. (This is only used
in the case that error is malformed.)
error_ls: Additional errors to include in the error message.
job_ref: JobReference, if this is an error associated with a job.
Returns:
BigqueryError representing error.
"""
reason = error.get('reason')
if job_ref:
message = 'Error processing %r: %s' % (job_ref, error.get('message'))
else:
message = error.get('message')
# We don't want to repeat the "main" error message.
new_errors = [err for err in error_ls if err != error]
if new_errors:
message += '\nFailure details:\n'
message += '\n'.join(
textwrap.fill(
': '.join(filter(None, [
err.get('location', None), err.get('message', '')])),
initial_indent=' - ',
subsequent_indent=' ')
for err in new_errors)
if not reason or not message:
return BigqueryInterfaceError(
'Error reported by server with missing error fields. '
'Server returned: %s' % (str(server_error),))
if reason == 'notFound':
return BigqueryNotFoundError(message, error, error_ls, job_ref=job_ref)
if reason == 'duplicate':
return BigqueryDuplicateError(message, error, error_ls, job_ref=job_ref)
if reason == 'accessDenied':
return BigqueryAccessDeniedError(
message, error, error_ls, job_ref=job_ref)
if reason == 'invalidQuery':
return BigqueryInvalidQueryError(
message, error, error_ls, job_ref=job_ref)
if reason == 'termsOfServiceNotAccepted':
return BigqueryTermsOfServiceError(
message, error, error_ls, job_ref=job_ref)
if reason == 'backendError':
return BigqueryBackendError(
message, error, error_ls, job_ref=job_ref)
# We map the less interesting errors to BigqueryServiceError.
return BigqueryServiceError(message, error, error_ls, job_ref=job_ref)
class BigqueryCommunicationError(BigqueryError):
"""Error communicating with the server."""
pass
class BigqueryInterfaceError(BigqueryError):
"""Response from server missing required fields."""
pass
class BigqueryServiceError(BigqueryError):
"""Base class of Bigquery-specific error responses.
The BigQuery server received request and returned an error.
"""
def __init__(self, message, error, error_list, job_ref=None,
*args, **kwds):
"""Initializes a BigqueryServiceError.
Args:
message: A user-facing error message.
error: The error dictionary, code may inspect the 'reason' key.
error_list: A list of additional entries, for example a load job
may contain multiple errors here for each error encountered
during processing.
job_ref: Optional JobReference, if this error was encountered
while processing a job.
"""
super(BigqueryServiceError, self).__init__(message, *args, **kwds)
self.error = error
self.error_list = error_list
self.job_ref = job_ref
def __repr__(self):
return '%s: error=%s, error_list=%s, job_ref=%s' % (
self.__class__.__name__, self.error, self.error_list, self.job_ref)
class BigqueryNotFoundError(BigqueryServiceError):
"""The requested resource or identifier was not found."""
pass
class BigqueryDuplicateError(BigqueryServiceError):
"""The requested resource or identifier already exists."""
pass
class BigqueryAccessDeniedError(BigqueryServiceError):
"""The user does not have access to the requested resource."""
pass
class BigqueryInvalidQueryError(BigqueryServiceError):
"""The SQL statement is invalid."""
pass
class BigqueryTermsOfServiceError(BigqueryAccessDeniedError):
"""User has not ACK'd ToS."""
pass
class BigqueryBackendError(BigqueryServiceError):
"""A backend error typically corresponding to retriable HTTP 503 failures."""
pass
class BigqueryClientError(BigqueryError):
"""Invalid use of BigqueryClient."""
pass
class BigqueryClientConfigurationError(BigqueryClientError):
"""Invalid configuration of BigqueryClient."""
pass
class BigquerySchemaError(BigqueryClientError):
"""Error in locating or parsing the schema."""
pass
class BigqueryModel(model.JsonModel):
"""Adds optional global parameters to all requests."""
def __init__(self, trace=None, **kwds):
super(BigqueryModel, self).__init__(**kwds)
self.trace = trace
# pylint: disable=g-bad-name
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing request."""
if 'trace' not in query_params and self.trace:
query_params['trace'] = self.trace
return super(BigqueryModel, self).request(
headers, path_params, query_params, body_value)
# pylint: enable=g-bad-name
# pylint: disable=g-bad-name
def response(self, resp, content):
"""Convert the response wire format into a Python object."""
return super(BigqueryModel, self).response(
resp, content)
# pylint: enable=g-bad-name
class BigqueryHttp(http_request.HttpRequest):
"""Converts errors into Bigquery errors."""
def __init__(self, bigquery_model, *args, **kwds):
super(BigqueryHttp, self).__init__(*args, **kwds)
self._model = bigquery_model
@staticmethod
def Factory(bigquery_model):
"""Returns a function that creates a BigqueryHttp with the given model."""
def _Construct(*args, **kwds):
captured_model = bigquery_model
return BigqueryHttp(captured_model, *args, **kwds)
return _Construct
def execute(self, **kwds): # pylint: disable=g-bad-name
try:
return super(BigqueryHttp, self).execute(**kwds)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging
# of error responses.
self._model._log_response(e.resp, e.content) # pylint: disable=protected-access
if e.resp.get('content-type', '').startswith('application/json'):
BigqueryClient.RaiseError(json.loads(e.content))
else:
raise BigqueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (
e.resp.get('status', '(unexpected)'), e.content))
except (httplib2.HttpLib2Error, IOError), e:
raise BigqueryCommunicationError(
'Could not connect with BigQuery server due to: %r' % (e,))
class JobIdGenerator(object):
"""Base class for job id generators."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def Generate(self, job_configuration):
"""Generates a job_id to use for job_configuration."""
class JobIdGeneratorNone(JobIdGenerator):
"""Job id generator that returns None, letting the server pick the job id."""
def Generate(self, unused_config):
return None
class JobIdGeneratorRandom(JobIdGenerator):
"""Generates random job ids."""
def Generate(self, unused_config):
return 'bqjob_r%08x_%016x' % (random.SystemRandom().randint(0, sys.maxint),
int(time.time() * 1000))
class JobIdGeneratorFingerprint(JobIdGenerator):
"""Generates job ids that uniquely match the job config."""
def _Hash(self, config, sha1):
"""Computes the sha1 hash of a dict."""
keys = config.keys()
# Python dict enumeration order is arbitrary. Sort the keys
# so that we visit them in a stable order.
keys.sort()
for key in keys:
sha1.update('%s' % (key,))
v = config[key]
if isinstance(v, dict):
logging.info('Hashing: %s...', key)
self._Hash(v, sha1)
elif isinstance(v, list):
logging.info('Hashing: %s ...', key)
for inner_v in v:
self._Hash(inner_v, sha1)
else:
logging.info('Hashing: %s:%s', key, v)
sha1.update('%s' % (v,))
def Generate(self, config):
s1 = hashlib.sha1()
self._Hash(config, s1)
job_id = 'bqjob_c%s' % (s1.hexdigest(),)
logging.info('Fingerprinting: %s:\n%s', config, job_id)
return job_id
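# Illustrative sketch, not part of the original module: an identical job
# configuration always fingerprints to the same id, so retried submissions
# of the same job can be recognized by the server.
def _ExampleFingerprintJobIds():
  gen = JobIdGeneratorFingerprint()
  config = {'query': {'query': 'SELECT 17'}}
  assert gen.Generate(config) == gen.Generate(config)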
class JobIdGeneratorIncrementing(JobIdGenerator):
"""Generates job ids that increment each time we're asked."""
def __init__(self, inner):
self._inner = inner
self._retry = 0
def Generate(self, config):
self._retry += 1
return '%s_%d' % (self._inner.Generate(config), self._retry)
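# Illustrative sketch, not part of the original module. This mirrors the
# default generator configured in BigqueryClient below: each call re-rolls
# the random inner id and appends an incrementing retry counter.
def _ExampleIncrementingJobIds():
  gen = JobIdGeneratorIncrementing(JobIdGeneratorRandom())
  first = gen.Generate({})   # e.g. 'bqjob_r1a2b3c4d..._1'
  second = gen.Generate({})  # e.g. 'bqjob_r5e6f7a8b..._2'
  assert first.endswith('_1') and second.endswith('_2')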
class BigqueryClient(object):
"""Class encapsulating interaction with the BigQuery service."""
def __init__(self, **kwds):
"""Initializes BigqueryClient.
Required keywords:
api: the api to connect to, for example "bigquery".
api_version: the version of the api to connect to, for example "v2".
Optional keywords:
project_id: a default project id to use. While not required for
initialization, a project_id is required when calling any
method that creates a job on the server. Methods that have
this requirement pass through **kwds, and will raise
BigqueryClientConfigurationError if no project_id can be
found.
dataset_id: a default dataset id to use.
discovery_document: the discovery document to use. If None, one
will be retrieved from the discovery api. If not specified,
the built-in discovery document will be used.
job_property: a list of "key=value" strings defining properties
to apply to all job operations.
trace: a tracing header to include in all bigquery api requests.
sync: boolean, when inserting jobs, whether to wait for them to
complete before returning from the insert request.
wait_printer_factory: a function that returns a WaitPrinter.
This will be called for each job that we wait on. See WaitJob().
Raises:
ValueError: if keywords are missing or incorrectly specified.
"""
super(BigqueryClient, self).__init__()
for key, value in kwds.iteritems():
setattr(self, key, value)
self._apiclient = None
for required_flag in ('api', 'api_version'):
if required_flag not in kwds:
raise ValueError('Missing required flag: %s' % (required_flag,))
default_flag_values = {
'project_id': '',
'dataset_id': '',
'discovery_document': _DEFAULT,
'job_property': '',
'trace': None,
'sync': True,
'wait_printer_factory': BigqueryClient.TransitionWaitPrinter,
'job_id_generator': JobIdGeneratorIncrementing(JobIdGeneratorRandom()),
'max_rows_per_request': None,
}
for flagname, default in default_flag_values.iteritems():
if not hasattr(self, flagname):
setattr(self, flagname, default)
if self.dataset_id and not self.project_id:
raise ValueError('Cannot set dataset_id without project_id')
def GetHttp(self):
"""Returns the httplib2 Http to use."""
http = httplib2.Http()
return http
def GetDiscoveryUrl(self):
"""Returns the url to the discovery document for bigquery."""
discovery_url = self.api + '/discovery/v1/apis/{api}/{apiVersion}/rest'
return discovery_url
@property
def apiclient(self):
"""Return the apiclient attached to self."""
if self._apiclient is None:
http = self.credentials.authorize(self.GetHttp())
bigquery_model = BigqueryModel(
trace=self.trace)
bigquery_http = BigqueryHttp.Factory(
bigquery_model)
discovery_document = self.discovery_document
if discovery_document == _DEFAULT:
# Use the api description packed with this client, if one exists.
try:
discovery_document = pkgutil.get_data(
'bigquery_client', 'discovery/%s.bigquery.%s.rest.json'
% (_ToFilename(self.api), self.api_version))
except IOError:
discovery_document = None
if discovery_document is None:
try:
self._apiclient = discovery.build(
'bigquery', self.api_version, http=http,
discoveryServiceUrl=self.GetDiscoveryUrl(),
model=bigquery_model,
requestBuilder=bigquery_http)
except (httplib2.HttpLib2Error, apiclient.errors.HttpError), e:
# We can't find the specified server.
raise BigqueryCommunicationError(
'Cannot contact server. Please try again.\nError: %r'
'\nContent: %s' % (e, e.content))
except IOError, e:
raise BigqueryCommunicationError(
'Cannot contact server. Please try again.\nError: %r' % (e,))
except apiclient.errors.UnknownApiNameOrVersion, e:
# We can't resolve the discovery url for the given server.
raise BigqueryCommunicationError(
'Invalid API name or version: %s' % (str(e),))
else:
self._apiclient = discovery.build_from_document(
discovery_document, http=http,
model=bigquery_model,
requestBuilder=bigquery_http)
return self._apiclient
#################################
## Utility methods
#################################
@staticmethod
def FormatTime(secs):
return time.strftime('%d %b %H:%M:%S', time.localtime(secs))
@staticmethod
def FormatAcl(acl):
"""Format a server-returned ACL for printing."""
acl_entries = {
'OWNER': [],
'WRITER': [],
'READER': [],
'VIEW': [],
}
for entry in acl:
entry = entry.copy()
view = entry.pop('view', None)
if view:
acl_entries['VIEW'].append('%s:%s.%s' % (view.get('projectId'),
view.get('datasetId'),
view.get('tableId')))
else:
role = entry.pop('role', None)
if not role or len(entry.values()) != 1:
raise BigqueryServiceError(
'Invalid ACL returned by server: %s' % (acl,))
for _, value in entry.iteritems():
acl_entries[role].append(value)
result_lines = []
if acl_entries['OWNER']:
result_lines.extend([
'Owners:', ',\n'.join(' %s' % (o,) for o in acl_entries['OWNER'])])
if acl_entries['WRITER']:
result_lines.extend([
'Writers:', ',\n'.join(' %s' % (o,) for o in acl_entries['WRITER'])])
if acl_entries['READER']:
result_lines.extend([
'Readers:', ',\n'.join(' %s' % (o,) for o in acl_entries['READER'])])
if acl_entries['VIEW']:
result_lines.extend([
'Authorized Views:', ',\n'.join(' %s' % (o,) for o in
acl_entries['VIEW'])])
return '\n'.join(result_lines)
@staticmethod
def FormatSchema(schema):
"""Format a schema for printing."""
def PrintFields(fields, indent=0):
"""Print all fields in a schema, recurring as necessary."""
lines = []
for field in fields:
prefix = '| ' * indent
junction = '|' if field.get('type', 'STRING') != 'RECORD' else '+'
entry = '%s- %s: %s' % (
junction, field['name'], field.get('type', 'STRING').lower())
if field.get('mode', 'NULLABLE') != 'NULLABLE':
entry += ' (%s)' % (field['mode'].lower(),)
lines.append(prefix + entry)
if 'fields' in field:
lines.extend(PrintFields(field['fields'], indent + 1))
return lines
return '\n'.join(PrintFields(schema.get('fields', [])))
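# Illustrative rendering of FormatSchema (comment-only sketch): a top-level
# INTEGER field 'x' and a RECORD 'r' with a nested STRING 's' print roughly
# as:
#   |- x: integer
#   +- r: record
#   |  |- s: string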
@staticmethod
def NormalizeWait(wait):
try:
return int(wait)
except ValueError:
raise ValueError('Invalid value for wait: %s' % (wait,))
@staticmethod
def ValidatePrintFormat(print_format):
if print_format not in ['show', 'list', 'view']:
raise ValueError('Unknown format: %s' % (print_format,))
@staticmethod
def _ParseIdentifier(identifier):
"""Parses identifier into a tuple of (possibly empty) identifiers.
This will parse the identifier into a tuple of the form
(project_id, dataset_id, table_id) without doing any validation on
the resulting names; missing names are returned as ''. The
interpretation of these identifiers depends on the context of the
caller. For example, if you know the identifier must be a job_id,
then you can assume dataset_id is the job_id.
Args:
identifier: string, identifier to parse
Returns:
project_id, dataset_id, table_id: (string, string, string)
"""
# We need to handle the case of a lone project identifier of the
# form domain.com:proj separately.
if re.search(r'^\w[\w.]*\.[\w.]+:\w[\w\d_-]*:?$', identifier):
return identifier, '', ''
project_id, _, dataset_and_table_id = identifier.rpartition(':')
if '.' in dataset_and_table_id:
dataset_id, _, table_id = dataset_and_table_id.rpartition('.')
elif project_id:
# Identifier was a project : <something without dots>.
# We must have a dataset id because there was a project
dataset_id = dataset_and_table_id
table_id = ''
else:
# Identifier was just a bare id with no dots or colons.
# Return this as a table_id.
dataset_id = ''
table_id = dataset_and_table_id
return project_id, dataset_id, table_id
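# Illustrative parses (comment-only sketch):
#   _ParseIdentifier('proj:ds.tbl')   -> ('proj', 'ds', 'tbl')
#   _ParseIdentifier('proj:ds')       -> ('proj', 'ds', '')
#   _ParseIdentifier('name')          -> ('', '', 'name')
#   _ParseIdentifier('domain.com:p')  -> ('domain.com:p', '', '')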
def GetProjectReference(self, identifier=''):
"""Determine a project reference from an identifier and self."""
project_id, dataset_id, table_id = BigqueryClient._ParseIdentifier(
identifier)
try:
# ParseIdentifier('foo') is just a table_id, but we want to read
# it as a project_id.
project_id = project_id or table_id or self.project_id
if not dataset_id and project_id:
return ApiClientHelper.ProjectReference.Create(projectId=project_id)
except ValueError:
pass
raise BigqueryClientError('Cannot determine project described by %s' % (
identifier,))
def GetDatasetReference(self, identifier=''):
"""Determine a DatasetReference from an identifier and self."""
project_id, dataset_id, table_id = BigqueryClient._ParseIdentifier(
identifier)
if table_id and not project_id and not dataset_id:
# identifier is 'foo'
project_id = self.project_id
dataset_id = table_id
elif project_id and dataset_id and table_id:
# Identifier was foo:bar.baz.qux.
dataset_id = dataset_id + '.' + table_id
elif project_id and dataset_id and not table_id:
# identifier is 'foo:bar'
pass
elif not identifier:
# identifier is ''
project_id = self.project_id
dataset_id = self.dataset_id
else:
raise BigqueryError('Cannot determine dataset described by %s' % (
identifier,))
try:
return ApiClientHelper.DatasetReference.Create(
projectId=project_id, datasetId=dataset_id)
except ValueError:
raise BigqueryError('Cannot determine dataset described by %s' % (
identifier,))
def GetTableReference(self, identifier=''):
"""Determine a TableReference from an identifier and self."""
project_id, dataset_id, table_id = BigqueryClient._ParseIdentifier(
identifier)
try:
return ApiClientHelper.TableReference.Create(
projectId=project_id or self.project_id,
datasetId=dataset_id or self.dataset_id,
tableId=table_id,
)
except ValueError:
raise BigqueryError('Cannot determine table described by %s' % (
identifier,))
def GetReference(self, identifier=''):
"""Try to deduce a project/dataset/table reference from a string.
If the identifier is not compound, treat it as the most specific
identifier we don't have as a flag, or as the table_id. If it is
compound, fill in any unspecified part.
Args:
identifier: string, Identifier to create a reference for.
Returns:
A valid ProjectReference, DatasetReference, or TableReference.
Raises:
BigqueryError: if no valid reference can be determined.
"""
try:
return self.GetTableReference(identifier)
except BigqueryError:
pass
try:
return self.GetDatasetReference(identifier)
except BigqueryError:
pass
try:
return self.GetProjectReference(identifier)
except BigqueryError:
pass
raise BigqueryError('Cannot determine reference for "%s"' % (identifier,))
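# Illustrative resolution (comment-only sketch), assuming the client has
# default project_id 'p' and dataset_id 'd':
#   GetReference('ds.tbl') -> TableReference for p:ds.tbl
#   GetReference('x')      -> TableReference for p:d.x
#   GetReference('')       -> DatasetReference for p:d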
# TODO(user): consider introducing job-specific and possibly
# dataset- and project-specific parsers for the case of knowing what
# type we are looking for. Reinterpreting "dataset_id" as "job_id"
# is rather confusing.
def GetJobReference(self, identifier=''):
"""Determine a JobReference from an identifier and self."""
project_id, dataset_id, table_id = BigqueryClient._ParseIdentifier(
identifier)
if table_id and not project_id and not dataset_id:
# identifier is 'foo'
project_id = self.project_id
job_id = table_id
elif project_id and dataset_id and not table_id:
# identifier is 'foo:bar'
job_id = dataset_id
else:
job_id = None
if job_id:
try:
return ApiClientHelper.JobReference.Create(
projectId=project_id, jobId=job_id)
except ValueError:
pass
raise BigqueryError('Cannot determine job described by %s' % (
identifier,))
def GetObjectInfo(self, reference):
"""Get all data returned by the server about a specific object."""
# Projects are handled separately, because we only have
# bigquery.projects.list.
if isinstance(reference, ApiClientHelper.ProjectReference):
projects = self.ListProjects()
for project in projects:
if BigqueryClient.ConstructObjectReference(project) == reference:
project['kind'] = 'bigquery#project'
return project
raise BigqueryNotFoundError('Unknown %r' % (reference,))
if isinstance(reference, ApiClientHelper.JobReference):
return self.apiclient.jobs().get(**dict(reference)).execute()
elif isinstance(reference, ApiClientHelper.DatasetReference):
return self.apiclient.datasets().get(**dict(reference)).execute()
elif isinstance(reference, ApiClientHelper.TableReference):
return self.apiclient.tables().get(**dict(reference)).execute()
else:
raise TypeError('Type of reference must be one of: ProjectReference, '
'JobReference, DatasetReference, or TableReference')
def GetTableSchema(self, table_dict):
table_info = self.apiclient.tables().get(**table_dict).execute()
return table_info.get('schema', {})
def InsertTableRows(self, table_dict, inserts):
"""Insert rows into a table.
Arguments:
table_dict: table reference into which rows are to be inserted.
inserts: array of InsertEntry tuples where insert_id can be None.
Returns:
result of the operation.
"""
def _EncodeInsert(insert):
encoded = dict(json=insert.record)
if insert.insert_id:
encoded['insertId'] = insert.insert_id
return encoded
op = self.apiclient.tabledata().insertAll(
body=dict(rows=map(_EncodeInsert, inserts)),
**table_dict)
return op.execute()
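# Illustrative call (comment-only sketch; InsertEntry is the namedtuple
# defined earlier in this module, assumed here to be (insert_id, record)):
#   client.InsertTableRows(
#       {'projectId': 'p', 'datasetId': 'd', 'tableId': 't'},
#       [InsertEntry('row-1', {'x': 1}), InsertEntry(None, {'x': 2})])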
def ReadSchemaAndRows(self, table_dict, start_row=None, max_rows=None):
"""Convenience method to get the schema and rows from a table.
Arguments:
table_dict: table reference dictionary.
start_row: first row to read.
max_rows: number of rows to read.
Returns:
A tuple where the first item is the list of fields and the
second item a list of rows.
Raises:
ValueError: will be raised if start_row is not explicitly provided.
ValueError: will be raised if max_rows is not explicitly provided.
"""
if start_row is None:
raise ValueError('start_row is required')
if max_rows is None:
raise ValueError('max_rows is required')
table_ref = ApiClientHelper.TableReference.Create(**table_dict)
return _TableTableReader(self.apiclient, self.max_rows_per_request,
table_ref).ReadSchemaAndRows(start_row,
max_rows)
def ReadSchemaAndJobRows(self, job_dict, start_row=None, max_rows=None):
"""Convenience method to get the schema and rows from job query result.
Arguments:
job_dict: job reference dictionary.
start_row: first row to read.
max_rows: number of rows to read.
Returns:
A tuple where the first item is the list of fields and the
second item a list of rows.
Raises:
ValueError: will be raised if start_row is not explicitly provided.
ValueError: will be raised if max_rows is not explicitly provided.
"""
if start_row is None:
raise ValueError('start_row is required')
if max_rows is None:
raise ValueError('max_rows is required')
job_ref = ApiClientHelper.JobReference.Create(**job_dict)
reader = _JobTableReader(self.apiclient, self.max_rows_per_request,
job_ref)
return reader.ReadSchemaAndRows(start_row, max_rows)
@staticmethod
def ConfigureFormatter(formatter, reference_type, print_format='list'):
"""Configure a formatter for a given reference type.
If print_format is 'show', configures the formatter with several
additional fields (useful for printing a single record).
Arguments:
formatter: TableFormatter object to configure.
reference_type: Type of object this formatter will be used with.
print_format: Either 'show' or 'list' to control what fields are
included.
Raises:
ValueError: If reference_type or print_format is unknown.
"""
BigqueryClient.ValidatePrintFormat(print_format)
if reference_type == ApiClientHelper.JobReference:
if print_format == 'list':
formatter.AddColumns(('jobId',))
formatter.AddColumns(
('Job Type', 'State', 'Start Time', 'Duration',))
if print_format == 'show':
formatter.AddColumns(('Bytes Processed',))
elif reference_type == ApiClientHelper.ProjectReference:
if print_format == 'list':
formatter.AddColumns(('projectId',))
formatter.AddColumns(('friendlyName',))
elif reference_type == ApiClientHelper.DatasetReference:
if print_format == 'list':
formatter.AddColumns(('datasetId',))
if print_format == 'show':
formatter.AddColumns(('Last modified', 'ACLs',))
elif reference_type == ApiClientHelper.TableReference:
if print_format == 'list':
formatter.AddColumns(('tableId', 'Type',))
if print_format == 'show':
formatter.AddColumns(('Last modified', 'Schema',
'Total Rows', 'Total Bytes',
'Expiration'))
if print_format == 'view':
formatter.AddColumns(('Query',))
else:
raise ValueError('Unknown reference type: %s' % (
reference_type.__name__,))
@staticmethod
def RaiseError(result):
"""Raises an appropriate BigQuery error given the json error result."""
error = result.get('error', {}).get('errors', [{}])[0]
raise BigqueryError.Create(error, result, [])
@staticmethod
def IsFailedJob(job):
"""Predicate to determine whether or not a job failed."""
return 'errorResult' in job.get('status', {})
@staticmethod
def RaiseIfJobError(job):
"""Raises a BigQueryError if the job is in an error state.
Args:
job: a Job resource.
Returns:
job, if it is not in an error state.
Raises:
BigqueryError: A BigqueryError instance based on the job's error
description.
"""
if BigqueryClient.IsFailedJob(job):
error = job['status']['errorResult']
error_ls = job['status'].get('errors', [])
raise BigqueryError.Create(
error, error, error_ls,
job_ref=BigqueryClient.ConstructObjectReference(job))
return job
@staticmethod
def GetJobTypeName(job_info):
"""Helper for job printing code."""
job_names = set(('extract', 'load', 'query', 'copy'))
try:
return set(job_info.get('configuration', {}).keys()).intersection(
job_names).pop()
except KeyError:
return None
@staticmethod
def ProcessSources(source_string):
"""Take a source string and return a list of URIs.
The list will consist of either a single local filename, which
we check exists and is a file, or a list of gs:// URIs.
Args:
source_string: A comma-separated list of URIs.
Returns:
List of one or more valid URIs, as strings.
Raises:
BigqueryClientError: if no valid list of sources can be determined.
"""
sources = [source.strip() for source in source_string.split(',')]
gs_uris = [source for source in sources if source.startswith('gs://')]
if not sources:
raise BigqueryClientError('No sources specified')
if gs_uris:
if len(gs_uris) != len(sources):
raise BigqueryClientError('All URIs must begin with "gs://" if any do.')
return sources
else:
source = sources[0]
if len(sources) > 1:
raise BigqueryClientError(
'Local upload currently supports only one file, found %d' % (
len(sources),))
if not os.path.exists(source):
raise BigqueryClientError('Source file not found: %s' % (source,))
if not os.path.isfile(source):
raise BigqueryClientError('Source path is not a file: %s' % (source,))
return sources
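# Illustrative behavior (comment-only sketch):
#   ProcessSources('gs://b/a.csv,gs://b/b.csv') -> both URIs
#   ProcessSources('data.csv') -> ['data.csv'] (file must exist locally)
#   ProcessSources('gs://b/a.csv,data.csv') raises BigqueryClientError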
@staticmethod
def ReadSchema(schema):
"""Create a schema from a string or a filename.
If schema does not contain ':' and is the name of an existing
file, read it as a JSON schema. If not, it must be a
comma-separated list of fields in the form name:type.
Args:
schema: A filename or schema.
Returns:
The new schema (as a dict).
Raises:
BigquerySchemaError: If the schema is invalid or the filename does
not exist.
"""
def NewField(entry):
name, _, field_type = entry.partition(':')
if entry.count(':') > 1 or not name.strip():
raise BigquerySchemaError('Invalid schema entry: %s' % (entry,))
return {
'name': name.strip(),
'type': field_type.strip().upper() or 'STRING',
}
if not schema:
raise BigquerySchemaError('Schema cannot be empty')
elif os.path.exists(schema):
with open(schema) as f:
try:
return json.load(f)
except ValueError, e:
raise BigquerySchemaError(
('Error decoding JSON schema from file %s: %s\n'
'To specify a one-column schema, use "name:string".') % (
schema, e))
elif re.match(r'[./\\]', schema) is not None:
# We have something that looks like a filename, but we didn't
# find it. Tell the user about the problem now, rather than wait
# for a round-trip to the server.
raise BigquerySchemaError(
('Error reading schema: "%s" looks like a filename, '
'but was not found.') % (schema,))
else:
return [NewField(entry) for entry in schema.split(',')]
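# Illustrative inputs (comment-only sketch):
#   ReadSchema('name:string,age:integer') ->
#       [{'name': 'name', 'type': 'STRING'},
#        {'name': 'age', 'type': 'INTEGER'}]
#   ReadSchema('ts') -> [{'name': 'ts', 'type': 'STRING'}]  (type defaults)
#   ReadSchema('schema.json') parses that file as a JSON list of fields.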
@staticmethod
def _KindToName(kind):
"""Convert a kind to just a type name."""
return kind.partition('#')[2]
@staticmethod
def FormatInfoByKind(object_info):
"""Format a single object_info (based on its 'kind' attribute)."""
kind = BigqueryClient._KindToName(object_info.get('kind'))
if kind == 'job':
return BigqueryClient.FormatJobInfo(object_info)
elif kind == 'project':
return BigqueryClient.FormatProjectInfo(object_info)
elif kind == 'dataset':
return BigqueryClient.FormatDatasetInfo(object_info)
elif kind == 'table':
return BigqueryClient.FormatTableInfo(object_info)
else:
raise ValueError('Unknown object type: %s' % (kind,))
@staticmethod
def FormatJobInfo(job_info):
"""Prepare a job_info for printing.
Arguments:
job_info: Job dict to format.
Returns:
The new job_info.
"""
result = job_info.copy()
reference = BigqueryClient.ConstructObjectReference(result)
result.update(dict(reference))
if 'startTime' in result.get('statistics', {}):
start = int(result['statistics']['startTime']) / 1000
if 'endTime' in result['statistics']:
duration_seconds = int(result['statistics']['endTime']) / 1000 - start
result['Duration'] = str(datetime.timedelta(seconds=duration_seconds))
result['Start Time'] = BigqueryClient.FormatTime(start)
result['Job Type'] = BigqueryClient.GetJobTypeName(result)
result['State'] = result['status']['state']
if result['State'] == 'DONE':
try:
BigqueryClient.RaiseIfJobError(result)
result['State'] = 'SUCCESS'
except BigqueryError:
result['State'] = 'FAILURE'
if 'totalBytesProcessed' in result.get('statistics', {}):
result['Bytes Processed'] = result['statistics']['totalBytesProcessed']
return result
@staticmethod
def FormatProjectInfo(project_info):
"""Prepare a project_info for printing.
Arguments:
project_info: Project dict to format.
Returns:
The new project_info.
"""
result = project_info.copy()
reference = BigqueryClient.ConstructObjectReference(result)
result.update(dict(reference))
return result
@staticmethod
def FormatDatasetInfo(dataset_info):
"""Prepare a dataset_info for printing.
Arguments:
dataset_info: Dataset dict to format.
Returns:
The new dataset_info.
"""
result = dataset_info.copy()
reference = BigqueryClient.ConstructObjectReference(result)
result.update(dict(reference))
if 'lastModifiedTime' in result:
result['Last modified'] = BigqueryClient.FormatTime(
int(result['lastModifiedTime']) / 1000)
if 'access' in result:
result['ACLs'] = BigqueryClient.FormatAcl(result['access'])
return result
@staticmethod
def FormatTableInfo(table_info):
"""Prepare a table_info for printing.
Arguments:
table_info: Table dict to format.
Returns:
The new table_info.
"""
result = table_info.copy()
reference = BigqueryClient.ConstructObjectReference(result)
result.update(dict(reference))
if 'lastModifiedTime' in result:
result['Last modified'] = BigqueryClient.FormatTime(
int(result['lastModifiedTime']) / 1000)
if 'schema' in result:
result['Schema'] = BigqueryClient.FormatSchema(result['schema'])
if 'numBytes' in result:
result['Total Bytes'] = result['numBytes']
if 'numRows' in result:
result['Total Rows'] = result['numRows']
if 'expirationTime' in result:
result['Expiration'] = BigqueryClient.FormatTime(
int(result['expirationTime']) / 1000)
if 'type' in result:
result['Type'] = result['type']
if result['type'] == 'VIEW':
result['Total Bytes'] = '(view)'
result['Total Rows'] = '(view)'
if 'view' in result:
result['Query'] = result['view']['query']
return result
@staticmethod
def ConstructObjectReference(object_info):
"""Construct a Reference from a server response."""
if 'kind' in object_info:
typename = BigqueryClient._KindToName(object_info['kind'])
lower_camel = typename + 'Reference'
if lower_camel not in object_info:
raise ValueError('Cannot find %s in object of type %s: %s' % (
lower_camel, typename, object_info))
else:
keys = [k for k in object_info if k.endswith('Reference')]
if len(keys) != 1:
raise ValueError('Expected one Reference, found %s: %s' % (
len(keys), keys))
lower_camel = keys[0]
upper_camel = lower_camel[0].upper() + lower_camel[1:]
reference_type = getattr(ApiClientHelper, upper_camel, None)
if reference_type is None:
raise ValueError('Unknown reference type: %s' % (typename,))
return reference_type.Create(**object_info[lower_camel])
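# Illustrative round trip (comment-only sketch):
#   info = {'kind': 'bigquery#job',
#           'jobReference': {'projectId': 'p', 'jobId': 'j'}}
#   ConstructObjectReference(info) -> the JobReference for job 'j' in 'p'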
@staticmethod
def ConstructObjectInfo(reference):
"""Construct an Object from an ObjectReference."""
typename = reference.__class__.__name__
lower_camel = typename[0].lower() + typename[1:]
return {lower_camel: dict(reference)}
def _PrepareListRequest(self, reference, max_results=None, page_token=None):
request = dict(reference)
if max_results is not None:
request['maxResults'] = max_results
if page_token is not None:
request['pageToken'] = page_token
return request
def _NormalizeProjectReference(self, reference):
if reference is None:
try:
return self.GetProjectReference()
except BigqueryClientError:
raise BigqueryClientError(
'Project reference or a default project is required')
return reference
def ListJobRefs(self, **kwds):
return map( # pylint: disable=g-long-lambda
BigqueryClient.ConstructObjectReference, self.ListJobs(**kwds))
def ListJobs(self, reference=None,
max_results=None, state_filter=None,
all_users=None):
"""Return a list of jobs.
Args:
reference: The ProjectReference to list jobs for.
max_results: The maximum number of jobs to return.
state_filter: A single state filter or a list of filters to
apply. If not specified, no filtering is applied.
all_users: Whether to list jobs for all users of the project. Requesting
user must be an owner of the project to list all jobs.
Returns:
A list of jobs.
"""
reference = self._NormalizeProjectReference(reference)
_Typecheck(reference, ApiClientHelper.ProjectReference, method='ListJobs')
request = self._PrepareListRequest(reference, max_results, None)
if state_filter is not None:
# The apiclient wants enum values as lowercase strings.
if isinstance(state_filter, basestring):
state_filter = state_filter.lower()
else:
state_filter = [s.lower() for s in state_filter]
_ApplyParameters(request, projection='full',
state_filter=state_filter, all_users=all_users)
jobs = self.apiclient.jobs().list(**request).execute()
return jobs.get('jobs', [])
def ListProjectRefs(self, **kwds):
"""List the project references this user has access to."""
return map( # pylint: disable=g-long-lambda
BigqueryClient.ConstructObjectReference, self.ListProjects(**kwds))
def ListProjects(self, max_results=None, page_token=None):
"""List the projects this user has access to."""
request = self._PrepareListRequest({}, max_results, page_token)
result = self.apiclient.projects().list(**request).execute()
return result.get('projects', [])
def ListDatasetRefs(self, **kwds):
return map( # pylint: disable=g-long-lambda
BigqueryClient.ConstructObjectReference, self.ListDatasets(**kwds))
def ListDatasets(self, reference=None, max_results=None, page_token=None,
list_all=None):
"""List the datasets associated with this reference."""
reference = self._NormalizeProjectReference(reference)
_Typecheck(reference, ApiClientHelper.ProjectReference,
method='ListDatasets')
request = self._PrepareListRequest(reference, max_results, page_token)
if list_all is not None:
request['all'] = list_all
result = self.apiclient.datasets().list(**request).execute()
results = result.get('datasets', [])
if max_results is not None:
while 'nextPageToken' in result and len(results) < max_results:
request = self._PrepareListRequest(
reference, max_results, result['nextPageToken'])
if list_all is not None:
request['all'] = list_all
result = self.apiclient.datasets().list(**request).execute()
results.extend(result.get('datasets', []))
return results
def ListTableRefs(self, **kwds):
return map( # pylint: disable=g-long-lambda
BigqueryClient.ConstructObjectReference, self.ListTables(**kwds))
def ListTables(self, reference, max_results=None, page_token=None):
"""List the tables associated with this reference."""
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='ListTables')
request = self._PrepareListRequest(reference, max_results, page_token)
result = self.apiclient.tables().list(**request).execute()
results = result.get('tables', [])
if max_results is not None:
while 'nextPageToken' in result and len(results) < max_results:
request = self._PrepareListRequest(
reference, max_results, result['nextPageToken'])
result = self.apiclient.tables().list(**request).execute()
results.extend(result.get('tables', []))
return results
#################################
## Table and dataset management
#################################
def CopyTable(self, source_references, dest_reference,
create_disposition=None, write_disposition=None,
ignore_already_exists=False, **kwds):
"""Copies a table.
Args:
source_references: TableReferences of source tables.
dest_reference: TableReference of destination table.
create_disposition: Optional. Specifies the create_disposition for
the dest_reference.
write_disposition: Optional. Specifies the write_disposition for
the dest_reference.
ignore_already_exists: Whether to ignore "already exists" errors.
**kwds: Passed on to ExecuteJob.
Returns:
The job description, or None for ignored errors.
Raises:
BigqueryDuplicateError: when write_disposition 'WRITE_EMPTY' is
specified and the dest_reference table already exists.
"""
for src_ref in source_references:
_Typecheck(src_ref, ApiClientHelper.TableReference,
method='CopyTable')
_Typecheck(dest_reference, ApiClientHelper.TableReference,
method='CopyTable')
copy_config = {
'destinationTable': dict(dest_reference),
'sourceTables': [dict(src_ref) for src_ref in source_references],
}
_ApplyParameters(copy_config, create_disposition=create_disposition,
write_disposition=write_disposition)
try:
return self.ExecuteJob({'copy': copy_config}, **kwds)
except BigqueryDuplicateError, e:
if ignore_already_exists:
return None
raise e
def DatasetExists(self, reference):
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='DatasetExists')
try:
self.apiclient.datasets().get(**dict(reference)).execute()
return True
except BigqueryNotFoundError:
return False
def TableExists(self, reference):
_Typecheck(reference, ApiClientHelper.TableReference, method='TableExists')
try:
self.apiclient.tables().get(**dict(reference)).execute()
return True
except BigqueryNotFoundError:
return False
def CreateDataset(self, reference, ignore_existing=False, description=None,
friendly_name=None, acl=None):
"""Create a dataset corresponding to DatasetReference.
Args:
reference: the DatasetReference to create.
ignore_existing: (boolean, default False) If False, raise
an exception if the dataset already exists.
description: an optional dataset description.
friendly_name: an optional friendly name for the dataset.
acl: an optional ACL for the dataset, as a list of dicts.
Raises:
TypeError: if reference is not a DatasetReference.
BigqueryDuplicateError: if reference exists and ignore_existing
is False.
"""
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='CreateDataset')
body = BigqueryClient.ConstructObjectInfo(reference)
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if acl is not None:
body['access'] = acl
try:
self.apiclient.datasets().insert(
body=body,
**dict(reference.GetProjectReference())).execute()
except BigqueryDuplicateError:
if not ignore_existing:
raise
def CreateTable(self, reference, ignore_existing=False, schema=None,
description=None, friendly_name=None, expiration=None,
view_query=None):
"""Create a table corresponding to TableReference.
Args:
reference: the TableReference to create.
ignore_existing: (boolean, default False) If False, raise
an exception if the table already exists.
schema: an optional schema for tables.
description: an optional description for tables or views.
friendly_name: an optional friendly name for the table.
expiration: optional expiration time in milliseconds since the epoch for
tables or views.
view_query: an optional SQL query for views.
Raises:
TypeError: if reference is not a TableReference.
BigqueryDuplicateError: if reference exists and ignore_existing
is False.
"""
_Typecheck(reference, ApiClientHelper.TableReference, method='CreateTable')
try:
body = BigqueryClient.ConstructObjectInfo(reference)
if schema:
body['schema'] = {'fields': schema}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
if view_query is not None:
body['view'] = {'query': view_query}
self.apiclient.tables().insert(
body=body,
**dict(reference.GetDatasetReference())).execute()
except BigqueryDuplicateError:
if not ignore_existing:
raise
def UpdateTable(self, reference, schema=None,
description=None, friendly_name=None, expiration=None,
view_query=None):
"""Updates a table.
Args:
reference: the TableReference to update.
schema: an optional schema for tables.
description: an optional description for tables or views.
friendly_name: an optional friendly name for the table.
expiration: optional expiration time in milliseconds since the epoch for
tables or views.
view_query: an optional SQL query to update a view.
Raises:
TypeError: if reference is not a TableReference.
"""
_Typecheck(reference, ApiClientHelper.TableReference, method='UpdateTable')
body = BigqueryClient.ConstructObjectInfo(reference)
if schema:
body['schema'] = {'fields': schema}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
if view_query is not None:
body['view'] = {'query': view_query}
self.apiclient.tables().patch(body=body, **dict(reference)).execute()
def UpdateDataset(self, reference,
description=None, friendly_name=None, acl=None):
"""Updates a dataset.
Args:
reference: the DatasetReference to update.
description: an optional dataset description.
friendly_name: an optional friendly name for the dataset.
acl: an optional ACL for the dataset, as a list of dicts.
Raises:
TypeError: if reference is not a DatasetReference.
"""
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='UpdateDataset')
body = BigqueryClient.ConstructObjectInfo(reference)
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if acl is not None:
body['access'] = acl
self.apiclient.datasets().patch(body=body, **dict(reference)).execute()
def DeleteDataset(self, reference, ignore_not_found=False,
delete_contents=None):
"""Deletes DatasetReference reference.
Args:
reference: the DatasetReference to delete.
ignore_not_found: Whether to ignore "not found" errors.
delete_contents: [Boolean] Whether to delete the contents of
non-empty datasets. If not specified and the dataset has
tables in it, the delete will fail. If not specified,
the server default applies.
Raises:
TypeError: if reference is not a DatasetReference.
BigqueryNotFoundError: if reference does not exist and
ignore_not_found is False.
"""
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='DeleteDataset')
args = dict(reference)
if delete_contents is not None:
args['deleteContents'] = delete_contents
try:
self.apiclient.datasets().delete(**args).execute()
except BigqueryNotFoundError:
if not ignore_not_found:
raise
def DeleteTable(self, reference, ignore_not_found=False):
"""Deletes TableReference reference.
Args:
reference: the TableReference to delete.
ignore_not_found: Whether to ignore "not found" errors.
Raises:
TypeError: if reference is not a TableReference.
BigqueryNotFoundError: if reference does not exist and
ignore_not_found is False.
"""
_Typecheck(reference, ApiClientHelper.TableReference, method='DeleteTable')
try:
self.apiclient.tables().delete(**dict(reference)).execute()
except BigqueryNotFoundError:
if not ignore_not_found:
raise
#################################
## Job control
#################################
def StartJob(self, configuration,
project_id=None, upload_file=None, job_id=None):
"""Start a job with the given configuration.
Args:
configuration: The configuration for a job.
project_id: The project_id to run the job under. If None,
self.project_id is used.
upload_file: A file to include as a media upload to this request.
Only valid on job requests that expect a media upload file.
job_id: A unique job_id to use for this job. If a
JobIdGenerator, a job id will be generated from the job configuration.
If None, a unique job_id will be created for this request.
Returns:
The job resource returned from the insert job request. If there is an
error, the jobReference field will still be filled out with the job
reference used in the request.
Raises:
BigqueryClientConfigurationError: if project_id and
self.project_id are None.
"""
project_id = project_id or self.project_id
if not project_id:
raise BigqueryClientConfigurationError(
'Cannot start a job without a project id.')
configuration = configuration.copy()
if self.job_property:
configuration['properties'] = dict(
prop.partition('=')[0::2] for prop in self.job_property)
job_request = {'configuration': configuration}
# Use the default job id generator if no job id was supplied.
job_id = job_id or self.job_id_generator
if isinstance(job_id, JobIdGenerator):
job_id = job_id.Generate(configuration)
if job_id is not None:
job_reference = {'jobId': job_id, 'projectId': project_id}
job_request['jobReference'] = job_reference
media_upload = ''
if upload_file:
resumable = True
media_upload = http_request.MediaFileUpload(
filename=upload_file, mimetype='application/octet-stream',
resumable=resumable)
result = self.apiclient.jobs().insert(
body=job_request, media_body=media_upload,
projectId=project_id).execute()
return result
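# Note on the job_property handling in StartJob above (illustrative):
# prop.partition('=') yields (key, '=', value) and the [0::2] slice keeps
# (key, value), so ['foo=bar', 'baz=qux'] becomes
# {'foo': 'bar', 'baz': 'qux'}.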
def _StartQueryRpc(self,
query,
dry_run=None,
use_cache=None,
preserve_nulls=None,
max_results=None,
timeout_ms=None,
min_completion_ratio=None,
project_id=None,
**kwds):
"""Executes the given query using the rpc-style query api.
Args:
query: Query to execute.
dry_run: Optional. Indicates whether the query will only be validated and
return processing statistics instead of actually running.
use_cache: Optional. Whether to use the query cache.
Caching is best-effort only and you should not make
assumptions about whether or how long a query result will be cached.
preserve_nulls: Optional. Indicates whether to preserve nulls in input
data. Temporary flag; will be removed in a future version.
max_results: Maximum number of results to return.
timeout_ms: Timeout, in milliseconds, for the call to query().
min_completion_ratio: Optional. Specifies the minimum fraction of
data that must be scanned before a query returns. This value should be
between 0.0 and 1.0 inclusive.
project_id: Project id to use.
**kwds: Extra keyword arguments passed directly to jobs.Query().
Returns:
The query response.
Raises:
BigqueryClientConfigurationError: if project_id and
self.project_id are None.
"""
project_id = project_id or self.project_id
if not project_id:
raise BigqueryClientConfigurationError(
'Cannot run a query without a project id.')
request = {'query': query}
if self.dataset_id:
request['defaultDataset'] = dict(self.GetDatasetReference())
_ApplyParameters(
request,
preserve_nulls=preserve_nulls,
use_query_cache=use_cache,
timeout_ms=timeout_ms,
max_results=max_results,
min_completion_ratio=min_completion_ratio)
_ApplyParameters(request, dry_run=dry_run)
return self.apiclient.jobs().query(
body=request, projectId=project_id, **kwds).execute()
def GetQueryResults(self, job_id=None, project_id=None,
max_results=None, timeout_ms=None):
"""Waits for a query to complete, once.
Args:
job_id: The job id of the query job that we are waiting to complete.
project_id: The project id of the query job.
max_results: The maximum number of results.
timeout_ms: The number of milliseconds to wait for the query to complete.
Returns:
The getQueryResults() result.
Raises:
BigqueryClientConfigurationError: if project_id and
self.project_id are None.
"""
project_id = project_id or self.project_id
if not project_id:
raise BigqueryClientConfigurationError(
'Cannot get query results without a project id.')
kwds = {}
_ApplyParameters(kwds,
job_id=job_id,
project_id=project_id,
timeout_ms=timeout_ms,
max_results=max_results)
return self.apiclient.jobs().getQueryResults(**kwds).execute()
def RunJobSynchronously(self, configuration, project_id=None,
upload_file=None, job_id=None):
result = self.StartJob(configuration, project_id=project_id,
upload_file=upload_file, job_id=job_id)
if result['status']['state'] != 'DONE':
job_reference = BigqueryClient.ConstructObjectReference(result)
result = self.WaitJob(job_reference)
return self.RaiseIfJobError(result)
def ExecuteJob(self, configuration, sync=None,
project_id=None, upload_file=None, job_id=None):
"""Execute a job, possibly waiting for results."""
if sync is None:
sync = self.sync
if sync:
job = self.RunJobSynchronously(
configuration, project_id=project_id, upload_file=upload_file,
job_id=job_id)
else:
job = self.StartJob(
configuration, project_id=project_id, upload_file=upload_file,
job_id=job_id)
self.RaiseIfJobError(job)
return job
class WaitPrinter(object):
"""Base class that defines the WaitPrinter interface."""
def Print(self, job_id, wait_time, status):
"""Prints status for the current job we are waiting on.
Args:
job_id: the identifier for this job.
wait_time: the number of seconds we have been waiting so far.
status: the status of the job we are waiting for.
"""
raise NotImplementedError('Subclass must implement Print')
def Done(self):
"""Waiting is done and no more Print calls will be made.
This function should handle the case of Print not being called.
"""
raise NotImplementedError('Subclass must implement Done')
class WaitPrinterHelper(WaitPrinter):
"""A Done implementation that prints based off a property."""
print_on_done = False
def Done(self):
if self.print_on_done:
print
class QuietWaitPrinter(WaitPrinterHelper):
"""A WaitPrinter that prints nothing."""
def Print(self, unused_job_id, unused_wait_time, unused_status):
pass
class VerboseWaitPrinter(WaitPrinterHelper):
"""A WaitPrinter that prints every update."""
def Print(self, job_id, wait_time, status):
self.print_on_done = True
print '\rWaiting on %s ... (%ds) Current status: %-7s' % (
job_id, wait_time, status),
sys.stdout.flush()
class TransitionWaitPrinter(VerboseWaitPrinter):
"""A WaitPrinter that only prints status change updates."""
_previous_status = None
def Print(self, job_id, wait_time, status):
if status != self._previous_status:
self._previous_status = status
super(BigqueryClient.TransitionWaitPrinter, self).Print(
job_id, wait_time, status)
def WaitJob(self, job_reference, status='DONE',
wait=sys.maxint, wait_printer_factory=None):
"""Poll for a job to run until it reaches the requested status.
Arguments:
job_reference: JobReference to poll.
status: (optional, default 'DONE') Desired job status.
wait: (optional, default maxint) Max wait time.
wait_printer_factory: (optional, defaults to
self.wait_printer_factory) Returns a subclass of WaitPrinter
that will be called after each job poll.
Returns:
The job object returned by the final status call.
Raises:
StopIteration: If polling does not reach the desired state before
timing out.
ValueError: If given an invalid wait value.
"""
_Typecheck(job_reference, ApiClientHelper.JobReference, method='WaitJob')
start_time = time.time()
job = None
if wait_printer_factory:
printer = wait_printer_factory()
else:
printer = self.wait_printer_factory()
# This is a first pass at wait logic: we ping at 1s intervals eight
# times, then with gaps growing from 2s up to 29s, and then every 30s
# until we run out of time.
waits = itertools.chain(
itertools.repeat(1, 8),
xrange(2, 30, 3),
itertools.repeat(30))
current_wait = 0
current_status = 'UNKNOWN'
while current_wait <= wait:
try:
done, job = self.PollJob(job_reference, status=status, wait=wait)
current_status = job['status']['state']
if done:
printer.Print(job_reference.jobId, current_wait, current_status)
break
except BigqueryCommunicationError, e:
# Communication errors while waiting on a job are okay.
logging.warning('Transient error during job status check: %s', e)
except BigqueryBackendError, e:
# Temporary server errors while waiting on a job are okay.
logging.warning('Transient error during job status check: %s', e)
for _ in xrange(waits.next()):
current_wait = time.time() - start_time
printer.Print(job_reference.jobId, current_wait, current_status)
time.sleep(1)
else:
raise StopIteration(
'Wait timed out. Operation not finished, in state %s' % (
current_status,))
printer.Done()
return job
def PollJob(self, job_reference, status='DONE', wait=0):
"""Poll a job once for a specific status.
Arguments:
job_reference: JobReference to poll.
status: (optional, default 'DONE') Desired job status.
wait: (optional, default 0) Max server-side wait time for one poll call.
Returns:
Tuple (in_state, job) where in_state is True if job is
in the desired state.
Raises:
ValueError: If given an invalid wait value.
"""
_Typecheck(job_reference, ApiClientHelper.JobReference, method='PollJob')
wait = BigqueryClient.NormalizeWait(wait)
job = self.apiclient.jobs().get(**dict(job_reference)).execute()
current = job['status']['state']
return (current == status, job)
#################################
## Wrappers for job types
#################################
def RunQuery(self, **kwds):
"""Run a query job synchronously, and return the result.
Args:
**kwds: Passed on to self.Query.
Returns:
A tuple of the schema fields and the rows in the query result.
"""
new_kwds = dict(kwds)
new_kwds['sync'] = True
job = self.Query(**new_kwds)
# ReadSchemaAndJobRows requires explicit bounds; read the full result.
return self.ReadSchemaAndJobRows(job['jobReference'], start_row=0,
max_rows=sys.maxint)
def RunQueryRpc(self,
query,
dry_run=None,
use_cache=None,
preserve_nulls=None,
max_results=None,
wait=sys.maxint,
min_completion_ratio=None,
wait_printer_factory=None,
max_single_wait=None,
**kwds):
"""Executes the given query using the rpc-style query api.
Args:
query: Query to execute.
dry_run: Optional. Indicates whether the query will only be validated and
return processing statistics instead of actually running.
use_cache: Optional. Whether to use the query cache.
Caching is best-effort only and you should not make
assumptions about whether or how long a query result will be cached.
preserve_nulls: Optional. Indicates whether to preserve nulls in input
data. Temporary flag; will be removed in a future version.
max_results: Optional. Maximum number of results to return.
wait: (optional, default maxint) Max wait time in seconds.
min_completion_ratio: Optional. Specifies the minimum fraction of
data that must be scanned before a query returns. This value should be
between 0.0 and 1.0 inclusive.
wait_printer_factory: (optional, defaults to
self.wait_printer_factory) Returns a subclass of WaitPrinter
that will be called after each job poll.
max_single_wait: Optional. Maximum number of seconds to wait for each call
to query() / getQueryResults().
**kwds: Passed directly to self.ExecuteSyncQuery.
Raises:
BigqueryClientError: if no query is provided.
StopIteration: if the query does not complete within wait seconds.
Returns:
A tuple containing the schema fields and the list of results of the query.
"""
if not self.sync:
raise BigqueryClientError('Running RPC-style query asynchronously is '
'not supported')
if not query:
raise BigqueryClientError('No query string provided')
if wait_printer_factory:
printer = wait_printer_factory()
else:
printer = self.wait_printer_factory()
start_time = time.time()
elapsed_time = 0
job_reference = None
current_wait_ms = None
while True:
try:
elapsed_time = 0 if job_reference is None else time.time() - start_time
remaining_time = wait - elapsed_time
if max_single_wait is not None:
# Compute the current wait, being careful about overflow, since
# remaining_time may be counting down from sys.maxint.
current_wait_ms = int(min(remaining_time, max_single_wait) * 1000)
if current_wait_ms < 0:
current_wait_ms = sys.maxint
if remaining_time < 0:
raise StopIteration('Wait timed out. Query not finished.')
if job_reference is None:
# We haven't yet run a successful Query(), so we don't
# have a job id to check on.
result = self._StartQueryRpc(
query=query,
preserve_nulls=preserve_nulls,
use_cache=use_cache,
dry_run=dry_run,
min_completion_ratio=min_completion_ratio,
timeout_ms=current_wait_ms,
max_results=0,
**kwds)
job_reference = ApiClientHelper.JobReference.Create(
**result['jobReference'])
else:
# The query/getQueryResults methods do not return the job state,
# so we just print 'RUNNING' while we are actively waiting.
printer.Print(job_reference.jobId, elapsed_time, 'RUNNING')
result = self.GetQueryResults(
job_reference.jobId,
max_results=0,
timeout_ms=current_wait_ms)
if result['jobComplete']:
# ReadSchemaAndJobRows requires explicit bounds; fall back to reading
# everything when max_results was not supplied.
return self.ReadSchemaAndJobRows(
dict(job_reference), start_row=0,
max_rows=max_results if max_results is not None else sys.maxint)
except BigqueryCommunicationError, e:
# Communication errors while waiting on a job are okay.
logging.warning('Transient error during query: %s', e)
except BigqueryBackendError, e:
# Temporary server errors while waiting on a job are okay.
logging.warning('Transient error during query: %s', e)
def Query(self, query,
destination_table=None,
create_disposition=None,
write_disposition=None,
priority=None,
preserve_nulls=None,
allow_large_results=None,
dry_run=None,
use_cache=None,
min_completion_ratio=None,
flatten_results=None,
**kwds):
# pylint: disable=g-doc-args
"""Execute the given query, returning the created job.
The job will execute synchronously if sync=True is provided as an
argument or if self.sync is true.
Args:
query: Query to execute.
destination_table: (default None) If provided, send the results to the
given table.
create_disposition: Optional. Specifies the create_disposition for
the destination_table.
write_disposition: Optional. Specifies the write_disposition for
the destination_table.
priority: Optional. Priority to run the query with. Either
'INTERACTIVE' (default) or 'BATCH'.
preserve_nulls: Optional. Indicates whether to preserve nulls in input
data. Temporary flag; will be removed in a future version.
allow_large_results: Enables larger destination table sizes.
dry_run: Optional. Indicates whether the query will only be validated and
return processing statistics instead of actually running.
use_cache: Optional. Whether to use the query cache. If create_disposition
is CREATE_NEVER, will only run the query if the result is already
cached. Caching is best-effort only and you should not make
assumptions about whether or how long a query result will be cached.
min_completion_ratio: Optional. Specifies the minimum fraction of
data that must be scanned before a query returns. This value should be
between 0.0 and 1.0 inclusive.
flatten_results: Whether to flatten nested and repeated fields in the
result schema. If not set, the default behavior is to flatten.
**kwds: Passed on to self.ExecuteJob.
Raises:
BigqueryClientError: if no query is provided.
Returns:
The resulting job info.
"""
if not query:
raise BigqueryClientError('No query string provided')
query_config = {'query': query}
if self.dataset_id:
query_config['defaultDataset'] = dict(self.GetDatasetReference())
if destination_table:
try:
reference = self.GetTableReference(destination_table)
except BigqueryError, e:
raise BigqueryError('Invalid value %s for destination_table: %s' % (
destination_table, e))
query_config['destinationTable'] = dict(reference)
_ApplyParameters(
query_config,
allow_large_results=allow_large_results,
create_disposition=create_disposition,
preserve_nulls=preserve_nulls,
priority=priority,
write_disposition=write_disposition,
use_query_cache=use_cache,
flatten_results=flatten_results,
min_completion_ratio=min_completion_ratio)
request = {'query': query_config}
_ApplyParameters(request, dry_run=dry_run)
return self.ExecuteJob(request, **kwds)
def Load(self, destination_table_reference, source,
schema=None, create_disposition=None, write_disposition=None,
field_delimiter=None, skip_leading_rows=None, encoding=None,
quote=None, max_bad_records=None, allow_quoted_newlines=None,
source_format=None, allow_jagged_rows=None,
ignore_unknown_values=None,
**kwds):
"""Load the given data into BigQuery.
The job will execute synchronously if sync=True is provided as an
argument or if self.sync is true.
Args:
destination_table_reference: TableReference to load data into.
source: String specifying source data to load.
schema: (default None) Schema of the created table. (Can be left blank
for append operations.)
create_disposition: Optional. Specifies the create_disposition for
the destination_table_reference.
write_disposition: Optional. Specifies the write_disposition for
the destination_table_reference.
field_delimiter: Optional. Specifies the single byte field delimiter.
skip_leading_rows: Optional. Number of rows of initial data to skip.
encoding: Optional. Specifies character encoding of the input data.
May be "UTF-8" or "ISO-8859-1". Defaults to UTF-8 if not specified.
quote: Optional. Quote character to use. Default is '"'. Note that
quoting is done on the raw binary data before encoding is applied.
max_bad_records: Optional. Maximum number of bad records that should
be ignored before the entire job is aborted.
allow_quoted_newlines: Optional. Whether to allow quoted newlines in CSV
import data.
source_format: Optional. Format of source data. May be "CSV",
"DATASTORE_BACKUP", or "NEWLINE_DELIMITED_JSON".
allow_jagged_rows: Optional. Whether to allow missing trailing optional
columns in CSV import data.
ignore_unknown_values: Optional. Whether to allow extra, unrecognized
values in CSV or JSON data.
**kwds: Passed on to self.ExecuteJob.
Returns:
The resulting job info.
"""
_Typecheck(destination_table_reference, ApiClientHelper.TableReference)
load_config = {'destinationTable': dict(destination_table_reference)}
sources = BigqueryClient.ProcessSources(source)
if sources[0].startswith('gs://'):
load_config['sourceUris'] = sources
upload_file = None
else:
upload_file = sources[0]
if schema is not None:
load_config['schema'] = {'fields': BigqueryClient.ReadSchema(schema)}
_ApplyParameters(
load_config, create_disposition=create_disposition,
write_disposition=write_disposition, field_delimiter=field_delimiter,
skip_leading_rows=skip_leading_rows, encoding=encoding,
quote=quote, max_bad_records=max_bad_records,
source_format=source_format,
allow_quoted_newlines=allow_quoted_newlines,
allow_jagged_rows=allow_jagged_rows,
ignore_unknown_values=ignore_unknown_values)
return self.ExecuteJob(configuration={'load': load_config},
upload_file=upload_file, **kwds)
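  # Illustrative usage of Load (a sketch, not part of the original client;
  # the project/dataset/table names, bucket, and schema string are
  # hypothetical -- the schema format is whatever ReadSchema accepts):
  #
  #   table = ApiClientHelper.TableReference.Create(
  #       projectId='my-project', datasetId='my_dataset', tableId='my_table')
  #   job = client.Load(table, 'gs://my-bucket/data.csv',
  #                     schema='name:STRING,age:INTEGER', source_format='CSV')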
def Extract(self, source_table, destination_uris,
print_header=None, field_delimiter=None,
destination_format=None, compression=None,
**kwds):
"""Extract the given table from BigQuery.
The job will execute synchronously if sync=True is provided as an
argument or if self.sync is true.
Args:
source_table: TableReference to read data from.
destination_uris: String specifying one or more destination locations,
separated by commas.
print_header: Optional. Whether to print out a header row in the results.
field_delimiter: Optional. Specifies the single byte field delimiter.
destination_format: Optional. Format to extract table to. May be "CSV",
"AVRO", or "NEWLINE_DELIMITED_JSON".
compression: Optional. The compression type to use for exported files.
Possible values include "GZIP" and "NONE". The default value is NONE.
**kwds: Passed on to self.ExecuteJob.
Returns:
The resulting job info.
Raises:
BigqueryClientError: if required parameters are invalid.
"""
_Typecheck(source_table, ApiClientHelper.TableReference)
uris = destination_uris.split(',')
for uri in uris:
if not uri.startswith('gs://'):
raise BigqueryClientError(
'Illegal URI: {}. Extract URI must start with "gs://".'.format(uri))
extract_config = {'sourceTable': dict(source_table)}
_ApplyParameters(
extract_config, destination_uris=uris,
destination_format=destination_format,
print_header=print_header, field_delimiter=field_delimiter,
compression=compression)
return self.ExecuteJob(configuration={'extract': extract_config}, **kwds)
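  # Illustrative usage of Extract (a sketch, not part of the original client;
  # the table reference and bucket are hypothetical). Note that each URI must
  # start with 'gs://' or a BigqueryClientError is raised above:
  #
  #   job = client.Extract(table, 'gs://my-bucket/export-*.csv',
  #                        destination_format='CSV', compression='GZIP')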
class _TableReader(object):
"""Base class that defines the TableReader interface.
_TableReaders provide a way to read paginated rows and schemas from a table.
"""
def ReadRows(self, start_row=0, max_rows=None):
"""Read ad most max_rows rows from a table.
Args:
start_row: first row to return.
max_rows: maximum number of rows to return.
Raises:
BigqueryInterfaceError: when bigquery returns something unexpected.
Returns:
list of rows, each of which is a list of field values.
"""
(_, rows) = self.ReadSchemaAndRows(start_row=start_row, max_rows=max_rows)
return rows
def ReadSchemaAndRows(self, start_row, max_rows):
"""Read at most max_rows rows from a table and the schema.
Args:
start_row: first row to read.
max_rows: maximum number of rows to return.
Raises:
BigqueryInterfaceError: when bigquery returns something unexpected.
ValueError: when start_row is None.
ValueError: when max_rows is None.
Returns:
A tuple where the first item is the list of fields and the
      second item is a list of rows.
"""
if start_row is None:
raise ValueError('start_row is required')
if max_rows is None:
raise ValueError('max_rows is required')
page_token = None
rows = []
schema = {}
while len(rows) < max_rows:
rows_to_read = max_rows - len(rows)
if self.max_rows_per_request:
rows_to_read = min(self.max_rows_per_request, rows_to_read)
(more_rows, page_token, current_schema) = self._ReadOnePage(
None if page_token else start_row,
max_rows=None if page_token else rows_to_read,
page_token=page_token)
if not schema and current_schema:
schema = current_schema.get('fields', [])
for row in more_rows:
rows.append(self._ConvertFromFV(schema, row))
start_row += 1
if not page_token or not more_rows:
break
return (schema, rows)
def _ConvertFromFV(self, schema, row):
"""Converts from FV format to possibly nested lists of values."""
if not row:
return None
values = [entry.get('v', '') for entry in row.get('f', [])]
result = []
for field, v in zip(schema, values):
if field['type'].upper() == 'RECORD':
# Nested field.
subfields = field.get('fields', [])
if field.get('mode', 'NULLABLE').upper() == 'REPEATED':
# Repeated and nested. Convert the array of v's of FV's.
result.append([self._ConvertFromFV(
subfields, subvalue.get('v', '')) for subvalue in v])
else:
# Nested non-repeated field. Convert the nested f from FV.
result.append(self._ConvertFromFV(subfields, v))
elif field.get('mode', 'NULLABLE').upper() == 'REPEATED':
# Repeated but not nested: an array of v's.
result.append([subvalue.get('v', '') for subvalue in v])
else:
# Normal flat field.
result.append(v)
return result
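  # Worked example of the f/v wire format handled above (derived from the
  # code, not part of the original client). Given:
  #
  #   schema = [{'name': 'x', 'type': 'INTEGER'},
  #             {'name': 'tags', 'type': 'STRING', 'mode': 'REPEATED'}]
  #   row = {'f': [{'v': '1'}, {'v': [{'v': 'a'}, {'v': 'b'}]}]}
  #
  # _ConvertFromFV(schema, row) returns ['1', ['a', 'b']]: the flat field is
  # passed through and the repeated field is unpacked from its list of v's.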
def __str__(self):
return self._GetPrintContext()
def __repr__(self):
return self._GetPrintContext()
def _GetPrintContext(self):
"""Returns context for what is being read."""
raise NotImplementedError('Subclass must implement GetPrintContext')
def _ReadOnePage(self, start_row, max_rows, page_token=None):
"""Read one page of data, up to max_rows rows.
Assumes that the table is ready for reading. Will signal an error otherwise.
Args:
start_row: first row to read.
max_rows: maximum number of rows to return.
page_token: Optional. current page token.
Returns:
tuple of:
rows: the actual rows of the table, in f,v format.
page_token: the page token of the next page of results.
schema: the schema of the table.
"""
raise NotImplementedError('Subclass must implement _ReadOnePage')
class _TableTableReader(_TableReader):
"""A TableReader that reads from a table."""
def __init__(self, local_apiclient, max_rows_per_request, table_ref):
self.table_ref = table_ref
self.max_rows_per_request = max_rows_per_request
self._apiclient = local_apiclient
def _GetPrintContext(self):
return '%r' % (self.table_ref,)
def _ReadOnePage(self, start_row, max_rows, page_token=None):
kwds = dict(self.table_ref)
kwds['maxResults'] = max_rows
if page_token:
kwds['pageToken'] = page_token
else:
kwds['startIndex'] = start_row
data = self._apiclient.tabledata().list(**kwds).execute()
page_token = data.get('pageToken', None)
rows = data.get('rows', [])
kwds = dict(self.table_ref)
table_info = self._apiclient.tables().get(**kwds).execute()
schema = table_info.get('schema', {})
return (rows, page_token, schema)
class _JobTableReader(_TableReader):
"""A TableReader that reads from a completed job."""
def __init__(self, local_apiclient, max_rows_per_request, job_ref):
self.job_ref = job_ref
self.max_rows_per_request = max_rows_per_request
self._apiclient = local_apiclient
def _GetPrintContext(self):
return '%r' % (self.job_ref,)
def _ReadOnePage(self, start_row, max_rows, page_token=None):
kwds = dict(self.job_ref)
kwds['maxResults'] = max_rows
# Sets the timeout to 0 because we assume the table is already ready.
kwds['timeoutMs'] = 0
if page_token:
kwds['pageToken'] = page_token
else:
kwds['startIndex'] = start_row
data = self._apiclient.jobs().getQueryResults(**kwds).execute()
if not data['jobComplete']:
raise BigqueryError('Job %s is not done' % (self,))
page_token = data.get('pageToken', None)
schema = data.get('schema', None)
rows = data.get('rows', [])
return (rows, page_token, schema)
class ApiClientHelper(object):
"""Static helper methods and classes not provided by the discovery client."""
def __init__(self, *unused_args, **unused_kwds):
raise NotImplementedError('Cannot instantiate static class ApiClientHelper')
class Reference(object):
"""Base class for Reference objects returned by apiclient."""
_required_fields = set()
_format_str = ''
def __init__(self, **kwds):
if type(self) == ApiClientHelper.Reference:
raise NotImplementedError(
'Cannot instantiate abstract class ApiClientHelper.Reference')
for name in self._required_fields:
if not kwds.get(name, ''):
raise ValueError('Missing required argument %s to %s' % (
name, self.__class__.__name__))
setattr(self, name, kwds[name])
@classmethod
def Create(cls, **kwds):
"""Factory method for this class."""
args = dict((k, v) for k, v in kwds.iteritems()
if k in cls._required_fields)
return cls(**args)
def __iter__(self):
return ((name, getattr(self, name)) for name in self._required_fields)
def __str__(self):
return self._format_str % dict(self)
def __repr__(self):
return "%s '%s'" % (self.typename, self)
def __eq__(self, other):
d = dict(other)
return all(getattr(self, name) == d.get(name, '')
for name in self._required_fields)
class JobReference(Reference):
_required_fields = set(('projectId', 'jobId'))
_format_str = '%(projectId)s:%(jobId)s'
typename = 'job'
class ProjectReference(Reference):
_required_fields = set(('projectId',))
_format_str = '%(projectId)s'
typename = 'project'
class DatasetReference(Reference):
_required_fields = set(('projectId', 'datasetId'))
_format_str = '%(projectId)s:%(datasetId)s'
typename = 'dataset'
def GetProjectReference(self):
return ApiClientHelper.ProjectReference.Create(
projectId=self.projectId)
class TableReference(Reference):
_required_fields = set(('projectId', 'datasetId', 'tableId'))
_format_str = '%(projectId)s:%(datasetId)s.%(tableId)s'
typename = 'table'
def GetDatasetReference(self):
return ApiClientHelper.DatasetReference.Create(
projectId=self.projectId, datasetId=self.datasetId)
def GetProjectReference(self):
return ApiClientHelper.ProjectReference.Create(
projectId=self.projectId)
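# Illustrative usage of the Reference helpers above (a sketch derived from
# the class definitions, not part of the original client):
#
#   ref = ApiClientHelper.TableReference.Create(
#       projectId='my-project', datasetId='my_dataset', tableId='my_table')
#   str(ref)   # -> 'my-project:my_dataset.my_table'
#   dict(ref)  # -> {'projectId': 'my-project', 'datasetId': 'my_dataset',
#              #     'tableId': 'my_table'}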
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/platform/bq/bigquery_client.py
|
Python
|
gpl-3.0
| 86,264
|
[
"VisIt"
] |
eac6915d7546818215e1c85721826401bec304d4873ba88d6ff0f8d677a08a33
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.bayesflow.python.ops import monte_carlo_impl as monte_carlo_lib
from tensorflow.contrib.bayesflow.python.ops.monte_carlo_impl import _get_samples
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
distributions = distributions_lib
layers = layers_lib
monte_carlo = monte_carlo_lib
class ExpectationImportanceSampleTest(test.TestCase):
def test_normal_integral_mean_and_var_correctly_estimated(self):
n = int(1e6)
with self.test_session():
mu_p = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
mu_q = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
sigma_p = constant_op.constant([0.5, 0.5], dtype=dtypes.float64)
sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
p = distributions.Normal(loc=mu_p, scale=sigma_p)
q = distributions.Normal(loc=mu_q, scale=sigma_q)
# Compute E_p[X].
e_x = monte_carlo.expectation_importance_sampler(
f=lambda x: x, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
# Compute E_p[X^2].
e_x2 = monte_carlo.expectation_importance_sampler(
f=math_ops.square, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
stddev = math_ops.sqrt(e_x2 - math_ops.square(e_x))
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
# Convergence of mean is +- 0.003 if n = 100M
# Convergence of stddev is +- 0.00001 if n = 100M
self.assertEqual(p.batch_shape, e_x.get_shape())
self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
self.assertAllClose(p.stddev().eval(), stddev.eval(), rtol=0.02)
def test_multivariate_normal_prob_positive_product_of_components(self):
# Test that importance sampling can correctly estimate the probability that
# the product of components in a MultivariateNormal are > 0.
n = 1000
with self.test_session():
p = distributions.MultivariateNormalDiag(
loc=[0.0, 0.0], scale_diag=[1.0, 1.0])
q = distributions.MultivariateNormalDiag(
loc=[0.5, 0.5], scale_diag=[3., 3.])
# Compute E_p[X_1 * X_2 > 0], with X_i the ith component of X ~ p(x).
# Should equal 1/2 because p is a spherical Gaussian centered at (0, 0).
def indicator(x):
x1_times_x2 = math_ops.reduce_prod(x, reduction_indices=[-1])
return 0.5 * (math_ops.sign(x1_times_x2) + 1.0)
prob = monte_carlo.expectation_importance_sampler(
f=indicator, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
# Convergence is +- 0.004 if n = 100k.
self.assertEqual(p.batch_shape, prob.get_shape())
self.assertAllClose(0.5, prob.eval(), rtol=0.05)
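# For reference, the identity these importance-sampling tests rely on
# (background note, not part of the original test file):
#
#   E_p[f(X)] = E_q[f(X) * p(X) / q(X)]
#             ~ (1/n) * sum_i f(x_i) * exp(log_p(x_i) - log_q(x_i)),  x_i ~ q
#
# A minimal NumPy sketch of the estimator, assuming callables `f`, `p_pdf`,
# `q_pdf` and a sampler `q_sample` (all hypothetical names):
#
#   x = q_sample(n)
#   estimate = np.mean(f(x) * p_pdf(x) / q_pdf(x))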
class ExpectationImportanceSampleLogspaceTest(test.TestCase):
def test_normal_distribution_second_moment_estimated_correctly(self):
# Test the importance sampled estimate against an analytical result.
n = int(1e6)
with self.test_session():
mu_p = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
mu_q = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
sigma_p = constant_op.constant([1.0, 2 / 3.], dtype=dtypes.float64)
sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
p = distributions.Normal(loc=mu_p, scale=sigma_p)
q = distributions.Normal(loc=mu_q, scale=sigma_q)
# Compute E_p[X^2].
# Should equal [1, (2/3)^2]
log_e_x2 = monte_carlo.expectation_importance_sampler_logspace(
log_f=lambda x: math_ops.log(math_ops.square(x)),
log_p=p.log_prob,
sampling_dist_q=q,
n=n,
seed=42)
e_x2 = math_ops.exp(log_e_x2)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
self.assertEqual(p.batch_shape, e_x2.get_shape())
self.assertAllClose([1., (2 / 3.)**2], e_x2.eval(), rtol=0.02)
class ExpectationTest(test.TestCase):
def test_mc_estimate_of_normal_mean_and_variance_is_correct_vs_analytic(self):
random_seed.set_random_seed(0)
n = 20000
with self.test_session():
p = distributions.Normal(loc=[1.0, -1.0], scale=[0.3, 0.5])
# Compute E_p[X] and E_p[X^2].
z = p.sample(n, seed=42)
e_x = monte_carlo.expectation(lambda x: x, p, z=z, seed=42)
e_x2 = monte_carlo.expectation(math_ops.square, p, z=z, seed=0)
var = e_x2 - math_ops.square(e_x)
self.assertEqual(p.batch_shape, e_x.get_shape())
self.assertEqual(p.batch_shape, e_x2.get_shape())
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
self.assertAllClose(p.variance().eval(), var.eval(), rtol=0.02)
class GetSamplesTest(test.TestCase):
"""Test the private method 'get_samples'."""
def test_raises_if_both_z_and_n_are_none(self):
with self.test_session():
dist = distributions.Normal(loc=0., scale=1.)
z = None
n = None
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
_get_samples(dist, z, n, seed)
def test_raises_if_both_z_and_n_are_not_none(self):
with self.test_session():
dist = distributions.Normal(loc=0., scale=1.)
z = dist.sample(seed=42)
n = 1
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
_get_samples(dist, z, n, seed)
def test_returns_n_samples_if_n_provided(self):
with self.test_session():
dist = distributions.Normal(loc=0., scale=1.)
z = None
n = 10
seed = None
z = _get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
def test_returns_z_if_z_provided(self):
with self.test_session():
dist = distributions.Normal(loc=0., scale=1.)
z = dist.sample(10, seed=42)
n = None
seed = None
z = _get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
if __name__ == '__main__':
test.main()
|
memo/tensorflow
|
tensorflow/contrib/bayesflow/python/kernel_tests/monte_carlo_test.py
|
Python
|
apache-2.0
| 7,220
|
[
"Gaussian"
] |
bb4050e73c54c560332c7b2bdc869ea66dd52ca643f7b3743c9cdc0820f90568
|
# -*- coding: utf-8 -*-
# Copyright (C) Brian Moe (2013-2014), Duncan Macleod (2014-)
#
# This file is part of LIGO CIS Core.
#
# LIGO CIS Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LIGO CIS Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LIGO CIS Core. If not, see <http://www.gnu.org/licenses/>.
"""Admin configuration for CIS Server
"""
from django.contrib import admin
from reversion.admin import VersionAdmin
from . import version
from .models import (Channel, Ifo, Subsystem, Description, ChannelDescription)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__credits__ = 'Brian Moe'
__version__ = version.version
class ChannelAdmin(VersionAdmin):
"""`VersionAdmin` for the `~cisserver.models.Channel` model
"""
search_fields = ['name']
admin.site.register(Channel, ChannelAdmin)
class ChannelDescriptionAdmin(VersionAdmin):
"""`VersionAdmin` for the `~cisserver.models.ChannelDescription` model
"""
search_fields = ['name']
admin.site.register(ChannelDescription, ChannelDescriptionAdmin)
class IfoAdmin(VersionAdmin):
"""`VersionAdmin` for the `~cisserver.models.Ifo` model
"""
list_display = ('name', 'description')
admin.site.register(Ifo, IfoAdmin)
class SubsystemAdmin(VersionAdmin):
"""`VersionAdmin` for the `~cisserver.models.Subsystem` model
"""
list_display = ('name', 'description')
admin.site.register(Subsystem, SubsystemAdmin)
class DescriptionAdmin(VersionAdmin):
"""`VersionAdmin` for the `~cisserver.models.Description` model
"""
list_display = ('fullname',)
admin.site.register(Description, DescriptionAdmin)
|
lscsoft/cis.server
|
cisserver/admin.py
|
Python
|
gpl-3.0
| 2,088
|
[
"Brian",
"MOE"
] |
d15d3cd7a835a5e443d2a0f32e32ea828373abc65d1407a8c15cb3b09c6ac49e
|
#!/usr/bin/env python
# CREATED:2013-03-08 15:25:18 by Brian McFee <brm2132@columbia.edu>
# unit tests for librosa.feature (feature.py)
#
# Run me as follows:
# cd tests/
# nosetests -v
#
# This test suite verifies that librosa core routines match (numerically) the output
# of various DPWE matlab implementations on a broad range of input parameters.
#
# All test data is generated by the Matlab script "makeTestData.m".
# Each test loads in a .mat file which contains the input and desired output for a given
# function. The test then runs the librosa implementation and verifies the results
# against the desired output, typically via numpy.allclose().
#
# CAVEATS:
#
# Currently, not all tests are exhaustive in parameter space. This is typically due
# restricted functionality of the librosa implementations. Similarly, there is no
# fuzz-testing here, so behavior on invalid inputs is not yet well-defined.
#
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
pass
import matplotlib
matplotlib.use('Agg')
import six
import glob
import numpy as np
import scipy.io
import scipy.signal
from nose.tools import eq_, raises
import warnings
import librosa
# -- utilities --#
def files(pattern):
test_files = glob.glob(pattern)
test_files.sort()
return test_files
def load(infile):
DATA = scipy.io.loadmat(infile, chars_as_strings=True)
return DATA
# -- --#
# -- Tests --#
def test_hz_to_mel():
def __test_to_mel(infile):
DATA = load(infile)
z = librosa.hz_to_mel(DATA['f'], DATA['htk'])
assert np.allclose(z, DATA['result'])
for infile in files('data/feature-hz_to_mel-*.mat'):
yield (__test_to_mel, infile)
pass
def test_mel_to_hz():
def __test_to_hz(infile):
DATA = load(infile)
z = librosa.mel_to_hz(DATA['f'], DATA['htk'])
assert np.allclose(z, DATA['result'])
for infile in files('data/feature-mel_to_hz-*.mat'):
yield (__test_to_hz, infile)
pass
def test_hz_to_octs():
def __test_to_octs(infile):
DATA = load(infile)
z = librosa.hz_to_octs(DATA['f'])
assert np.allclose(z, DATA['result'])
for infile in files('data/feature-hz_to_octs-*.mat'):
yield (__test_to_octs, infile)
pass
def test_melfb():
def __test(infile):
DATA = load(infile)
wts = librosa.filters.mel(DATA['sr'][0],
DATA['nfft'][0],
n_mels=DATA['nfilts'][0],
fmin=DATA['fmin'][0],
fmax=DATA['fmax'][0],
htk=DATA['htk'][0])
# Our version only returns the real-valued part.
# Pad out.
wts = np.pad(wts, [(0, 0),
(0, int(DATA['nfft'][0]//2 - 1))],
mode='constant')
eq_(wts.shape, DATA['wts'].shape)
assert np.allclose(wts, DATA['wts'])
for infile in files('data/feature-melfb-*.mat'):
yield (__test, infile)
def test_chromafb():
def __test(infile):
DATA = load(infile)
octwidth = DATA['octwidth'][0, 0]
if octwidth == 0:
octwidth = None
wts = librosa.filters.chroma(DATA['sr'][0, 0],
DATA['nfft'][0, 0],
DATA['nchroma'][0, 0],
A440=DATA['a440'][0, 0],
ctroct=DATA['ctroct'][0, 0],
octwidth=octwidth,
norm=2,
base_c=False)
# Our version only returns the real-valued part.
# Pad out.
wts = np.pad(wts, [(0, 0),
(0, int(DATA['nfft'][0, 0]//2 - 1))],
mode='constant')
eq_(wts.shape, DATA['wts'].shape)
assert np.allclose(wts, DATA['wts'])
for infile in files('data/feature-chromafb-*.mat'):
yield (__test, infile)
def test__window():
def __test(n, window):
wdec = librosa.filters.__float_window(window)
if n == int(n):
n = int(n)
assert np.allclose(wdec(n), window(n))
else:
wf = wdec(n)
fn = int(np.floor(n))
assert not np.any(wf[fn:])
for n in [16, 16.0, 16.25, 16.75]:
for window_name in ['barthann', 'bartlett', 'blackman',
'blackmanharris', 'bohman', 'boxcar', 'cosine',
'flattop', 'hamming', 'hann', 'hanning',
'nuttall', 'parzen', 'triang']:
window = getattr(scipy.signal.windows, window_name)
yield __test, n, window
def test_constant_q():
def __test(sr, fmin, n_bins, bins_per_octave, tuning, filter_scale,
pad_fft, norm):
F, lengths = librosa.filters.constant_q(sr,
fmin=fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
pad_fft=pad_fft,
norm=norm)
assert np.all(lengths <= F.shape[1])
eq_(len(F), n_bins)
if not pad_fft:
return
eq_(np.mod(np.log2(F.shape[1]), 1.0), 0.0)
# Check for vanishing negative frequencies
F_fft = np.abs(np.fft.fft(F, axis=1))
# Normalize by row-wise peak
F_fft = F_fft / np.max(F_fft, axis=1, keepdims=True)
assert not np.any(F_fft[:, -F_fft.shape[1]//2:] > 1e-4)
sr = 11025
# Try to make a cq basis too close to nyquist
yield (raises(librosa.ParameterError)(__test), sr, sr/2.0, 1, 12, 0, 1, True, 1)
# with negative fmin
yield (raises(librosa.ParameterError)(__test), sr, -60, 1, 12, 0, 1, True, 1)
# with negative bins_per_octave
yield (raises(librosa.ParameterError)(__test), sr, 60, 1, -12, 0, 1, True, 1)
# with negative bins
yield (raises(librosa.ParameterError)(__test), sr, 60, -1, 12, 0, 1, True, 1)
# with negative filter_scale
yield (raises(librosa.ParameterError)(__test), sr, 60, 1, 12, 0, -1, True, 1)
# with negative norm
yield (raises(librosa.ParameterError)(__test), sr, 60, 1, 12, 0, 1, True, -1)
for fmin in [None, librosa.note_to_hz('C3')]:
for n_bins in [12, 24]:
for bins_per_octave in [12, 24]:
for tuning in [0, 0.25]:
for filter_scale in [1, 2]:
for norm in [1, 2]:
for pad_fft in [False, True]:
yield (__test, sr, fmin, n_bins,
bins_per_octave, tuning,
filter_scale, pad_fft,
norm)
def test_window_bandwidth():
eq_(librosa.filters.window_bandwidth('hann'),
librosa.filters.window_bandwidth(scipy.signal.hann))
def test_window_bandwidth_missing():
warnings.resetwarnings()
with warnings.catch_warnings(record=True) as out:
x = librosa.filters.window_bandwidth('unknown_window')
eq_(x, 1)
assert len(out) > 0
assert out[0].category is UserWarning
assert 'Unknown window function' in str(out[0].message)
def binstr(m):
out = []
for row in m:
line = [' '] * len(row)
for i in np.flatnonzero(row):
line[i] = '.'
out.append(''.join(line))
return '\n'.join(out)
def test_cq_to_chroma():
def __test(n_bins, bins_per_octave, n_chroma, fmin, base_c, window):
# Fake up a cqt matrix with the corresponding midi notes
if fmin is None:
midi_base = 24 # C2
else:
midi_base = librosa.hz_to_midi(fmin)
midi_notes = np.linspace(midi_base,
midi_base + n_bins * 12.0 / bins_per_octave,
endpoint=False,
num=n_bins)
# We don't care past 2 decimals here.
# the log2 inside hz_to_midi can cause problems though.
midi_notes = np.around(midi_notes, decimals=2)
C = np.diag(midi_notes)
cq2chr = librosa.filters.cq_to_chroma(n_input=C.shape[0],
bins_per_octave=bins_per_octave,
n_chroma=n_chroma,
fmin=fmin,
base_c=base_c,
window=window)
chroma = cq2chr.dot(C)
for i in range(n_chroma):
v = chroma[i][chroma[i] != 0]
v = np.around(v, decimals=2)
if base_c:
resid = np.mod(v, 12)
else:
resid = np.mod(v - 9, 12)
resid = np.round(resid * n_chroma / 12.0)
assert np.allclose(np.mod(i - resid, 12), 0.0), i-resid
for n_octaves in [2, 3, 4]:
for semitones in [1, 3]:
for n_chroma in 12 * np.arange(1, 1 + semitones):
for fmin in [None] + list(librosa.midi_to_hz(range(48, 61))):
for base_c in [False, True]:
for window in [None, [1]]:
bins_per_octave = 12 * semitones
n_bins = n_octaves * bins_per_octave
if np.mod(bins_per_octave, n_chroma) != 0:
tf = raises(librosa.ParameterError)(__test)
else:
tf = __test
yield (tf, n_bins, bins_per_octave,
n_chroma, fmin, base_c, window)
|
craffel/librosa
|
tests/test_filters.py
|
Python
|
isc
| 10,149
|
[
"Brian"
] |
a9a9e08c2b5b670359f3944fa2da0e2aaf69ba8956e49aa0e479e1437f1b939b
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import sys
import yaml
import time
from collections import defaultdict
from jinja2 import Environment
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.token import GalaxyToken
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.unicode import to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True,
help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option(
'--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file',
help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.add_option('--platforms', dest='platforms',
help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='tags',
help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author',
help='GitHub username')
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
# options that apply to more than one action
        if self.action not in ("delete", "import", "init", "login", "setup"):
self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. '
'The default is the roles_path configured in your '
'ansible.cfg file (/etc/ansible/roles if not configured)')
if self.action in ("import","info","init","install","login","search","setup","delete"):
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER,
help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False,
help='Ignore SSL certificate validation errors.')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role')
        self.options, self.args = self.parser.parse_args()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
return True
def run(self):
super(GalaxyCLI, self).run()
# if not offline, get connect to galaxy api
if self.action in ("import","info","install","search","login","setup","delete") or \
(self.action == 'init' and not self.options.offline):
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.get_opt("ignore_errors", False):
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = [u"", u"Role: %s" % to_unicode(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text += "\t%s: \n" % (k)
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
Executes the init action, which creates the skeleton framework
of a role that complies with the galaxy metadata format.
"""
init_path = self.get_opt('init_path', './')
force = self.get_opt('force', False)
offline = self.get_opt('offline', False)
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
# create default README.md
if not os.path.exists(role_path):
os.makedirs(role_path)
readme_path = os.path.join(role_path, "README.md")
f = open(readme_path, "wb")
f.write(self.galaxy.default_readme)
f.close()
# create default .travis.yml
travis = Environment().from_string(self.galaxy.default_travis).render()
f = open(os.path.join(role_path, '.travis.yml'), 'w')
f.write(travis)
f.close()
for dir in GalaxyRole.ROLE_DIRS:
dir_path = os.path.join(init_path, role_name, dir)
main_yml_path = os.path.join(dir_path, 'main.yml')
# create the directory if it doesn't exist already
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# now create the main.yml file for that directory
if dir == "meta":
# create a skeleton meta/main.yml with a valid galaxy_info
# datastructure in place, plus with all of the available
# platforms included (but commented out), the galaxy_tags
# list, and the dependencies section
platforms = []
if not offline and self.api:
platforms = self.api.get_list("platforms") or []
# group the list of platforms from the api based
# on their names, with the release field being
# appended to a list of versions
platform_groups = defaultdict(list)
for platform in platforms:
platform_groups[platform['name']].append(platform['release'])
platform_groups[platform['name']].sort()
inject = dict(
author = 'your name',
company = 'your company (optional)',
license = 'license (GPLv2, CC-BY, etc)',
issue_tracker_url = 'http://example.com/issue/tracker',
min_ansible_version = '1.2',
platforms = platform_groups,
)
rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
f = open(main_yml_path, 'w')
f.write(rendered_meta)
f.close()
pass
elif dir == "tests":
# create tests/test.yml
inject = dict(
role_name = role_name
)
playbook = Environment().from_string(self.galaxy.default_test).render(inject)
f = open(os.path.join(dir_path, 'test.yml'), 'w')
f.write(playbook)
f.close()
# create tests/inventory
f = open(os.path.join(dir_path, 'inventory'), 'w')
f.write('localhost')
f.close()
elif dir not in ('files','templates'):
# just write a (mostly) empty YAML file for main.yml
f = open(main_yml_path, 'w')
f.write('---\n# %s file for %s\n' % (dir,role_name))
f.close()
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
Executes the info action. This action prints out detailed
information about an installed role as well as info available
from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.get_opt("roles_path")
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if self.api:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec= req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
### FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
Executes the installation action. The args list contains the
roles to be installed, unless -f was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github),
or it can be a local .tar.gz file.
"""
role_file = self.get_opt("role_file", None)
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file
# or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive of specifying
# the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.get_opt("no_deps", False)
force = self.get_opt('force', False)
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
role = RoleRequirement.role_yaml_parse(role)
display.vvv('found role %s in yaml file' % str(role))
if 'name' not in role and 'scm' not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
display.deprecated("going forward only the yaml format will be supported")
# roles listed in a file, one per line
for rline in f.readlines():
if rline.startswith("#") or rline.strip() == '':
continue
display.debug('found role %s in text file' % str(rline))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
display.error('Unable to open %s: %s' % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
display.vvv('Installing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None and not force:
display.display('- %s is already installed, skipping.' % role.name)
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None or force:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % dep_role.name)
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
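    # Illustrative CLI usage for execute_install (a sketch; flags as defined
    # in parse() above):
    #
    #   ansible-galaxy install username.role_name
    #   ansible-galaxy install -r requirements.yml --roles-path ./roles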
def execute_remove(self):
"""
Executes the remove action. The args list contains the list
of roles to be removed. This list can contain more than one role.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
Executes the list action. The args list can contain zero
or one role. If one is specified, only that role will be
shown, otherwise all roles in the specified directory will
be shown.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
# show only the request role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
display.display("- %s, %s" % (name, version))
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
roles_path = os.path.expanduser(roles_path)
if not os.path.exists(roles_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
elif not os.path.isdir(roles_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
path_files = os.listdir(roles_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        Verify the user's identity via GitHub and retrieve an auth token from Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
"""
Import a role into Galaxy
"""
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = self.args.pop()
github_user = self.args.pop()
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
"""
Setup an integration from Github or Travis
"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']),color=C.COLOR_OK)
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
        if len(self.args) < 4:
            raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
"""
Delete a role from galaxy.ansible.com
"""
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name))
display.display(resp['status'])
return True
|
dochang/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 30,367
|
[
"Galaxy"
] |
431c1961f910a20b7d6935b946de19fd2262bc9c09acf668eb75101d3f35d955
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
reader = chigger.exodus.ExodusReader('../input/zero_variable.e')
mug = chigger.exodus.ExodusResult(reader, cmap='viridis', variable='zero')
cbar = chigger.exodus.ExodusColorBar(mug)
window = chigger.RenderWindow(mug, cbar, size=[600,400], test=True)
window.write('zero_range.png')
window.start()
|
nuclear-wizard/moose
|
python/chigger/tests/colorbar/zero_range.py
|
Python
|
lgpl-2.1
| 678
|
[
"MOOSE"
] |
6bc14fb772539b329270bcb108c36281b8bb987e7f859c2b4e7de75c0b335505
|
"""
Implementation of the class `OpenFOAMSimulation`.
"""
import os
import numpy
from scipy import signal
from matplotlib import pyplot
from ..simulation import Simulation
from ..force import Force
class OpenFOAMSimulation(Simulation):
"""
Contains info about a OpenFOAM simulation.
Inherits from class Simulation.
"""
def __init__(self, description=None, directory=os.getcwd(), **kwargs):
"""
Initializes by calling the parent constructor.
Parameters
----------
description: string, optional
Description of the simulation;
default: None.
directory: string, optional
Directory of the simulation;
default: <current working directory>.
"""
super(OpenFOAMSimulation, self).__init__(software='openfoam',
description=description,
directory=directory,
**kwargs)
def read_forces(self,
display_coefficients=False,
labels=None,
forces_folder=os.path.join('postProcessing',
'forces'),
force_coefficients_folder=os.path.join('postProcessing',
'forceCoeffs'),
usecols=(0, 2, 3)):
"""
Reads forces from files.
Parameters
----------
display_coefficients: boolean, optional
Set to 'True' if force coefficients are required;
default: False (i.e. forces).
labels: list of strings, optional
Label of each force to read;
default: None.
forces_folder: string, optional
Relative path from the simulation directory to the folder containing
the forces;
default: 'postProcessing/forces'.
force_coefficients_folder: string, optional
Relative path from the simulation directory to the folder containing
the force coefficients;
default: 'postProcessing/forceCoeffs'.
usecols: tuple of integers, optional
Index of columns to read from file, including the time-column index;
default: (0, 2, 3).
"""
if display_coefficients:
info = {'directory': os.path.join(self.directory,
force_coefficients_folder),
'file-name': 'forceCoeffs.dat',
'description': 'force-coefficients'}
if not labels:
labels = ['$C_d$', '$C_l$']
else:
info = {'directory': os.path.join(self.directory,
forces_folder),
'file-name': 'forces.dat',
'description': 'forces'}
if not labels:
labels = ['$F_x$', '$F_y$']
info['usecols'] = usecols
info['labels'] = labels
# backward compatibility from 2.2.2 to 2.0.1
if not os.path.isdir(info['directory']):
info['directory'] = '{}/forces'.format(self.directory)
info['usecols'] = (0, 1, 2)
# end of backward compatibility
print('[info] reading {} in {} ...'.format(info['description'],
info['directory']))
subdirectories = sorted(os.listdir(info['directory']))
times = numpy.empty(0)
force_x, force_y = numpy.empty(0), numpy.empty(0)
for subdirectory in subdirectories:
forces_path = os.path.join(info['directory'],
subdirectory,
info['file-name'])
with open(forces_path, 'r') as infile:
t, fx, fy = numpy.loadtxt(infile,
dtype=float,
comments='#',
usecols=info['usecols'],
unpack=True)
times = numpy.append(times, t)
force_x, force_y = numpy.append(force_x, fx), numpy.append(force_y, fy)
# set Force objects
self.forces = []
self.forces.append(Force(times, force_x, label=labels[0]))
self.forces.append(Force(times, force_y, label=labels[1]))
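    # Minimal usage sketch (hypothetical case directory, not part of the
    # original module):
    #
    #   simulation = OpenFOAMSimulation(directory='cases/cylinder2d')
    #   simulation.read_forces(display_coefficients=True)
    #   cd, cl = simulation.forces  # Force objects labeled '$C_d$', '$C_l$'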
def read_maximum_cfl(self, file_path):
"""
Reads the instantaneous maximum CFL number from a given log file.
Parameters
----------
file_path: string
Path of the logging file containing the instantaneous maximum CFL number.
Returns
-------
cfl: dictionary of (string, 1D array of floats) items
Contains the discrete time and cfl values.
"""
        print('[info] reading CFL from {} ...'.format(file_path), end=' ')
with open(file_path, 'r') as infile:
times = numpy.array([float(line.split()[-1])
for line in infile if line.startswith('Time = ')])
with open(file_path, 'r') as infile:
cfl = numpy.array([float(line.split()[-1])
for line in infile
if line.startswith('Courant Number mean')])
assert(times.shape == cfl.shape)
self.cfl = {'times': times, 'values': cfl}
print('done')
return self.cfl
def get_mean_maximum_cfl(self, limits=(0.0, float('inf'))):
"""
Computes the mean CFL number.
Parameters
----------
limits: list of floats, optional
Time-limits to compute the mean value;
default: (0.0, float('inf')).
Returns
-------
mean: dictionary of (string, float) items
The mean value and the actual time-limits used to average the CFL.
"""
print('[info] computing the mean CFL number ...')
mask = numpy.where(numpy.logical_and(self.cfl['times'] >= limits[0],
self.cfl['times'] <= limits[1]))[0]
        self.cfl['mean'] = {'start': self.cfl['times'][mask[0]],
                            'end': self.cfl['times'][mask[-1]],
                            'value': self.cfl['values'][mask].mean()}
print('[info] averaging the maximum CFL number '
'between {} and {} time-units:'.format(self.cfl['mean']['start'],
self.cfl['mean']['end']))
print('\t<max(CFL)> = {}'.format(self.cfl['mean']['value']))
return self.cfl['mean']
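    # Minimal usage sketch (hypothetical log file, not part of the original
    # module):
    #
    #   simulation.read_maximum_cfl('log.run')
    #   simulation.get_mean_maximum_cfl(limits=(10.0, 50.0))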
def plot_maximum_cfl(self,
display_extrema=False, order=5,
limits=(0.0, float('inf'), 0.0, float('inf')),
save_directory=None, save_name=None, fmt='png',
style='mesnardo',
show=False):
"""
Plots the instantaneous maximum CFL number.
Parameters
----------
time: 1d array of floats
Discrete time values.
cfl: 1d array of floats
Maximum CFL values.
display_extrema: boolean, optional
Set 'True' to emphasize the extrema of the curves;
default: False.
order: integer, optional
Number of neighbors used on each side to define an extremum;
default: 5.
limits: list of floats, optional
Limits of the axes [xmin, xmax, ymin, ymax];
default: [0.0, +inf, 0.0, +inf].
directory: string, optional
Directory of the simulation;
default: <current directory>.
save_directory: string, optional
Directory where to save the figure;
default: None (will be '<simulation directory>/images').
save_name: string, optional
Name of the file to save;
default: None (does not save).
fmt: string, optional
Format to save the figure;
default: 'png'.
style: string, optional
Name of the .mplstyle file that contains to the style.
The file should be located in the folder 'snake/styles';
default: 'mesnardo'.
show: boolean, optional
Set 'True' to display the figure;
default: False.
"""
print('[info] plotting cfl ...')
pyplot.style.use(os.path.join(os.environ['SNAKE'],
'snake',
'styles',
style + '.mplstyle'))
fig, ax = pyplot.subplots(figsize=(8, 6))
color_cycle = ax._get_lines.prop_cycler
color = next(color_cycle)['color']
ax.grid(True, zorder=0)
ax.set_xlabel('time', fontsize=18)
ax.set_ylabel('maximum CFL', fontsize=18)
ax.plot(self.cfl['times'], self.cfl['values'], color=color, zorder=10)
if display_extrema:
minima = signal.argrelextrema(self.cfl['values'], numpy.less_equal,
order=order)[0][:-1]
maxima = signal.argrelextrema(self.cfl['values'], numpy.greater_equal,
order=order)[0][:-1]
# remove indices that are too close
minima = minima[numpy.append(True, minima[1:] - minima[:-1] > order)]
maxima = maxima[numpy.append(True, maxima[1:] - maxima[:-1] > order)]
ax.scatter(self.cfl['times'][minima], self.cfl['values'][minima],
c=color, marker='o', zorder=10)
ax.scatter(self.cfl['times'][maxima], self.cfl['values'][maxima],
c=color, marker='o', zorder=10)
ax.axis(limits)
if save_name:
if not save_directory:
save_directory = os.path.join(self.directory, 'images')
print('[info] saving figure in directory {} ...'.format(save_directory))
if not os.path.isdir(save_directory):
os.makedirs(save_directory)
pyplot.savefig(os.path.join(save_directory, save_name + '.' + fmt),
bbox_inches='tight',
format=fmt)
if show:
print('[info] displaying figure ...')
pyplot.show()
pyplot.close()
def create_matplotlib_colormap(self, colormap_name, file_path=None):
"""
Writes the values of a Matplotlib colormap into a temporary file located in
the current working directory.
The list of Matplotlib colormaps is available
[here](http://matplotlib.org/examples/color/colormaps_reference.html).
Parameters
----------
colormap_name: string
Name of the Matplotlib colormap to write into a file.
file_path: string, optional
Path of the file to write;
default: None (will be '<colormap_name>_tmp.dat').
Returns
-------
file_path: string
Path of the file created.
"""
from matplotlib import cm
if file_path is None:
    file_path = os.path.join(os.getcwd(),
                             colormap_name + '_tmp.dat')
print('[info] writing colormap {} from Matplotlib into file {} ...'
''.format(colormap_name, file_path))
with open(file_path, 'w') as outfile:
colormap_object = getattr(cm, colormap_name)
try:
    colors = colormap_object.colors
except AttributeError:
    # Colormap has no discrete color list; sample the callable instead.
    colors = []
for i in range(colormap_object.N):
colors.append(colormap_object(i)[:-1])
for color in colors:
outfile.write('{}, {}, {}\n'.format(*color))
return file_path
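    # The file written above contains one "R, G, B" line per colormap entry,
    # e.g. (illustrative values):
    #
    #   0.267004, 0.004874, 0.329415
    #   0.282623, 0.140926, 0.457517
    #
    # This plain CSV layout is assumed to be what the --colormap option of
    # the ParaView plotting script below expects.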
def plot_field_contours_paraview(self, field_name,
field_range=(-1.0, 1.0),
view=(-2.0, -2.0, 2.0, 2.0),
times=(0, 0, 0),
width=800,
colormap=None,
display_scalar_bar=True,
display_time_text=True,
display_mesh=False):
"""
Plots the contour of a given field using ParaView.
Parameters
----------
field_name: string
Name of field to plot;
choices: vorticity, pressure, x-velocity, y-velocity.
field_range: 2-tuple of floats, optional
Range of the field to plot (min, max);
default: (-1.0, 1.0).
view: 4-tuple of floats, optional
Bottom-left and top-right coordinates of the view to display;
default: (-2.0, -2.0, 2.0, 2.0).
times: 3-tuple of floats, optional
Time-limits followed by the time-increment to consider;
default: (0, 0, 0).
width: integer, optional
Width (in pixels) of the figure;
default: 800.
colormap: string, optional
Name of the Matplotlib colormap to use;
default: None.
display_scalar_bar: boolean, optional
Displays the scalar bar;
default: True.
display_time_text: boolean, optional
Displays the time-unit in the top-left corner;
default: True.
display_mesh: boolean, optional
Displays the mesh (Surface with Edges);
default: False
"""
# create the command-line parameters
arguments = []
arguments.append('--directory ' + self.directory)
arguments.append('--field ' + field_name)
arguments.append('--range {} {}'.format(*field_range))
arguments.append('--times {} {} {}'.format(*times))
arguments.append('--view {} {} {} {}'.format(*view))
arguments.append('--width {}'.format(width))
if display_mesh:
arguments.append('--mesh')
if not display_scalar_bar:
arguments.append('--no-scalar-bar')
if not display_time_text:
arguments.append('--no-time-text')
if colormap:
colormap_path = self.create_matplotlib_colormap(colormap_name=colormap)
arguments.append('--colormap ' + colormap_path)
# execute the Python script with pvbatch
script = os.path.join(os.environ['SNAKE'],
'snake',
'openfoam',
'plotField2dParaView.py')
os.system('pvbatch {} {}'.format(script, ' '.join(arguments)))
if colormap:
os.remove(colormap_path)
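    # For reference, the generated command looks like this (hypothetical
    # case path; argument values taken from the defaults above):
    #
    #   pvbatch $SNAKE/snake/openfoam/plotField2dParaView.py \
    #       --directory /path/to/case --field vorticity \
    #       --range -1.0 1.0 --times 0 0 0 \
    #       --view -2.0 -2.0 2.0 2.0 --width 800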
def plot_mesh_paraview(self,
view=(-2.0, -2.0, 2.0, 2.0),
width=800):
"""
Plots the mesh (black lines on white background) using ParaView.
Parameters
----------
view: 4-tuple of floats, optional
Bottom-left and top-right coordinates of the view to display;
default: (-2.0, -2.0, 2.0, 2.0).
width: integer, optional
Width (in pixels) of the figure;
default: 800.
"""
arguments = []
arguments.append('--directory ' + self.directory)
arguments.append('--view {} {} {} {}'.format(*view))
arguments.append('--width {}'.format(width))
# execute the Python script with pvbatch
script = os.path.join(os.environ['SNAKE'], 'snake', 'openfoam',
'plotMesh2dParaView.py')
os.system('pvbatch {} {}'.format(script, ' '.join(arguments)))
|
mesnardo/snake
|
snake/openfoam/simulation.py
|
Python
|
mit
| 14,299
|
[
"ParaView"
] |
660524f9c5632754b79b9a0fcb408b7dfbe181db5f95069c926ace7311ef8dd6
|
import numpy as np
def log(sigma=1., shape=None):  # Laplacian of Gaussian
if shape is None:
width = int(np.ceil(3 * sigma))
shape = (width, width)
result = np.zeros(shape)
nbinx = shape[0]
xmin = -(nbinx - 1) / 2
x = np.arange(xmin, -xmin + .1, 1)
nbiny = shape[1]
ymin = -(nbiny - 1) / 2
y = np.arange(ymin, -ymin + .1, 1)
for binx in range(nbinx):
for biny in range(nbiny):
result[binx, biny] = (
-(
1 - (x[binx] ** 2 + y[biny] ** 2) / (2 * sigma ** 2)
) *
np.exp(
-(x[binx] ** 2 + y[biny] ** 2) / (2 * sigma ** 2)
)
)
return result / np.sum(result)
def gauss(sigma=1., shape=None):
if shape is None:
width = int(np.ceil(3 * sigma))
shape = (width, width)
result = np.zeros(shape)
nbinx = shape[0]
xmin = -(nbinx - 1) / 2
x = np.arange(xmin, -xmin + .1, 1)
nbiny = shape[1]
ymin = -(nbiny - 1) / 2
y = np.arange(ymin, -ymin + .1, 1)
for binx in range(nbinx):
for biny in range(nbiny):
result[binx, biny] = np.exp(
(-x[binx] ** 2 - y[biny] ** 2) / (2 * sigma ** 2))
return result / np.sum(result)
laplacien_33 = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
laplacien_55 = np.array([
[-4, -1, 0, -1, -4],
[-1, 2, 3, 2, -1],
[0, 3, 4, 3, 0],
[-1, 2, 3, 2, -1],
[-4, -1, 0, -1, -4]])
sobelx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
sobely = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
sobelxy = 0.5 * (sobelx + sobely)
high_pass_filter_2525 = -np.ones((25, 25)) / (25 * 25 - 1)
high_pass_filter_2525[12, 12] = 1
high_pass_filter_1313 = -np.ones((13, 13)) / (13 * 13 - 1)
high_pass_filter_1313[6, 6] = 1
high_pass_filter_77 = -np.ones((7, 7)) / (7 * 7 - 1)
high_pass_filter_77[3, 3] = 1
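if __name__ == '__main__':
    # Minimal smoke test (not part of the original module): smooth a noisy
    # image with the Gaussian kernel and compute an edge response with the
    # Laplacian-of-Gaussian kernel. Assumes scipy is available.
    from scipy.ndimage import convolve
    rng = np.random.RandomState(0)
    image = np.zeros((32, 32))
    image[8:24, 8:24] = 1.0           # bright square on dark background
    image += 0.1 * rng.randn(32, 32)  # additive Gaussian noise
    smoothed = convolve(image, gauss(sigma=1.5))
    edges = convolve(image, log(sigma=1.5))
    print('smoothed range:', smoothed.min(), smoothed.max())
    print('edge response range:', edges.min(), edges.max())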
|
calispac/digicampipe
|
digicampipe/image/lidccd/kernels.py
|
Python
|
gpl-3.0
| 1,906
|
[
"Gaussian"
] |
c2715cd1b2d81ce08d115c7f5e2bf802a21cc84cacc9111ae898eb878e0b6edb
|
import ocl
import camvtk
import time
import vtk
import datetime
if __name__ == "__main__":
myscreen = camvtk.VTKScreen()
stl = camvtk.STLSurf("../stl/demo.stl")
print "STL surface read"
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((0.5,0.5,0.5))
polydata = stl.src.GetOutput()
s= ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STLSurf with ", s.size(), " triangles"
cutter = ocl.CylCutter(0.6)
#print cutter.str()
minx=-1
dx=0.1
maxx=11
miny=-1
dy=1
maxy=11
z=-0.2
pdf = ocl.PathDropCutter(s)
pdf.SetCutter(cutter)
path = ocl.Path()
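    # NOTE: the script stops at the exit() call below; everything after it
    # is unreachable legacy code that still references the old `cam` module.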
exit()
pftp = cam.ParallelFinish()
pftp.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
pftp.dropCutterSTL1(cutter, s)
print " made ", pftp.dcCalls, " drop-cutter calls"
pf2 = cam.ParallelFinish()
pf2.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
pf2.dropCutterSTL2(cutter, s)
print " made ", pf2.dcCalls, " drop-cutter calls"
clpoints = pftp.getCLPoints()
ccpoints = pftp.getCCPoints()
cl2p = pf2.getCLPoints()
cc2p = pf2.getCCPoints()
#CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
nv=0
nn=0
ne=0
nf=0
for cl,cc,cl2 in zip(clpoints,ccpoints,cl2p):
#cutter.dropCutter(cl,cc,t)
#cc = cam.CCPoint()
#cutter.dropCutterSTL(cl,cc,s)
# cutter.vertexDrop(cl,cc,t)
# cutter.edgeDrop(cl,cc,t)
# cutter.facetDrop(cl,cc,t)
if cc.type==cam.CCType.FACET:
nf+=1
col = (0,1,1)
elif cc.type == cam.CCType.VERTEX:
nv+=1
col = (0,1,0)
elif cc.type == cam.CCType.EDGE:
ne+=1
col = (1,0,0)
elif cc.type == cam.CCType.NONE:
#print "type=NONE!"
nn+=1
col = (1,1,1)
#if cl.isInside(t):
# col = (0, 1, 0)
#else:
# col = (1, 0, 0)
myscreen.addActor( camvtk.Point(center=(cl.x,cl.y,cl.z) , color=col) )
myscreen.addActor( camvtk.Point(center=(cl2.x,cl2.y,cl2.z+0.2) , color=(0.6,0.2,0.9)) )
#myscreen.addActor( camvtk.Point(center=(cc.x,cc.y,cc.z), color=col) )
#print cc.type
print "none=",nn," vertex=",nv, " edge=",ne, " facet=",nf, " sum=", nn+nv+ne+nf
print len(clpoints), " cl points evaluated"
myscreen.camera.SetPosition(3, 23, 15)
myscreen.camera.SetFocalPoint(5, 5, 0)
myscreen.render()
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
w2if.Modified()
lwr.SetFileName("tux1.png")
#lwr.Write()
t = camvtk.Text()
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
for n in range(1,36):
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.camera.Azimuth( 1 )
time.sleep(0.01)
myscreen.render()
lwr.SetFileName("kd_frame"+ ('%03d' % n)+".png")
w2if.Modified()
#lwr.Write()
#myscreen.iren.Start()
raw_input("Press Enter to terminate")
|
AlanZatarain/opencamlib
|
src/attic/pfinish_tst_1.py
|
Python
|
gpl-3.0
| 3,278
|
[
"VTK"
] |
fa374b1f2a7cd1c45ccba22acf291c28948b3ee75127abf1a3e7f042f1046551
|
# -*- coding: utf-8 -*-
"""
Subsubmodule for ecg processing.
"""
import numpy as np
import pandas as pd
import biosppy
import scipy
from .bio_rsp import *
from ..signal import *
from ..materials import Path
from ..statistics import *
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def ecg_preprocess(ecg, sampling_rate=1000, filter_type="FIR", filter_band="bandpass", filter_frequency=[3, 45], filter_order=0.3, segmenter="hamilton"):
"""
ECG signal preprocessing.
Parameters
----------
ecg : list or ndarray
ECG signal array.
sampling_rate : int
Sampling rate (samples/second).
filter_type : str or None
Can be Finite Impulse Response filter ("FIR"), Butterworth filter ("butter"), Chebyshev filters ("cheby1" and "cheby2"), Elliptic filter ("ellip") or Bessel filter ("bessel").
filter_band : str
Band type, can be Low-pass filter ("lowpass"), High-pass filter ("highpass"), Band-pass filter ("bandpass"), Band-stop filter ("bandstop").
filter_frequency : int or list
Cutoff frequencies, format depends on type of band: "lowpass" or "bandpass": single frequency (int), "bandpass" or "bandstop": pair of frequencies (list).
filter_order : float
Filter order.
segmenter : str
The cardiac phase segmenter. Can be "hamilton", "gamboa", "engzee", "christov", "ssf" or "pekkanen".
Returns
----------
ecg_preprocessed : dict
Preprocesed ECG.
Example
----------
>>> import neurokit as nk
>>> ecg_preprocessed = nk.ecg_preprocess(signal)
Notes
----------
*Details*
- **segmenter**: Different methods of segmentation are implemented: **hamilton** (`Hamilton, 2002 <http://www.eplimited.com/osea13.pdf/>`_), **gamboa** (`Gamboa, 2008 <http://www.lx.it.pt/~afred/pub/thesisHugoGamboa.pdf/>`_), **engzee** (Engelse and Zeelenberg, 1979; Lourenco et al., 2012), **christov** (Christov, 2004), **ssf** (Slope Sum Function) or **pekkanen** (`Kathirvel et al., 2011 <http://link.springer.com/article/10.1007/s13239-011-0065-3/fulltext.html>`_).
*Authors*
- the BioSPPy dev team (https://github.com/PIA-Group/BioSPPy)
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- biosppy
- numpy
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
References
-----------
- Hamilton, P. (2002, September). Open source ECG analysis. In Computers in Cardiology, 2002 (pp. 101-104). IEEE.
- Kathirvel, P., Manikandan, M. S., Prasanna, S. R. M., & Soman, K. P. (2011). An efficient R-peak detection based on new nonlinear transformation and first-order Gaussian differentiator. Cardiovascular Engineering and Technology, 2(4), 408-425.
- Canento, F., Lourenço, A., Silva, H., & Fred, A. (2013). Review and Comparison of Real Time Electrocardiogram Segmentation Algorithms for Biometric Applications. In Proceedings of the 6th Int’l Conference on Health Informatics (HEALTHINF).
- Christov, I. I. (2004). Real time electrocardiogram QRS detection using combined adaptive threshold. Biomedical engineering online, 3(1), 28.
- Engelse, W. A. H., & Zeelenberg, C. (1979). A single scan algorithm for QRS-detection and feature extraction. Computers in cardiology, 6(1979), 37-42.
- Lourenço, A., Silva, H., Leite, P., Lourenço, R., & Fred, A. L. (2012, February). Real Time Electrocardiogram Segmentation for Finger based ECG Biometrics. In Biosignals (pp. 49-54).
"""
# Signal Processing
# =======================
# Transform to array
ecg = np.array(ecg)
# Filter signal
if filter_type in ["FIR", "butter", "cheby1", "cheby2", "ellip", "bessel"]:
order = int(filter_order * sampling_rate)
filtered, _, _ = biosppy.tools.filter_signal(signal=ecg,
ftype=filter_type,
band=filter_band,
order=order,
frequency=filter_frequency,
sampling_rate=sampling_rate)
else:
filtered = ecg # filtered is not-filtered
# Segment
if segmenter == "hamilton":
rpeaks, = biosppy.ecg.hamilton_segmenter(signal=filtered, sampling_rate=sampling_rate)
elif segmenter == "gamboa":
rpeaks, = biosppy.ecg.gamboa_segmenter(signal=filtered, sampling_rate=sampling_rate, tol=0.002)
elif segmenter == "engzee":
rpeaks, = biosppy.ecg.engzee_segmenter(signal=filtered, sampling_rate=sampling_rate, threshold=0.48)
elif segmenter == "christov":
rpeaks, = biosppy.ecg.christov_segmenter(signal=filtered, sampling_rate=sampling_rate)
elif segmenter == "ssf":
rpeaks, = biosppy.ecg.ssf_segmenter(signal=filtered, sampling_rate=sampling_rate, threshold=20, before=0.03, after=0.01)
elif segmenter == "pekkanen":
rpeaks = segmenter_pekkanen(ecg=filtered, sampling_rate=sampling_rate, window_size=5.0, lfreq=5.0, hfreq=15.0)
else:
raise ValueError("Unknown segmenter: %s." % segmenter)
# Correct R-peak locations
rpeaks, = biosppy.ecg.correct_rpeaks(signal=filtered,
rpeaks=rpeaks,
sampling_rate=sampling_rate,
tol=0.05)
# Extract cardiac cycles and rpeaks
cardiac_cycles, rpeaks = biosppy.ecg.extract_heartbeats(signal=filtered,
rpeaks=rpeaks,
sampling_rate=sampling_rate,
before=0.2,
after=0.4)
# Compute heart rate
heart_rate_idx, heart_rate = biosppy.tools.get_heart_rate(beats=rpeaks,
sampling_rate=sampling_rate,
smooth=True,
size=3)
# Get time indices
length = len(ecg)
T = (length - 1) / float(sampling_rate)
ts = np.linspace(0, T, length, endpoint=False)
heart_rate_times = ts[heart_rate_idx]
heart_rate_times = np.round(heart_rate_times*sampling_rate).astype(int) # Convert heart rate times to timepoints
# what for is this line in biosppy??
# cardiac_cycles_tmpl = np.linspace(-0.2, 0.4, cardiac_cycles.shape[1], endpoint=False)
# Prepare Output Dataframe
# ==========================
ecg_df = pd.DataFrame({"ECG_Raw": np.array(ecg)}) # Create a dataframe
ecg_df["ECG_Filtered"] = filtered # Add filtered signal
# Add R peaks
rpeaks_signal = np.array([np.nan]*len(ecg))
rpeaks_signal[rpeaks] = 1
ecg_df["ECG_R_Peaks"] = rpeaks_signal
# Heart Rate
try:
heart_rate = interpolate(heart_rate, heart_rate_times, sampling_rate) # Interpolation using 3rd order spline
ecg_df["Heart_Rate"] = heart_rate
except TypeError:
print("NeuroKit Warning: ecg_process(): Sequence too short to compute heart rate.")
ecg_df["Heart_Rate"] = np.nan
# Store Additional Feature
# ========================
processed_ecg = {"df": ecg_df,
"ECG": {
"R_Peaks": rpeaks
}
}
# Heartbeats
heartbeats = pd.DataFrame(cardiac_cycles).T
heartbeats.index = pd.date_range(pd.datetime.today(), periods=len(heartbeats), freq=str(int(1000000/sampling_rate)) + "us")
processed_ecg["ECG"]["Cardiac_Cycles"] = heartbeats
# Waves
waves = ecg_wave_detector(ecg_df["ECG_Filtered"], rpeaks)
processed_ecg["ECG"].update(waves)
# Systole
processed_ecg["df"]["ECG_Systole"] = ecg_systole(ecg_df["ECG_Filtered"], rpeaks, waves["T_Waves_Ends"])
return(processed_ecg)
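# Illustrative access pattern for the returned structure (a sketch, assuming
# a 1000 Hz ECG array named `ecg_signal`):
#
#   processed = ecg_preprocess(ecg_signal, sampling_rate=1000)
#   filtered = processed["df"]["ECG_Filtered"]    # filtered trace
#   rpeaks = processed["ECG"]["R_Peaks"]          # R-peak sample indices
#   beats = processed["ECG"]["Cardiac_Cycles"]    # one column per heartbeat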
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def ecg_find_peaks(signal, sampling_rate=1000):
"""
Find R peaks indices on the ECG channel.
Parameters
----------
signal : list or ndarray
ECG signal (preferably filtered).
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
rpeaks : list
List of R-peaks location indices.
Example
----------
>>> import neurokit as nk
>>> Rpeaks = nk.ecg_find_peaks(signal)
Notes
----------
*Authors*
- the BioSPPy dev team (https://github.com/PIA-Group/BioSPPy)
*Dependencies*
- biosppy
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
"""
rpeaks, = biosppy.ecg.hamilton_segmenter(np.array(signal), sampling_rate=sampling_rate)
rpeaks, = biosppy.ecg.correct_rpeaks(signal=np.array(signal), rpeaks=rpeaks, sampling_rate=sampling_rate, tol=0.05)
return(rpeaks)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def ecg_wave_detector(ecg, rpeaks):
"""
Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave. The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
"""
q_waves = []
p_waves = []
q_waves_starts = []
s_waves = []
t_waves = []
t_waves_starts = []
t_waves_ends = []
for index, rpeak in enumerate(rpeaks[:-3]):
try:
epoch_before = np.array(ecg)[int(rpeaks[index-1]):int(rpeak)]
epoch_before = epoch_before[int(len(epoch_before)/2):len(epoch_before)]
epoch_before = list(reversed(epoch_before))
q_wave_index = np.min(find_peaks(epoch_before))
q_wave = rpeak - q_wave_index
p_wave_index = q_wave_index + np.argmax(epoch_before[q_wave_index:])
p_wave = rpeak - p_wave_index
inter_pq = epoch_before[q_wave_index:p_wave_index]
inter_pq_derivative = np.gradient(inter_pq, 2)
q_start_index = find_closest_in_list(len(inter_pq_derivative)/2, find_peaks(inter_pq_derivative))
q_start = q_wave - q_start_index
q_waves.append(q_wave)
p_waves.append(p_wave)
q_waves_starts.append(q_start)
except ValueError:
pass
except IndexError:
pass
try:
epoch_after = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
epoch_after = epoch_after[0:int(len(epoch_after)/2)]
s_wave_index = np.min(find_peaks(epoch_after))
s_wave = rpeak + s_wave_index
t_wave_index = s_wave_index + np.argmax(epoch_after[s_wave_index:])
t_wave = rpeak + t_wave_index
inter_st = epoch_after[s_wave_index:t_wave_index]
inter_st_derivative = np.gradient(inter_st, 2)
t_start_index = find_closest_in_list(len(inter_st_derivative)/2, find_peaks(inter_st_derivative))
t_start = s_wave + t_start_index
t_end = np.min(find_peaks(epoch_after[t_wave_index:]))
t_end = t_wave + t_end
s_waves.append(s_wave)
t_waves.append(t_wave)
t_waves_starts.append(t_start)
t_waves_ends.append(t_end)
except ValueError:
pass
except IndexError:
pass
# pd.Series(epoch_before).plot()
# t_waves = []
# for index, rpeak in enumerate(rpeaks[0:-1]):
#
# epoch = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
# pd.Series(epoch).plot()
#
# # T wave
# middle = (rpeaks[index+1] - rpeak) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak+quarter):int(rpeak+middle)]
#
# try:
# t_wave = int(rpeak+quarter) + np.argmax(epoch)
# t_waves.append(t_wave)
# except ValueError:
# pass
#
# p_waves = []
# for index, rpeak in enumerate(rpeaks[1:]):
# index += 1
# # Q wave
# middle = (rpeak - rpeaks[index-1]) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak-middle):int(rpeak-quarter)]
#
# try:
# p_wave = int(rpeak-quarter) + np.argmax(epoch)
# p_waves.append(p_wave)
# except ValueError:
# pass
#
# q_waves = []
# for index, p_wave in enumerate(p_waves):
# epoch = np.array(ecg)[int(p_wave):int(rpeaks[rpeaks>p_wave][0])]
#
# try:
# q_wave = p_wave + np.argmin(epoch)
# q_waves.append(q_wave)
# except ValueError:
# pass
#
# # TODO: manage to find the begininng of the Q and the end of the T wave so we can extract the QT interval
ecg_waves = {"T_Waves": t_waves,
"P_Waves": p_waves,
"Q_Waves": q_waves,
"S_Waves": s_waves,
"Q_Waves_Onsets": q_waves_starts,
"T_Waves_Onsets": t_waves_starts,
"T_Waves_Ends": t_waves_ends}
return(ecg_waves)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def ecg_systole(ecg, rpeaks, t_waves_ends):
"""
Returns the localization of systoles and diastoles.
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
t_waves_ends : list or ndarray
T waves localization.
Returns
----------
systole : list
    List indicating systole (1) and diastole (0) phases, one value per sample.
Example
----------
>>> import neurokit as nk
>>> systole = nk.ecg_systole(ecg, rpeaks, t_waves_ends)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Details*
- **Systole/Diastole**: One prominent channel of body and brain communication is that conveyed by baroreceptors, pressure and stretch-sensitive receptors within the heart and surrounding arteries. Within each cardiac cycle, bursts of baroreceptor afferent activity encoding the strength and timing of each heartbeat are carried via the vagus and glossopharyngeal nerve afferents to the nucleus of the solitary tract. This is the principal route that communicates to the brain the dynamic state of the heart, enabling the representation of cardiovascular arousal within viscerosensory brain regions, and influence ascending neuromodulator systems implicated in emotional and motivational behaviour. Because arterial baroreceptors are activated by the arterial pulse pressure wave, their phasic discharge is maximal during and immediately after the cardiac systole, that is, when the blood is ejected from the heart, and minimal during cardiac diastole, that is, between heartbeats (Azevedo, 2017).
References
-----------
- Azevedo, R. T., Garfinkel, S. N., Critchley, H. D., & Tsakiris, M. (2017). Cardiac afferent activity modulates the expression of racial stereotypes. Nature communications, 8.
- Edwards, L., Ring, C., McIntyre, D., & Carroll, D. (2001). Modulation of the human nociceptive flexion reflex across the cardiac cycle. Psychophysiology, 38(4), 712-718.
- Gray, M. A., Rylander, K., Harrison, N. A., Wallin, B. G., & Critchley, H. D. (2009). Following one's heart: cardiac rhythms gate central initiation of sympathetic reflexes. Journal of Neuroscience, 29(6), 1817-1825.
"""
waves = np.array([""]*len(ecg))
waves[rpeaks] = "R"
waves[t_waves_ends] = "T"
systole = [0]
current = 0
for value in waves[1:]:
    if value == "R":
        current = 1
    elif value == "T":
        current = 0
    systole.append(current)
return(systole)
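# Worked example (illustrative): with waves = ["", "R", "", "T", "", "R"],
# the loop above yields systole = [0, 1, 1, 0, 0, 1] -- the phase switches
# to systole at each R peak and back to diastole at each T-wave end.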
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def segmenter_pekkanen(ecg, sampling_rate, window_size=5.0, lfreq=5.0, hfreq=15.0):
"""
ECG R peak detection based on `Kathirvel et al. (2011) <http://link.springer.com/article/10.1007/s13239-011-0065-3/fulltext.html>`_ with some tweaks (mainly robust estimation of the rectified signal cutoff threshold).
Parameters
----------
ecg : list or ndarray
ECG signal array.
sampling_rate : int
Sampling rate (samples/second).
window_size : float
    Length (in seconds) of the sliding window used for the robust threshold estimation.
lfreq : float
Low frequency of the band pass filter.
hfreq : float
High frequency of the band pass filter.
Returns
----------
rpeaks : ndarray
R peaks location.
Example
----------
>>> import neurokit as nk
>>> rpeaks = nk.segmenter_pekkanen(ecg_signal, 1000)
*Authors*
- `Jami Pekkanen <https://github.com/jampekka>`_
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- scipy
- numpy
*See Also*
- rpeakdetect: https://github.com/tru-hy/rpeakdetect
"""
window_size = int(window_size*sampling_rate)
lowpass = scipy.signal.butter(1, hfreq/(sampling_rate/2.0), 'low')
highpass = scipy.signal.butter(1, lfreq/(sampling_rate/2.0), 'high')
# TODO: Could use an actual bandpass filter
ecg_low = scipy.signal.filtfilt(*lowpass, x=ecg)
ecg_band = scipy.signal.filtfilt(*highpass, x=ecg_low)
# Square (=signal power) of the first difference of the signal
decg = np.diff(ecg_band)
decg_power = decg**2
# Robust threshold and normalizator estimation
thresholds = []
max_powers = []
for i in range(int(len(decg_power)/window_size)):
sample = slice(i*window_size, (i+1)*window_size)
d = decg_power[sample]
thresholds.append(0.5*np.std(d))
max_powers.append(np.max(d))
threshold = np.median(thresholds)
max_power = np.median(max_powers)
decg_power[decg_power < threshold] = 0
decg_power = decg_power/max_power
decg_power[decg_power > 1.0] = 1.0
square_decg_power = decg_power**2
# shannon_energy = -square_decg_power*np.log(square_decg_power) # This errors
# shannon_energy[np.where(np.isfinite(shannon_energy) == False)] = 0.0
shannon_energy = -square_decg_power*np.log(square_decg_power.clip(min=1e-6))
shannon_energy[np.where(shannon_energy <= 0)] = 0.0
mean_window_len = int(sampling_rate*0.125+1)
lp_energy = np.convolve(shannon_energy, [1.0/mean_window_len]*mean_window_len, mode='same')
#lp_energy = scipy.signal.filtfilt(*lowpass2, x=shannon_energy)
lp_energy = scipy.ndimage.gaussian_filter1d(lp_energy, sampling_rate/8.0)
lp_energy_diff = np.diff(lp_energy)
rpeaks = (lp_energy_diff[:-1] > 0) & (lp_energy_diff[1:] < 0)
rpeaks = np.flatnonzero(rpeaks)
rpeaks -= 1
return(rpeaks)
|
neuropsychology/NeuroKit.py
|
neurokit/bio/bio_ecg_preprocessing.py
|
Python
|
mit
| 23,113
|
[
"Gaussian"
] |
7e602c64974c4f739863056ff53533ee98b5a7cddfd623efeea151f2542a3cd4
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2011-2014
# Christian Kohlöffel
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
"""
Special purpose canvas including all required plotting function etc.
"""
from globals.six import text_type
import globals.constants as c
if c.PYQT5notPYQT4:
from PyQt5.QtWidgets import QTextBrowser
from PyQt5 import QtCore
else:
from PyQt4.QtGui import QTextBrowser
from PyQt4 import QtCore
class MessageBox(QTextBrowser):
"""
The MessageBox Class performs the write functions in the Message Window.
The previously defined MessageBox class is used as output (within the UI).
@sideeffect: None
"""
def __init__(self, origobj):
"""
Initialization of the MessageBox class.
@param origobj: This is the reference to the parent class initialized
previously.
"""
super(MessageBox, self).__init__()
self.setOpenExternalLinks(True)
self.append(self.tr("You are using DXF2GCODE"))
self.append(self.tr("Version %s (%s)") % (c.VERSION, c.DATE))
self.append(self.tr("For more information and updates visit:"))
self.append("<a href='http://sourceforge.net/projects/dxf2gcode/'>http://sourceforge.net/projects/dxf2gcode/</a>")
def tr(self, string_to_translate):
"""
Translate a string using the QCoreApplication translation framework
@param: string_to_translate: a unicode string
@return: the translated unicode string if it was possible to translate
"""
return text_type(QtCore.QCoreApplication.translate('MessageBox',
string_to_translate))
def write(self, string):
"""
The function is called by the window logger to write
the log message to the MessageBox.
@param string: The log message which will be written.
"""
stripped_string = string.strip()
if stripped_string:
self.append(stripped_string)
self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())  # scroll to the bottom
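    # Typical wiring (a sketch, not from this file): the window logger can
    # route its records here by using this widget as a logging stream, e.g.
    #
    #   handler = logging.StreamHandler(message_box)
    #   logging.getLogger().addHandler(handler)
    #
    # logging.StreamHandler only requires the target to expose write(),
    # which is exactly what this method provides.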
|
Poofjunior/dxf2gcode
|
gui/messagebox.py
|
Python
|
gpl-3.0
| 2,949
|
[
"VisIt"
] |
b53eb1763d9de4b40925780faf76288d5b7debb99494ce8650e44ba5d82c5dce
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#
# $Id: e8192a9e9900106561cd9a10220ad0521cfa1582 $
"""
sqlcmd - a simple SQL command interpreter
Requires:
- The Grizzled Python API (http://www.clapper.org/software/python/grizzled/)
- One or more Python DB API drivers. See the Grizzled "db" package.
- The enum package, from http://cheeseshop.python.org/pypi/enum/
- Python 2.5 or better
COPYRIGHT AND LICENSE
Copyright © 2008-2011 Brian M. Clapper
This is free software, released under the following BSD-like license:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. The end-user documentation included with the redistribution, if any,
must include the following acknowlegement:
This product includes software developed by Brian M. Clapper
(bmc@clapper.org, http://www.clapper.org/bmc/). That software is
copyright © 2008 Brian M. Clapper.
Alternately, this acknowlegement may appear in the software itself, if
and wherever such third-party acknowlegements normally appear.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL BRIAN M. CLAPPER BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
$Id: e8192a9e9900106561cd9a10220ad0521cfa1582 $
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from __future__ import with_statement
from cmd import Cmd
import cPickle
import logging
import os
import re
from StringIO import StringIO
from string import Template as StringTemplate
import sys
import tempfile
import textwrap
import time
import traceback
from grizzled import db
from grizzled.cmdline import CommandLineParser
from grizzled.log import WrappingLogFormatter
from grizzled.misc import str2bool
from grizzled import history
from deprecated_enum import Enum
from sqlcmd.config import SQLCmdConfig
from sqlcmd.exception import *
from sqlcmd.ecmd import ECmd
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
# Info about the module
__version__ = '0.7.1'
__author__ = 'Brian Clapper'
__email__ = 'bmc@clapper.org'
__url__ = 'https://github.com/bmc/sqlcmd/'
__copyright__ = '© 2008-2011 Brian M. Clapper'
__license__ = 'BSD-style license'
__all__ = ['SQLCmd', 'main']
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
MAX_WIDTH = 79
VERSION_STAMP = '''SQLCmd, version %s
Copyright 2008 Brian M. Clapper''' % __version__
INTRO = VERSION_STAMP + '''
Type "help" or "?" for help.
'''
DEFAULT_CONFIG_DIR = os.path.join(os.environ.get('HOME', os.getcwd()),
'.sqlcmd')
RC_FILE = os.path.join(DEFAULT_CONFIG_DIR, 'config')
HISTORY_FILE_FORMAT = os.path.join(DEFAULT_CONFIG_DIR, '%s.hist')
VARIABLE_ASSIGNMENT_RE = re.compile(r'^([A-Za-z0-9_-]+)=(.*)$')
VARIABLE_RE = '[A-Za-z0-9_-]+'
VARIABLE_REFERENCE_PREFIX = '$'
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
log = None
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def main():
rc = 0
try:
Main().run(sys.argv)
except SystemExit:
pass
except:
rc = 1
if log:
log.exception('')
else:
traceback.print_exc()
return rc
def die(s):
"""Like Perl's die()"""
log.error(s)
sys.exit(1)
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class Variable(object):
"""Captures information about a sqlcmd variable."""
def __init__(self,
name,
type,
initialValue,
docstring,
onChangeFunc=None):
self.name = name
self.type = type
self.defaultValue = initialValue
self.value = initialValue
self.onChange = onChangeFunc
self.docstring = docstring
def set_value_from_string(self, s):
new_value = None
if self.type == SQLCmd.VAR_TYPES.boolean:
new_value = str2bool(s)
elif self.type == SQLCmd.VAR_TYPES.string:
new_value = s
elif self.type == SQLCmd.VAR_TYPES.integer:
new_value = int(s)
else:
assert(False)
if new_value != self.value:
self.value = new_value
if self.onChange != None:
self.onChange(self)
def strValue(self):
if self.type == SQLCmd.VAR_TYPES.boolean:
if self.value:
return "true"
else:
return "false"
if self.type == SQLCmd.VAR_TYPES.string:
return self.value
if self.type == SQLCmd.VAR_TYPES.integer:
return str(self.value)
def __str__(self):
return '%s %s = %s' % (self.type, self.name, self.strValue())
def __hash__(self):
return self.name.__hash__()
class SQLCmdStringTemplate(StringTemplate):
idpattern = VARIABLE_RE
def substitute(self, vardict):
class DictWrapper(dict):
def __init__(self, realdict):
self.realdict = realdict
def __getitem__(self, key):
try:
return self.realdict[key]
except KeyError:
return ''
return StringTemplate.substitute(self, DictWrapper(vardict))
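# Illustrative behavior of the template class above (not in the original
# source): unknown variables expand to the empty string via DictWrapper, e.g.
#
#   SQLCmdStringTemplate('select * from $table where id = $id').substitute(
#       {'table': 'users'})
#   # -> 'select * from users where id = '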
class SQLCmd(ECmd):
"""The SQLCmd command interpreter."""
DEFAULT_HISTORY_MAX = history.DEFAULT_MAXLENGTH
COMMENT_PREFIX = '--'
MAIN_PROMPT = '? '
CONTINUATION_PROMPT = '> '
META_COMMAND_PREFIX = '.'
BINARY_VALUE_MARKER = "<binary>"
BINARY_FILTER = ''.join([(len(repr(chr(x)))==3) and chr(x) or '?'
for x in range(256)])
NO_SEMI_NEEDED = set(['help', '?', 'r', 'begin', 'commit', 'rollback',
'eof'])
NO_VAR_SUB = set(['.show'])
VAR_TYPES = Enum('boolean', 'string', 'integer')
def __init__(self, cfg):
Cmd.__init__(self)
self.prompt = "? "
self.__config = cfg
self.__db = None
self.__partial_command = None
self.__partial_cmd_history_start = None
self.__db_config = None
self.__history_file = None
self.__settings = {}
self.__variables = {}
self.__interactive = True
self.__in_multiline_command = False
self.save_history = True
self.identchars = Cmd.identchars + '.'
self.__aborted = False
def autocommitChanged(var):
if var.value == True:
# Autocommit changed
db = self.__db
if db != None:
print "Autocommit enabled. Committing current transaction."
db.commit()
vars = [
Variable('autocommit', SQLCmd.VAR_TYPES.boolean, True,
'Whether SQL statements are auto-committed or not.',
autocommitChanged),
Variable('binarymax', SQLCmd.VAR_TYPES.integer, 20,
'Number of characters to show in a BINARY column, if '
'"showbinary" is "true".'),
Variable('colspacing', SQLCmd.VAR_TYPES.integer, 1,
'Number of spaces to use between columns when displaying '
'the output of a SELECT statement.'),
Variable('echo', SQLCmd.VAR_TYPES.boolean, False,
'Whether or not SQL statements are echoed.'),
Variable('history', SQLCmd.VAR_TYPES.boolean, True,
'Whether or not to save commands in the history.'),
Variable('stacktrace', SQLCmd.VAR_TYPES.boolean, False,
'Whether or not to show a stack trace on error.'),
Variable('showbinary', SQLCmd.VAR_TYPES.boolean, False,
'Whether or not to try to display BINARY column values.'),
Variable('timings', SQLCmd.VAR_TYPES.boolean, True,
'Whether or not to show how long SQL statements take.'),
]
for v in vars:
self.__settings[v.name] = v
self.__init_settings_from_config()
def run_file_and_exit(self, file):
self.__run_file(file)
self.cmdqueue += ["EOF"]
self.__interactive = False
self.__prompt = ""
self.cmdloop()
def preloop(self):
# Would use Cmd.intro to put out the introduction, except that
# preloop() gets called first, and the intro should come out BEFORE
# the 'Connecting...' message. The other solution would be to override
# cmdloop(), but putting out the intro manually is simpler.
print INTRO
if self.__db_config != None:
try:
self.__connect_to(self.__db_config)
except AssertionError:
traceback.print_exc()
except:
etype, evalue, etb = sys.exc_info()
self.__handle_exception(evalue)
else:
self.__init_history()
def onecmd(self, line):
stop = False
try:
stop = Cmd.onecmd(self, line)
except:
etype, evalue, etb = sys.exc_info()
self.__handle_exception(evalue)
return stop
def set_database(self, database_alias):
assert self.__config != None
config_item = self.__config.find_match(database_alias)
assert(config_item != None)
self.__db_config = config_item
def interrupted(self):
self.__partial_command = None
self.prompt = SQLCmd.MAIN_PROMPT
print
def precmd(self, s):
tokens = s.split(None, 1)
if len(tokens) == 0:
return ''
if not (tokens[0] in SQLCmd.NO_VAR_SUB):
s = SQLCmdStringTemplate(s).substitute(self.__variables)
s = s.strip()
# Split again, now that we've substituted.
tokens = s.split(None, 1)
if len(tokens) == 1:
first = s
args = []
else:
first = tokens[0]
args = tokens[1:]
if not self.__in_multiline_command:
first = first.lower()
need_semi = not first in SQLCmd.NO_SEMI_NEEDED
setvar_match = VARIABLE_ASSIGNMENT_RE.match(s)
if setvar_match:
need_semi = False
s = 'dot_var %s=%s' % (setvar_match.group(1), setvar_match.group(2))
elif first.startswith(SQLCmd.COMMENT_PREFIX):
# Comments are handled specially. Rather than transform them
# into something that'll invoke a "do" method, we handle them
# directly here, then return an empty string. That way, the
# Cmd class's help functions don't notice and expose to view
# special comment methods.
need_semi = False
s = ''
elif first.startswith(SQLCmd.META_COMMAND_PREFIX):
s = ' '.join(['dot_' + first[1:]] + args)
need_semi = False
elif s == "EOF":
need_semi = False
else:
s = ' '.join([first] + args)
if s == "":
pass
elif need_semi and (s[-1] != ';'):
if self.__partial_command == None:
self.__partial_command = s
self.__partial_cmd_history_start = self.__history.get_total()
else:
self.__partial_command = self.__partial_command + ' ' + s
s = ""
self.prompt = SQLCmd.CONTINUATION_PROMPT
self.__in_multiline_command = True
else:
self.__in_multiline_command = False
if self.__partial_command != None:
s = self.__partial_command + ' ' + s
self.__partial_command = None
cmd_start = self.__partial_cmd_history_start
self.__partial_cmd_history_start = None
if self.__flag_is_set('history'):
self.__history.cut_back_to(cmd_start + 1)
self.__history.add_item(s, force=True)
# Strip the trailing ';'
if s[-1] == ';':
s = s[:-1]
self.prompt = SQLCmd.MAIN_PROMPT
return s
def completenames(self, text, *ignored):
"""
Get list of commands, for completion. This version just edits the
base class's results.
"""
if text.startswith('.'):
text = 'dot_' + text[1:]
commands = Cmd.completenames(self, text, ignored)
result = []
for command in commands:
if command.startswith('dot_'):
result.append('.' + command[4:])
else:
result.append(command)
return result
def parseline(self, line):
"""
Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
Overrides the parent class's version of this method, to handle
dot commands.
"""
cmd, arg, line = Cmd.parseline(self, line)
if cmd and cmd.startswith('.'):
s = 'dot'
if len(cmd) > 1:
s += '_%s' % cmd[1:]
cmd = s
return cmd, arg, line
def complete_dot(self, text, line, start_index, end_index):
return [n for n in self.completenames('') if n.startswith('.')]
def do_help(self, arg):
# Capture the output.
old_stdout = self.stdout
old_sys_stdout = sys.stdout
try:
buf = StringIO()
self.stdout = buf
sys.stdout = buf
self.__do_help(arg)
self.stdout = old_stdout
sys.stdout = old_sys_stdout
help = buf.getvalue()
if not help:
help = "%s\n" % str(self.nohelp % (arg,))
lines = help.split('\n')
# Trim leading and trailing blank lines.
def ltrim(lines):
"""
Recursive function to trim (in place) leading blank lines from
an array of lines.
"""
if len(lines) == 0:
return
if len(lines[0]) > 0:
return
del lines[0]
ltrim(lines)
def rtrim(lines):
"""
Recursive function to trim (in place) trailing blank lines
from an array of lines
"""
if len(lines) == 0:
return
if len(lines[-1]) > 0:
return
del lines[-1]
rtrim(lines)
# Figure out initial indent.
indent = 0
first_non_blank = None
for line in lines:
if len(line) > 0:
first_non_blank = line
break
if first_non_blank:
for c in first_non_blank:
# Assumes no tabs.
if c == ' ':
indent += 1
else:
break
prefix = ' ' * indent
new_lines = []
for line in lines:
if line.startswith(prefix):
new_lines.append(line[len(prefix):])
else:
new_lines.append(line)
ltrim(new_lines)
rtrim(new_lines)
self.stdout.write('\n'.join(new_lines))
self.stdout.write('\n')
finally:
sys.stdout = old_sys_stdout
self.stdout = old_stdout
def __do_help(self, arg):
"""
Swiped from the base class's do_help() method and modified
to handle dot commands better.
"""
if arg:
if arg.startswith('.'):
arg = 'dot_' + arg[1:]
try:
func = getattr(self, 'help_' + arg)
func()
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
except AttributeError:
pass
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd.startswith('dot_'):
cmd = '.' + cmd[4:]
if cmd.lower() == 'eof':
continue
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n"%str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, help.keys(),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def do_redo(self, args):
"""
Re-run a command.
Usage: r [num|string]
redo [num|string]
where 'num' is the number of the command to re-run, as shown in the
'history' display. 'string' is a substring to match against the
command history; for instance, 'r select' attempts to run the last
command starting with 'select'. If called with no arguments, just
re-run the last command.
"""
self.do_r(args)
def do_r(self, args):
"""
Re-run a command.
Usage: r [num|string]
redo [num|string]
where 'num' is the number of the command to re-run, as shown in the
'history' display. 'string' is a substring to match against the
command history; for instance, 'r select' attempts to run the last
command starting with 'select'. If called with no arguments, just
re-run the last command.
"""
a = args.split()
if len(a) > 1:
raise BadCommandError, 'Too many parameters'
if len(a) == 0:
# Redo last command.
line = self.__history.get_last_item()
else:
try:
line = self.__history.get_item(int(a[0]))
except ValueError:
line = self.__history.get_last_matching_item(a[0])
if line == None:
print "No match."
else:
print line
# Temporarily turn off SQL echo. If this is a SQL command,
# we just echoed it, and we don't want it to be echoed twice.
echo = self.__flag_is_set('echo')
self.__set_setting('echo', False)
self.cmdqueue += [line]
self.__set_setting('echo', echo)
def complete_r(self, text, line, start_index, end_index):
h = self.__history.get_history_list()
h.reverse()
matches = set()
i = 0
for command in h:
i+=1
if len(command.strip()) == 0:
continue
tokens = command.split()
if len(text) == 0:
matches.add(tokens[0])
elif tokens[0].startswith(text):
matches.add(tokens[0])
return list(matches)
def do_select(self, args):
"""
Run a SQL 'SELECT' statement.
"""
self.__ensure_connected()
cursor = self.__db.cursor()
try:
self.__handle_select(args, cursor)
finally:
cursor.close()
if self.__flag_is_set('autocommit'):
self.__db.commit()
def complete_select(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def do_insert(self, args):
"""
Run a SQL 'INSERT' statement.
"""
self.__handle_update('insert', args)
def complete_insert(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def do_update(self, args):
"""
Run a SQL 'UPDATE' statement.
"""
self.__handle_update('update', args)
def complete_update(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def do_delete(self, args):
"""
Run a SQL 'DELETE' statement.
"""
self.__handle_update('delete', args)
def complete_delete(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def do_create(self, args):
"""
Run a SQL 'CREATE' statement (e.g., 'CREATE TABLE', 'CREATE INDEX')
"""
self.__handle_update('create', args)
def complete_create(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def do_alter(self, args):
"""
Run a SQL 'ALTER' statement (e.g., 'ALTER TABLE', 'ALTER INDEX')
"""
self.__handle_update('alter', args)
def complete_alter(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def do_drop(self, args):
"""
Run a SQL 'DROP' statement (e.g., 'DROP TABLE', 'DROP INDEX')
"""
self.__handle_update('drop', args)
def complete_drop(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def do_begin(self, args):
"""
Begin a SQL transaction. This command is essentially a no-op: It's
ignored in autocommit mode, and irrelevant when autocommit mode is
off. It's there primarily for SQL scripts.
"""
self.__ensure_connected()
if self.__flag_is_set('autocommit'):
log.warning('Autocommit is enabled. "begin" ignored')
def do_commit(self, args):
"""
Commit the current transaction. Ignored if 'autocommit' is enabled.
(Autocommit is enabled by default.)
"""
self.__ensure_connected()
if self.__flag_is_set('autocommit'):
log.warning('Autocommit is enabled. "commit" ignored')
else:
assert self.__db != None
self.__db.commit()
def do_rollback(self, args):
"""
Roll the current transaction back. Ignored if 'autocommit' is enabled.
(Autocommit is enabled by default.)
"""
self.__ensure_connected()
if self.__flag_is_set('autocommit'):
log.warning('Autocommit is enabled. "rollback" ignored')
else:
assert self.__db != None
self.__db.rollback()
def do_EOF(self, args):
"""
Handles an end-of-file on input.
"""
if self.__interactive:
print "\nBye."
self.__save_history()
if self.__db != None:
try:
self.__db.close()
except db.Warning, ex:
log.warning('%s' % str(ex))
except db.Error, ex:
log.error('%s' % str(ex))
return True
def do_dot_about(self, args):
"""
Display information about sqlcmd. Takes no parameters.
"""
import grizzled
print VERSION_STAMP
print '(Using %s, version %s)' % (grizzled.title, grizzled.version)
def do_dot_exit(self, args):
"""
Exit sqlcmd. .exit is equivalent to typing the key sequence
corresponding to an end-of-file condition (Ctrl-D on Unix systems,
Ctrl-Z on Windows).
"""
self.cmdqueue += ['EOF']
def do_dot_set(self, args):
"""
Handles a '.set' command, to set a sqlcmd setting. With no arguments,
this command displays all sqlcmd settings and values.
Usage: .set [setting value]
"""
self.__echo('.set', args, add_semi=False)
set_args = args.split()
total_args = len(set_args)
if total_args == 0:
self.__show_vars(self.__settings)
return
if total_args != 2:
raise BadCommandError, 'Incorrect number of arguments'
self.__set_setting(set_args[0], set_args[1])
def complete_dot_set(self, text, line, start_index, end_index):
tokens = line.split()
total_tokens = len(tokens)
if (total_tokens == 1) or ((total_tokens == 2) and (line[-1] != ' ')):
# .set _
# or
# .set v_
#
# Complete the things that can be set
names = self.__settings.keys()
names.sort()
if len(text) == 0:
matches = names
else:
matches = [name for name in names if name.startswith(text)]
elif (total_tokens == 2) or (total_tokens == 3):
# .set variable _
#
# or
#
# .set variable v_
#
# So, complete the legal values.
varname = tokens[1]
matches = []
try:
var = self.__settings[varname]
if var.type == SQLCmd.VAR_TYPES.boolean:
matches = ['true', 'false']
elif var.type == SQLCmd.VAR_TYPES.integer:
sys.stdout.write('\nEnter a number\n%s' % line)
sys.stdout.flush()
elif var.type == SQLCmd.VAR_TYPES.string:
sys.stdout.write('\nEnter a string\n%s' % line)
sys.stdout.flush()
if len(tokens) == 3:
matches = [m for m in matches if m.startswith(tokens[2])]
except KeyError:
matches = []
return matches
def do_dot_h(self, args):
"""
Show the current command history. Identical to the 'hist' and
'history' commands.
Usage: .h
"""
self.__show_history()
def do_dot_hist(self, args):
"""
Show the current command history. Identical to the 'h' command and
'history' commands.
Usage: .hist
"""
self.__show_history()
def do_dot_history(self, args):
"""
Show the current command history. Identical to the 'h' command and
'hist' commands.
Usage: .history
"""
self.__show_history()
def do_dot_show(self, args):
"""
Run the ".show" command. There are several subcommands.
.show database Show information about the connected database.
.show tables [regexp] Show the names of all tables. If <regexp> is
supplied, show only those tables whose names
match the regular expression.
"""
tokens = args.split(None)
if len(tokens) == 0:
raise BadCommandError('Missing argument(s) to ".show".')
cmd = tokens[0]
if cmd.lower() == 'tables':
if len(tokens) > 2:
raise BadCommandError('Usage: .show tables [regexp]')
elif len(tokens) == 1:
match_table = lambda name: True
else:
try:
r = re.compile(tokens[1])
match_table = lambda name: r.match(name)
except re.error, ex:
raise BadCommandError('"%s" is a bad regular '
'expression: %s' %
(tokens[1], ex.message))
self.__echo('.show', args, add_semi=False)
for table in self.__get_tables():
if match_table(table):
print table
elif cmd.lower() == 'database':
self.__echo('.show', args, add_semi=False)
self.__ensure_connected()
wrapper = textwrap.TextWrapper(width=MAX_WIDTH,
subsequent_indent=' ')
cursor = self.__db.cursor()
try:
db_info = cursor.get_rdbms_metadata()
print wrapper.fill('Database: %s' % self.__db_config.database)
if self.__db_config.host:
print wrapper.fill('Host: %s' % self.__db_config.host)
if self.__db_config.port:
print wrapper.fill('Port: %s' % self.__db_config.port)
print wrapper.fill('Vendor: %s' % db_info.vendor)
print wrapper.fill('Product: %s' % db_info.product)
print wrapper.fill('Version: %s' % db_info.version)
finally:
cursor.close()
else:
raise BadCommandError('Unknown argument(s) to command ".show": '
'%s' % args)
def complete_dot_show(self, text, line, start_index, end_index):
possibilities = ['tables', 'database']
matches = []
if len(text) == 0:
matches = possibilities
else:
for arg in possibilities:
if arg.startswith(text):
matches.append(arg)
return matches
def do_dot_desc(self, args):
"""
Describe a table. Identical to the 'describe' command.
Usage: .desc tablename [full]
If 'full' is specified, then the tables indexes are displayed
as well (assuming the underlying DB driver supports retrieving
index metadata).
"""
self.do_dot_describe(args, cmd='.desc')
def do_dot_describe(self, args, cmd='.describe'):
"""
Describe a table. Identical to the 'desc' command.
Usage: .describe tablename [full]
If 'full' is specified, then the tables indexes are displayed
as well (assuming the underlying DB driver supports retrieving
index metadata).
"""
self.__ensure_connected()
cursor = self.__db.cursor()
try:
self.__handle_describe(cmd, args, cursor)
finally:
cursor.close()
def complete_dot_desc(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def complete_dot_describe(self, text, line, start_index, end_index):
return self.__complete_no_context(text)
def do_dot_echo(self, args):
"""
Echo all remaining arguments to standard output. Useful for
scripts.
Usage:
.echo [args]
"""
if args:
args = args.strip()
print args
def complete_dot_echo(self, text, line, start_index, end_index):
return self.__complete_variables(text)
def do_dot_load(self, args):
"""
Load and run a file full of commands without exiting the command
shell.
Usage: .run file
.load file
"""
self.do_dot_run(args)
def complete_dot_load(self, text, line, start_index, end_index):
return self.complete_dot_run(text, line, start_index, end_index)
def do_dot_vars(self, args):
"""
Display the list of variables that can be substituted into other
input lines. For example:
? table=mytable
? columns="color, size"
? .vars
columns="color, size"
table="mytable"
"""
if self.__variables:
names = self.__variables.keys()
names.sort()
for name in names:
print '%s="%s"' %\
(name, self.__variables[name].replace('"', '\\"'))
def do_dot_var(self, args):
"""
Set a variable that can be interpolated, shell style, within subsequent
commands. For example:
table=mytable
            select * from $table;
Usage: .var name=value
name=value
"""
match = VARIABLE_ASSIGNMENT_RE.match(args)
if not match:
raise BadCommandError('Illegal .var command.')
variable = match.group(1)
value = match.group(2)
value = value.strip()
if value[0] in ('"', "'"):
if value[-1] != value[0]:
log.error('Missing ending %s in variable value.' %
{'"': 'double quote',
"'": 'single quote'}[value[0]])
return
value = value[1:-1]
if len(value) == 0:
if self.__variables.has_key(variable):
del self.__variables[variable]
else:
new_value = []
for c in value:
if c == '\\':
continue
new_value.append(c)
            self.__variables[variable] = ''.join(new_value)
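    # An illustrative .var session (a sketch; the table name is made up):
    #
    #     ? .var table=mytable
    #     ? select * from $table;
    #
    # The $table reference is expanded, shell style, before the statement
    # reaches the database driver, as the docstring above describes.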
def do_dot_run(self, args):
"""
Load and run a file full of sqlcmd commands without exiting the SQL
command shell. After the contents of the file have been run through
sqlcmd, you will be prompted again for interactive input (if sqlcmd
is running interactively).
Usage: .run file
.load file
"""
tokens = args.split(None, 1)
if len(tokens) > 1:
raise BadCommandError, 'Too many arguments to ".load"'
try:
self.__run_file(os.path.expanduser(tokens[0]))
except IOError, (ex, msg):
log.error('Unable to load file "%s": %s' % (tokens[0], msg))
def complete_dot_run(self, text, line, start_index, end_index):
matches = []
if text == None:
            text = ''
text = text.strip()
if text.startswith('~'):
text = os.path.expanduser(text)
if len(text) == 0:
directory = '.'
filename = None
include_directory = False
elif not (os.path.sep in text):
directory = '.'
filename = text
include_directory = False
else:
if os.path.isdir(text) or text[-1] == os.path.sep:
directory = text
filename = None
else:
directory = os.path.dirname(text)
filename = os.path.basename(text)
include_directory = True
if directory:
files = os.listdir(directory)
if filename:
if filename in files:
matches = [filename]
else:
matches = [f for f in files if f.startswith(filename)]
else:
matches = files
if matches:
matches = [f for f in matches if f[0] != '.']
if include_directory:
matches = [os.path.join(directory, f) for f in matches]
return matches
def do_dot_connect(self, args):
"""
Close the current database connection, and connect to another
database.
Usage: .connect database_alias
where 'database_alias' is a valid database alias from the .sqlcmd
startup file.
"""
tokens = args.split(None, 1)
if len(tokens) > 1:
raise BadCommandError, 'Too many arguments to "connect"'
if len(tokens) == 0:
raise BadCommandError, 'Usage: .connect databasename'
if self.__db != None:
try:
self.__db.close()
except db.Error:
pass
self.set_database(tokens[0])
assert(self.__db_config != None)
self.__connect_to(self.__db_config)
def complete_dot_connect(self, text, line, start_index, end_index):
aliases = self.__config.get_aliases()
if len(text.strip()) > 0:
aliases = [a for a in aliases if a.startswith(text)]
return aliases
def help_settings(self):
print """
There are various settings that control the behavior of sqlcmd. These values
are set via a special structured comment syntax; that way, SQL scripts that
set sqlcmd variables can still be used with other SQL interpreters without
causing problems.
Usage: .set setting value
Boolean settings can take the values "on", "off", "true", "false", "yes",
"no", "0" or "1".
Typing ".set" by itself lists all current settings.
The list of settings, their types, and their meaning follow:
"""
name_width = 0
for v in self.__settings.values():
name_width = max(name_width, len(v.name))
names = self.__settings.keys()
names.sort()
prefix = ' '
desc_width = MAX_WIDTH - name_width - len(prefix) - 2
wrapper = textwrap.TextWrapper(width=desc_width)
for name in names:
v = self.__settings[name]
desc = '(%s) %s Default: %s' %\
(v.type, v.docstring, v.defaultValue)
desc = wrapper.wrap(desc)
print '%s%-*s %s' % (prefix, name_width, v.name, desc[0])
for s in desc[1:]:
print '%s%-*s %s' % (prefix, name_width, ' ', s)
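    # An illustrative .set session (a sketch; 'echo' and 'colspacing' are
    # settings referenced elsewhere in this class):
    #
    #     ? .set echo on
    #     ? .set colspacing 4
    #     ? .set
    #
    # The bare ".set" form lists all current settings, as the help text
    # above explains.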
def default(self, s):
args = s.split(None, 1)
command = args[0]
if len(args) == 1:
args = ''
else:
args = args[1]
# If the command begins with "dot_", then it's an unknown dot command.
if command.startswith('dot_'):
command = command.replace('dot_', '.')
log.error('"%s" is an unknown sqlcmd command.' % command)
else:
# Pass through to database engine, as if it were a SELECT.
self.__ensure_connected()
cursor = self.__db.cursor()
try:
self.__handle_select(args, cursor, command=command)
finally:
cursor.close()
if self.__flag_is_set('autocommit'):
self.__db.commit()
def emptyline(self):
pass
def __complete_no_context(self, text):
text = text.strip()
items = []
if len(text) == 0:
items = self.__complete_tables(text)
elif text.startswith(VARIABLE_REFERENCE_PREFIX):
items = self.__complete_variables(text)
else:
items = self.__complete_tables(text)
return items
def __complete_tables(self, text):
items = []
text = text.strip()
tables = self.__get_tables()
if len(text) > 0:
items = [t for t in tables if t.startswith(text)]
else:
items = tables
return items
def __complete_variables(self, text):
items = []
text = text.strip()
if (len(text) > 0) and (text[0] == VARIABLE_REFERENCE_PREFIX):
if len(text) > 1:
text = text[1:]
items = [k for k in self.__variables.keys() if k.startswith(text)]
else:
items = self.__variables.keys()
return ['$%s' % i for i in items]
def __get_tables(self):
self.__ensure_connected()
cursor = self.__db.cursor()
try:
tables = cursor.get_tables()
tables.sort()
return tables
finally:
cursor.close()
def __show_vars(self, var_dict):
width = 0
for name in var_dict.keys():
width = max(width, len(name))
vars = [name for name in var_dict.keys()]
vars.sort()
for name in vars:
v = var_dict[name]
print '%-*s = %s' % (width, v.name, v.strValue())
def __set_setting(self, varname, value):
try:
var = self.__settings[varname]
var.set_value_from_string(value)
except KeyError:
raise BadCommandError('No such setting: "%s"' % varname)
except ValueError:
            raise BadCommandError('Bad value "%s" for setting "%s".' %
(value, varname))
def __handle_update(self, command, args):
try:
cursor = self.__db.cursor()
self.__exec_SQL(cursor, command, args)
rows = cursor.rowcount
if rows == None:
print "No row count available."
else:
pl = ''
if rows < 0:
rows = 0
if rows != 1:
pl = 's'
print '%d row%s' % (rows, pl)
except db.Error:
raise
else:
cursor.close()
if self.__flag_is_set('autocommit'):
self.__db.commit()
def __handle_select(self, args, cursor, command="select"):
fd, temp = tempfile.mkstemp(".dat", "sqlcmd")
os.close(fd)
self.__exec_SQL(cursor, command, args)
# Don't rely on the row count from the cursor. It isn't always
# reliable.
rows, col_names, col_sizes = self.__calculate_column_sizes(cursor, temp)
pl = ""
if rows != 1:
pl = "s"
print "%d row%s\n" % (rows, pl)
if rows > 0:
self.__dump_result_set(rows, col_names, col_sizes, temp, cursor)
def __dump_result_set(self, rows, col_names, col_sizes, temp, cursor):
# Now, dump the header with the column names, being sure to
# honor the padding sizes.
headers = []
rules = []
for i in range(0, len(col_names)):
headers += ['%-*s' % (col_sizes[i], col_names[i])]
rules += ['-' * col_sizes[i]]
spacing = ' ' * self.__settings['colspacing'].value
print spacing.join(headers)
print spacing.join(rules)
# Finally, read back the data and dump it.
max_binary = self.__settings['binarymax'].value
if max_binary < 0:
max_binary = sys.maxint
f = open(temp)
eof = False
while not eof:
try:
rs = cPickle.load(f)
except EOFError:
break
data = []
i = 0
for col_value in rs:
if col_value == None:
col_value = "NULL"
col_info = cursor.description[i]
type = col_info[1]
strValue = ""
format = '%-*s' # left justify
if type == self.__db.BINARY:
if self.__flag_is_set('showbinary'):
strValue = col_value.translate(SQLCmd.BINARY_FILTER)
if len(strValue) > max_binary:
strValue = strValue[:max_binary]
else:
strValue = SQLCmd.BINARY_VALUE_MARKER
elif type == self.__db.NUMBER:
format = '%*s' # right justify
if col_value == "NULL":
                        strValue = col_value
elif (col_value - int(col_value)) == 0:
strValue = int(col_value)
else:
strValue = str(col_value)
else:
strValue = unicode(col_value)
data += [format % (col_sizes[i], strValue)]
i += 1
print spacing.join(data)
print ''
f.close()
        try:
            os.remove(temp)
        except OSError:
            pass
def __calculate_column_sizes(self, cursor, temp_file):
col_names = []
col_sizes = []
rows = 0
if cursor.description:
for col in cursor.description:
col_names += [col[0]]
name_size = len(col[0])
if col[1] == self.__db.BINARY:
col_sizes += [max(name_size, len(SQLCmd.BINARY_VALUE_MARKER))]
else:
col_sizes += [name_size]
# Write the results (pickled) to a temporary file. We'll iterate
# through them twice: Once to calculate the column sizes, the
# second time to display them.
if cursor.rowcount > 1000:
print "Processing result set..."
max_binary = self.__settings['binarymax'].value
if max_binary < 0:
max_binary = sys.maxint
f = open(temp_file, "w")
rs = cursor.fetchone()
while rs != None:
cPickle.dump(rs, f)
i = 0
rows += 1
for col_value in rs:
col_info = cursor.description[i]
type = col_info[1]
if type == self.__db.BINARY:
if self.__flag_is_set('showbinary'):
size = len(col_value.translate(SQLCmd.BINARY_FILTER))
size = min(size, max_binary)
else:
size = len(SQLCmd.BINARY_VALUE_MARKER)
else:
size = len(unicode(col_value))
col_sizes[i] = max(col_sizes[i], size)
i += 1
rs = cursor.fetchone()
f.close()
return (rows, col_names, col_sizes)
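    # A note on the two-pass design (a sketch of the idea, not the exact
    # control flow): __calculate_column_sizes() spools every row to a pickle
    # file while measuring widths, and __dump_result_set() replays that same
    # file, so nothing is printed until all column widths are final:
    #
    #     f = open(temp_file, "w")
    #     cPickle.dump(row, f)      # pass 1: measure each row
    #     ...
    #     rs = cPickle.load(f)      # pass 2: format and print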
def __handle_describe(self, cmd, args, cursor):
self.__echo(cmd, args)
a = args.split()
if not len(a) in (1, 2):
raise BadCommandError, 'Usage: describe table [full]'
full = False
if (len(a) == 2):
if a[1].lower() != 'full':
raise BadCommandError, 'Usage: describe table [full]'
else:
full = True
table = a[0]
results = cursor.get_table_metadata(table)
width = 0
for col in results:
name = col[0]
width = max(width, len(name))
header = 'Table %s:' % table
dashes = '-' * len(header)
print '%s' % dashes
print '%s' % header
print '%s\n' % dashes
for col in results:
name = col[0]
type = col[1]
char_size = col[2]
precision = col[3]
scale = col[4]
nullable = col[5]
stype = type
if (char_size != None) and (char_size > 0):
stype = '%s(%s)' % (type, char_size)
elif precision != None:
stype = type
sep = '('
if (precision != None) and (precision > 0):
stype = stype + sep + str(precision)
sep = ', '
if (scale != None) and (scale > 0):
stype = stype + sep + str(scale)
if sep != '(':
stype = stype + ')'
if nullable == None:
snull = ''
elif nullable:
snull = 'NULL'
else:
snull = 'NOT NULL'
print '%-*s %s %s' % (width, name, stype, snull)
if full:
print '\n--------\nIndexes:\n--------\n'
indexes = cursor.get_index_metadata(table)
if not indexes:
print 'No indexes.'
else:
width = 0
for index_data in indexes:
width = max(width, len(index_data[0]))
wrapper = textwrap.TextWrapper(width=MAX_WIDTH)
wrapper.subsequent_indent = ' ' * (width + 14)
sep = None
for index_data in indexes:
name = index_data[0]
columns = index_data[1]
desc = index_data[2]
if sep != None:
print sep
s = '%-*s Columns: %s' % \
(width, name, ', '.join(columns))
print '\n'.join(wrapper.wrap(s))
if desc:
s = '%*s Description: %s' % \
(width, ' ', desc)
print '\n'.join(wrapper.wrap(s))
sep = '---------------------------------------' \
'---------------------------------------'
print ''
def __handle_exception(self, ex):
if isinstance(ex, NonFatalError):
log.error('%s' % ex.message)
if self.__flag_is_set('stacktrace'):
traceback.print_exc()
elif isinstance(ex, db.Warning):
log.warning('%s' % ex.message)
else:
log.error('%s' % ex.message)
if self.__flag_is_set('stacktrace'):
traceback.print_exc()
if self.__db != None: # mostly a hack for PostgreSQL
try:
self.__db.rollback()
except db.Error:
pass
def __exec_SQL(self, cursor, sql_command, args):
self.__echo(sql_command, args)
start_elapsed = time.time()
cursor.execute(' '.join([sql_command, args]))
end_elapsed = time.time()
if self.__flag_is_set('timings'):
total_elapsed = end_elapsed - start_elapsed
print 'Execution time: %5.3f seconds' % total_elapsed
def __init_settings_from_config(self):
errors = []
for varname, value in self.__config.settings.items():
try:
self.__set_setting(varname, value)
except BadCommandError, ex:
errors.append(ex.message)
if errors:
log.error('In configuration file "%s", section [%s]:\n%s\n' %
(self.__config.path,
self.__config.variables_section,
'\n'.join(errors)))
def __init_history(self):
self.__history = history.get_history()
self.__history.max_length = SQLCmd.DEFAULT_HISTORY_MAX
completer_delims = self.__history.get_completer_delims()
new_delims = ''
for c in completer_delims:
if c not in ['~', '/', '$']:
new_delims += c
self.__history.set_completer_delims(new_delims)
if self.__history_file != None:
try:
print 'Loading history file "%s"' % self.__history_file
self.__history.load_history_file(self.__history_file)
except IOError:
pass
def __echo(self, *args, **kw):
if self.__flag_is_set('echo'):
semi = ''
if kw.get('add_semi', True):
semi = ';'
            cmd = ' '.join(args).strip()
print '\n%s%s\n' % (cmd, semi)
def __flag_is_set(self, varname):
return self.__settings[varname].value
def __save_history(self):
if (self.__history_file != None) and (self.save_history):
try:
print 'Saving history file "%s"' % self.__history_file
self.__history.save_history_file(self.__history_file)
except IOError, (errno, message):
sys.stderr.write('Unable to save history file "%s": %s\n' % \
                                 (self.__history_file, message))
def __show_history(self):
self.__history.show()
def __run_file(self, file):
try:
with open(file) as f:
history = self.__flag_is_set('history')
#if history:
#self.cmdqueue += '.set history false'
for line in f.readlines():
if line[-1] == '\n':
line = line[:-1] # chop \n
self.cmdqueue += [line]
if history:
self.cmdqueue += ['.set history true']
except IOError, ex:
log.error('Cannot run file "%s": %s' % (file, str(ex)))
def __connect_to(self, db_config):
if self.__db != None:
self.__save_history()
driver = db.get_driver(db_config.db_type)
print 'Connecting to %s database "%s" on host %s.' %\
(driver.display_name, db_config.database, db_config.host)
self.__db = driver.connect(host=db_config.host,
port=db_config.port,
user=db_config.user,
password=db_config.password,
database=db_config.database)
history_file = HISTORY_FILE_FORMAT % db_config.primary_alias
self.__history_file = os.path.expanduser(history_file)
self.__init_history()
if db_config.on_connect:
log.debug('Running on-connect script "%s"' % db_config.on_connect)
self.__run_file(db_config.on_connect)
def __ensure_connected(self):
if self.__db == None:
raise NotConnectedError, 'Not connected to a database.'
LOG_LEVELS = { 'debug' : logging.DEBUG,
'info' : logging.INFO,
'warning' : logging.WARNING,
'error' : logging.ERROR,
'critical' : logging.CRITICAL }
class Main(object):
def __init__(self):
pass
def run(self, argv):
self.__parse_params(argv)
# Initialize logging
self.__init_logging(self.__log_level, self.__log_file)
# Load the configuration
cfg = SQLCmdConfig(os.path.dirname(self.__config_file))
try:
cfg.load_file(self.__config_file)
except IOError, ex:
log.warning(str(ex))
except ConfigurationError, ex:
die(str(ex))
# Load the history
try:
save_history = True
if self.__db_connect_info:
(db, dbType, hp, user, pw) = self.__db_connect_info
host = hp
port = None
if ':' in hp:
                    (host, port) = hp.split(':', 1)
cfg.add("__cmdline__", # dummy section name
"__cmdline__", # alias
host,
port,
db,
dbType,
user,
pw)
self.__alias = "__cmdline__"
save_history = False
assert(self.__alias)
cmd = SQLCmd(cfg)
cmd.save_history = save_history
cmd.set_database(self.__alias)
except ConfigurationError, ex:
die(str(ex))
if self.__input_file:
try:
cmd.run_file_and_exit(self.__input_file)
except IOError, (ex, errormsg):
die('Failed to load file "%s": %s' %\
(self.__input_file, errormsg))
else:
cmd.cmdloop()
def __parse_params(self, argv):
USAGE = 'Usage: %prog [OPTIONS] [alias] [@file]'
opt_parser = CommandLineParser(usage=USAGE)
opt_parser.add_option('-c', '--config', action='store', dest='config',
default=RC_FILE,
help='Specifies the configuration file to use. '
'Defaults to "%default".')
opt_parser.add_option('-d', '--db', action='store', dest='database',
help='Database to use. Format: '
'database,dbtype,host[:port],user,password')
opt_parser.add_option('-l', '--loglevel', action='store',
dest='loglevel',
help='Enable log messages as level "n", where ' \
'"n" is one of: %s' % ', '.join(LOG_LEVELS),
default='info')
opt_parser.add_option('-L', '--logfile', action='store', dest='logfile',
help='Dump log messages to LOGFILE, instead of ' \
'standard output')
opt_parser.add_option('-v', '--version', action='store_true',
dest='show_version',
help='Show the version stamp and exit.')
options, args = opt_parser.parse_args(argv)
if options.show_version:
print VERSION_STAMP
sys.exit(0)
args = args[1:]
if not len(args) in (0, 1, 2):
opt_parser.die_with_usage('Incorrect number of parameters')
if options.loglevel:
if not (options.loglevel in LOG_LEVELS):
                opt_parser.die_with_usage('Bad value "%s" for log level.' %\
                                          options.loglevel)
self.__input_file = None
self.__alias = None
self.__db_connect_info = None
self.__log_level = LOG_LEVELS[options.loglevel]
self.__log_file = options.logfile
self.__config_file = options.config
if len(args) == 0:
pass # handled below
elif len(args) == 1:
if args[0].startswith('@'):
self.__input_file = args[0][1:]
else:
self.__alias = args[0]
else:
self.__alias = args[0]
if not args[1].startswith('@'):
opt_parser.die_with_usage('File parameter must start with "@"')
self.__input_file = args[1][1:]
if options.database:
self.__db_connect_info = options.database.split(',')
if len(self.__db_connect_info) != 5:
opt_parser.die_with_usage('Bad argument "%s" to -d option' %\
options.database)
if not (self.__db_connect_info or self.__alias):
opt_parser.die_with_usage('You must specify either an alias or a '
'valid argument to "-d"')
if self.__db_connect_info and self.__alias:
opt_parser.die_with_usage('You cannot specify both an alias '
'and "-d"')
def __init_logging(self, level, filename):
"""Initialize logging subsystem"""
date_format = '%H:%M:%S'
if level == None:
level = logging.WARNING
logging.basicConfig(level=level)
stderr_handler = logging.StreamHandler(sys.stderr)
formatter = WrappingLogFormatter(format='%(levelname)s: %(message)s')
stderr_handler.setLevel(level)
stderr_handler.setFormatter(formatter)
handlers = [stderr_handler]
if filename:
file_handler = logging.FileHandler(filename)
handlers.append(file_handler)
msg_format = '%(asctime)s %(levelname)s (%(name)s) %(message)s'
formatter = WrappingLogFormatter(format=msg_format,
date_format=date_format)
file_handler.setLevel(level)
file_handler.setFormatter(formatter)
global log
log = logging.getLogger('sqlcmd')
root_logger = logging.getLogger('')
root_logger.handlers = handlers
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == '__main__':
sys.exit(main())
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/sqlcmd/sqlcmd/__init__.py
|
Python
|
bsd-3-clause
| 61,025
|
[
"Brian"
] |
a2326eeabe6650a280ce500bad2d2682bcce72178f4c616ac59234d65d1bc0c9
|
# -*- coding: utf-8 -*-
# Copyright © 2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from docutils import nodes
from docutils.parsers.rst import roles
from docutils.transforms import Transform
from nikola.utils import LOGGER
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
name = "emoji"
def set_site(self, site):
self.site = site
roles.register_local_role('emoji', emoji_role)
site.rst_transforms.append(Emojis)
def emoji_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
text = text.lower()
LOGGER.warn('The role :emoji:`{0}` is deprecated. Use |{0}| instead'.format(text))
node = nodes.image(
uri='https://cdnjs.cloudflare.com/ajax/libs/emojify.js/1.1.0/images/basic/{0}.png'.format(text),
alt=text,
classes=['emoji'],
)
return [node], []
class Emojis(Transform):
"""
Replace some substitutions if they aren't defined in the document.
"""
# run before the default Substitutions
default_priority = 210
# list from http://www.tortue.me/
emojis = set([
'bowtie', # NOQA
'smile',
'laughing',
'blush',
'smiley',
'relaxed',
'smirk',
'heart_eyes',
'kissing_heart',
'kissing_closed_eyes',
'flushed',
'relieved',
'satisfied',
'grin',
'wink',
'stuck_out_tongue_winking_eye',
'stuck_out_tongue_closed_eyes',
'grinning',
'kissing',
'kissing_smiling_eyes',
'stuck_out_tongue',
'sleeping',
'worried',
'frowning',
'anguished',
'open_mouth',
'grimacing',
'confused',
'hushed',
'expressionless',
'unamused',
'sweat_smile',
'sweat',
'weary',
'pensive',
'disappointed',
'confounded',
'fearful',
'cold_sweat',
'persevere',
'cry',
'sob',
'joy',
'astonished',
'scream',
'neckbeard',
'tired_face',
'angry',
'rage',
'triumph',
'sleepy',
'yum',
'mask',
'sunglasses',
'dizzy_face',
'imp',
'smiling_imp',
'neutral_face',
'no_mouth',
'innocent',
'alien',
'yellow_heart',
'blue_heart',
'purple_heart',
'heart',
'green_heart',
'broken_heart',
'heartbeat',
'heartpulse',
'two_hearts',
'revolving_hearts',
'cupid',
'sparkling_heart',
'sparkles',
'star',
'star2',
'dizzy',
'boom',
'collision',
'anger',
'exclamation',
'question',
'grey_exclamation',
'grey_question',
'zzz',
'dash',
'sweat_drops',
'notes',
'musical_note',
'fire',
'hankey',
'poop',
'shit',
'+1',
'thumbsup',
'-1',
'thumbsdown',
'ok_hand',
'punch',
'facepunch',
'fist',
'v',
'wave',
'hand',
'open_hands',
'point_up',
'point_down',
'point_left',
'point_right',
'raised_hands',
'pray',
'point_up_2',
'clap',
'muscle',
'metal',
'walking',
'runner',
'running',
'couple',
'family',
'two_men_holding_hands',
'two_women_holding_hands',
'dancer',
'dancers',
'ok_woman',
'no_good',
'information_desk_person',
'raised_hand',
'bride_with_veil',
'person_with_pouting_face',
'person_frowning',
'bow',
'couplekiss',
'couple_with_heart',
'massage',
'haircut',
'nail_care',
'boy',
'girl',
'woman',
'man',
'baby',
'older_woman',
'older_man',
'person_with_blond_hair',
'man_with_gua_pi_mao',
'man_with_turban',
'construction_worker',
'cop',
'angel',
'princess',
'smiley_cat',
'smile_cat',
'heart_eyes_cat',
'kissing_cat',
'smirk_cat',
'scream_cat',
'crying_cat_face',
'joy_cat',
'pouting_cat',
'japanese_ogre',
'japanese_goblin',
'see_no_evil',
'hear_no_evil',
'speak_no_evil',
'guardsman',
'skull',
'feet',
'lips',
'kiss',
'droplet',
'ear',
'eyes',
'nose',
'tongue',
'love_letter',
'bust_in_silhouette',
'busts_in_silhouette',
'speech_balloon',
'thought_balloon',
'feelsgood',
'finnadie',
'goberserk',
'godmode',
'hurtrealbad',
'rage1',
'rage2',
'rage3',
'rage4',
'suspect',
'trollface',
'sunny',
'umbrella',
'cloud',
'snowflake',
'snowman',
'zap',
'cyclone',
'foggy',
'ocean',
'cat',
'dog',
'mouse',
'hamster',
'rabbit',
'wolf',
'frog',
'tiger',
'koala',
'bear',
'pig',
'pig_nose',
'cow',
'boar',
'monkey_face',
'monkey',
'horse',
'racehorse',
'camel',
'sheep',
'elephant',
'panda_face',
'snake',
'bird',
'baby_chick',
'hatched_chick',
'hatching_chick',
'chicken',
'penguin',
'turtle',
'bug',
'honeybee',
'ant',
'beetle',
'snail',
'octopus',
'tropical_fish',
'fish',
'whale',
'whale2',
'dolphin',
'cow2',
'ram',
'rat',
'water_buffalo',
'tiger2',
'rabbit2',
'dragon',
'goat',
'rooster',
'dog2',
'pig2',
'mouse2',
'ox',
'dragon_face',
'blowfish',
'crocodile',
'dromedary_camel',
'leopard',
'cat2',
'poodle',
'paw_prints',
'bouquet',
'cherry_blossom',
'tulip',
'four_leaf_clover',
'rose',
'sunflower',
'hibiscus',
'maple_leaf',
'leaves',
'fallen_leaf',
'herb',
'mushroom',
'cactus',
'palm_tree',
'evergreen_tree',
'deciduous_tree',
'chestnut',
'seedling',
'blossom',
'ear_of_rice',
'shell',
'globe_with_meridians',
'sun_with_face',
'full_moon_with_face',
'new_moon_with_face',
'new_moon',
'waxing_crescent_moon',
'first_quarter_moon',
'waxing_gibbous_moon',
'full_moon',
'waning_gibbous_moon',
'last_quarter_moon',
'waning_crescent_moon',
'last_quarter_moon_with_face',
'first_quarter_moon_with_face',
'moon',
'earth_africa',
'earth_americas',
'earth_asia',
'volcano',
'milky_way',
'partly_sunny',
'octocat',
'squirrel',
'bamboo',
'gift_heart',
'dolls',
'school_satchel',
'mortar_board',
'flags',
'fireworks',
'sparkler',
'wind_chime',
'rice_scene',
'jack_o_lantern',
'ghost',
'santa',
'christmas_tree',
'gift',
'bell',
'no_bell',
'tanabata_tree',
'tada',
'confetti_ball',
'balloon',
'crystal_ball',
'cd',
'dvd',
'floppy_disk',
'camera',
'video_camera',
'movie_camera',
'computer',
'tv',
'iphone',
'phone',
'telephone',
'telephone_receiver',
'pager',
'fax',
'minidisc',
'vhs',
'sound',
'speaker',
'mute',
'loudspeaker',
'mega',
'hourglass',
'hourglass_flowing_sand',
'alarm_clock',
'watch',
'radio',
'satellite',
'loop',
'mag',
'mag_right',
'unlock',
'lock',
'lock_with_ink_pen',
'closed_lock_with_key',
'key',
'bulb',
'flashlight',
'high_brightness',
'low_brightness',
'electric_plug',
'battery',
'calling',
'email',
'mailbox',
'postbox',
'bath',
'bathtub',
'shower',
'toilet',
'wrench',
'nut_and_bolt',
'hammer',
'seat',
'moneybag',
'yen',
'dollar',
'pound',
'euro',
'credit_card',
'money_with_wings',
'e-mail',
'inbox_tray',
'outbox_tray',
'envelope',
'incoming_envelope',
'postal_horn',
'mailbox_closed',
'mailbox_with_mail',
'mailbox_with_no_mail',
'door',
'smoking',
'bomb',
'gun',
'hocho',
'pill',
'syringe',
'page_facing_up',
'page_with_curl',
'bookmark_tabs',
'bar_chart',
'chart_with_upwards_trend',
'chart_with_downwards_trend',
'scroll',
'clipboard',
'calendar',
'date',
'card_index',
'file_folder',
'open_file_folder',
'scissors',
'pushpin',
'paperclip',
'black_nib',
'pencil2',
'straight_ruler',
'triangular_ruler',
'closed_book',
'green_book',
'blue_book',
'orange_book',
'notebook',
'notebook_with_decorative_cover',
'ledger',
'books',
'bookmark',
'name_badge',
'microscope',
'telescope',
'newspaper',
'football',
'basketball',
'soccer',
'baseball',
'tennis',
'8ball',
'rugby_football',
'bowling',
'golf',
'mountain_bicyclist',
'bicyclist',
'horse_racing',
'snowboarder',
'swimmer',
'surfer',
'ski',
'spades',
'hearts',
'clubs',
'diamonds',
'gem',
'ring',
'trophy',
'musical_score',
'musical_keyboard',
'violin',
'space_invader',
'video_game',
'black_joker',
'flower_playing_cards',
'game_die',
'dart',
'mahjong',
'clapper',
'memo',
'pencil',
'book',
'art',
'microphone',
'headphones',
'trumpet',
'saxophone',
'guitar',
'shoe',
'sandal',
'high_heel',
'lipstick',
'boot',
'shirt',
'tshirt',
'necktie',
'womans_clothes',
'dress',
'running_shirt_with_sash',
'jeans',
'kimono',
'bikini',
'ribbon',
'tophat',
'crown',
'womans_hat',
'mans_shoe',
'closed_umbrella',
'briefcase',
'handbag',
'pouch',
'purse',
'eyeglasses',
'fishing_pole_and_fish',
'coffee',
'tea',
'sake',
'baby_bottle',
'beer',
'beers',
'cocktail',
'tropical_drink',
'wine_glass',
'fork_and_knife',
'pizza',
'hamburger',
'fries',
'poultry_leg',
'meat_on_bone',
'spaghetti',
'curry',
'fried_shrimp',
'bento',
'sushi',
'fish_cake',
'rice_ball',
'rice_cracker',
'rice',
'ramen',
'stew',
'oden',
'dango',
'egg',
'bread',
'doughnut',
'custard',
'icecream',
'ice_cream',
'shaved_ice',
'birthday',
'cake',
'cookie',
'chocolate_bar',
'candy',
'lollipop',
'honey_pot',
'apple',
'green_apple',
'tangerine',
'lemon',
'cherries',
'grapes',
'watermelon',
'strawberry',
'peach',
'melon',
'banana',
'pear',
'pineapple',
'sweet_potato',
'eggplant',
'tomato',
'corn',
'109',
'house',
'house_with_garden',
'school',
'office',
'post_office',
'hospital',
'bank',
'convenience_store',
'love_hotel',
'hotel',
'wedding',
'church',
'department_store',
'european_post_office',
'city_sunrise',
'city_sunset',
'japanese_castle',
'european_castle',
'tent',
'factory',
'tokyo_tower',
'japan',
'mount_fuji',
'sunrise_over_mountains',
'sunrise',
'stars',
'statue_of_liberty',
'bridge_at_night',
'carousel_horse',
'rainbow',
'ferris_wheel',
'fountain',
'roller_coaster',
'ship',
'speedboat',
'boat',
'sailboat',
'rowboat',
'anchor',
'rocket',
'airplane',
'helicopter',
'steam_locomotive',
'tram',
'mountain_railway',
'bike',
'aerial_tramway',
'suspension_railway',
'mountain_cableway',
'tractor',
'blue_car',
'oncoming_automobile',
'car',
'red_car',
'taxi',
'oncoming_taxi',
'articulated_lorry',
'bus',
'oncoming_bus',
'rotating_light',
'police_car',
'oncoming_police_car',
'fire_engine',
'ambulance',
'minibus',
'truck',
'train',
'station',
'train2',
'bullettrain_front',
'bullettrain_side',
'light_rail',
'monorail',
'railway_car',
'trolleybus',
'ticket',
'fuelpump',
'vertical_traffic_light',
'traffic_light',
'warning',
'construction',
'beginner',
'atm',
'slot_machine',
'busstop',
'barber',
'hotsprings',
'checkered_flag',
'crossed_flags',
'izakaya_lantern',
'moyai',
'circus_tent',
'performing_arts',
'round_pushpin',
'triangular_flag_on_post',
'jp',
'kr',
'cn',
'us',
'fr',
'es',
'it',
'ru',
'gb',
'uk',
'de',
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'keycap_ten',
'1234',
'zero',
'hash',
'symbols',
'arrow_backward',
'arrow_down',
'arrow_forward',
'arrow_left',
'capital_abcd',
'abcd',
'abc',
'arrow_lower_left',
'arrow_lower_right',
'arrow_right',
'arrow_up',
'arrow_upper_left',
'arrow_upper_right',
'arrow_double_down',
'arrow_double_up',
'arrow_down_small',
'arrow_heading_down',
'arrow_heading_up',
'leftwards_arrow_with_hook',
'arrow_right_hook',
'left_right_arrow',
'arrow_up_down',
'arrow_up_small',
'arrows_clockwise',
'arrows_counterclockwise',
'rewind',
'fast_forward',
'information_source',
'ok',
'twisted_rightwards_arrows',
'repeat',
'repeat_one',
'new',
'top',
'up',
'cool',
'free',
'ng',
'cinema',
'koko',
'signal_strength',
'u5272',
'u5408',
'u55b6',
'u6307',
'u6708',
'u6709',
'u6e80',
'u7121',
'u7533',
'u7a7a',
'u7981',
'sa',
'restroom',
'mens',
'womens',
'baby_symbol',
'no_smoking',
'parking',
'wheelchair',
'metro',
'baggage_claim',
'accept',
'wc',
'potable_water',
'put_litter_in_its_place',
'secret',
'congratulations',
'm',
'passport_control',
'left_luggage',
'customs',
'ideograph_advantage',
'cl',
'sos',
'id',
'no_entry_sign',
'underage',
'no_mobile_phones',
'do_not_litter',
'non-potable_water',
'no_bicycles',
'no_pedestrians',
'children_crossing',
'no_entry',
'eight_spoked_asterisk',
'eight_pointed_black_star',
'heart_decoration',
'vs',
'vibration_mode',
'mobile_phone_off',
'chart',
'currency_exchange',
'aries',
'taurus',
'gemini',
'cancer',
'leo',
'virgo',
'libra',
'scorpius',
'sagittarius',
'capricorn',
'aquarius',
'pisces',
'ophiuchus',
'six_pointed_star',
'negative_squared_cross_mark',
'a',
'b',
'ab',
'o2',
'diamond_shape_with_a_dot_inside',
'recycle',
'end',
'true',
'soon',
'clock1',
'clock130',
'clock10',
'clock1030',
'clock11',
'clock1130',
'clock12',
'clock1230',
'clock2',
'clock230',
'clock3',
'clock330',
'clock4',
'clock430',
'clock5',
'clock530',
'clock6',
'clock630',
'clock7',
'clock730',
'clock8',
'clock830',
'clock9',
'clock930',
'heavy_dollar_sign',
'copyright',
'registered',
'tm',
'x',
'heavy_exclamation_mark',
'bangbang',
'interrobang',
'o',
'heavy_multiplication_x',
'heavy_plus_sign',
'heavy_minus_sign',
'heavy_division_sign',
'white_flower',
'100',
'heavy_check_mark',
'ballot_box_with_check',
'radio_button',
'link',
'curly_loop',
'wavy_dash',
'part_alternation_mark',
'trident',
'black_square',
'white_square',
'white_check_mark',
'black_square_button',
'white_square_button',
'black_circle',
'white_circle',
'red_circle',
'large_blue_circle',
'large_blue_diamond',
'large_orange_diamond',
'small_blue_diamond',
'small_orange_diamond',
'small_red_triangle',
'small_red_triangle_down',
'shipit'])
def apply(self, **kwargs):
# only handle those not otherwise defined in the document
to_handle = self.emojis - set(self.document.substitution_defs)
for ref in self.document.traverse(nodes.substitution_reference):
refname = ref['refname']
if refname in to_handle:
node = nodes.image(
uri='https://cdnjs.cloudflare.com/ajax/libs/emojify.js/1.1.0/images/basic/{0}.png'.format(refname),
alt=refname,
classes=['emoji'],
height="24px",
width="24px")
ref.replace_self(node)
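# With this transform registered, reStructuredText posts can use the plain
# substitution form directly (a sketch):
#
#     Have a nice day |smile|
#
# Any |name| from the emoji list above that the document does not define
# itself is replaced by a 24x24 <img class="emoji"> served from the
# emojify.js CDN.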
|
getnikola/plugins
|
v7/emoji/emoji.py
|
Python
|
mit
| 27,893
|
[
"Bowtie",
"Octopus"
] |
82eab70c026a064da2df3ec43a1b6c46f2297a1d2b8c91ba07ec32a2e6e0c97b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# ChemPy - A chemistry toolkit for Python
#
# Copyright (c) 2010 by Joshua W. Allen (jwallen@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import Cython.Compiler.Options
import numpy
# Create annotated HTML files for each of the Cython modules
Cython.Compiler.Options.annotate = True
# Turn on profiling capacity for all Cython modules
#Cython.Compiler.Options.directive_defaults['profile'] = True
# The Cython modules to setup
# This is a more standard way of doing things, but Cython doesn't like it as much
packages=['chempy']
ext_modules = [
Extension('chempy.constants', ['chempy/constants.py']),
Extension('chempy.element', ['chempy/element.py']),
Extension('chempy.graph', ['chempy/graph.py']),
Extension('chempy.geometry', ['chempy/geometry.py']),
Extension('chempy.kinetics', ['chempy/kinetics.py']),
Extension('chempy.molecule', ['chempy/molecule.py']),
Extension('chempy.pattern', ['chempy/pattern.py']),
Extension('chempy.reaction', ['chempy/reaction.py']),
Extension('chempy.species', ['chempy/species.py']),
Extension('chempy.states', ['chempy/states.py']),
Extension('chempy.thermo', ['chempy/thermo.py']),
Extension('chempy.ext.thermo_converter', ['chempy/ext/thermo_converter.py']),
]
setup(name='ChemPy',
version='0.1.0',
description='A chemistry toolkit for Python',
author='Joshua W. Allen',
author_email='jwallen@mit.edu',
url='',
packages=packages,
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules,
include_dirs=[numpy.get_include()],
)
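# A typical build invocation for this script (assuming Cython and numpy are
# installed) would be, for example:
#
#     python setup.py build_ext --inplace
#
# which compiles each Extension listed above in place and, because
# Cython.Compiler.Options.annotate is enabled, also writes an annotated HTML
# report per module.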
|
jwallen/ChemPy
|
setup.py
|
Python
|
mit
| 2,948
|
[
"ChemPy"
] |
bce72303fe97a18a97a6ba41bc3071d911eca71cf83ad6fdc9e007151fab96b5
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The frontend for the Mojo bindings system."""
import argparse
import imp
import os
import pprint
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(script_dir, "pylib"))
from generate import mojom_data
from parse import mojo_parser
from parse import mojo_translate
def LoadGenerators(generators_string):
if not generators_string:
return [] # No generators.
generators = []
for generator_name in [s.strip() for s in generators_string.split(",")]:
# "Built-in" generators:
if generator_name.lower() == "c++":
generator_name = os.path.join(script_dir, "generators",
"mojom_cpp_generator.py")
elif generator_name.lower() == "javascript":
generator_name = os.path.join(script_dir, "generators",
"mojom_js_generator.py")
# Specified generator python module:
elif generator_name.endswith(".py"):
pass
else:
print "Unknown generator name %s" % generator_name
sys.exit(1)
generator_module = imp.load_source(os.path.basename(generator_name)[:-3],
generator_name)
generators.append(generator_module)
return generators
def ProcessFile(args, generator_modules, filename, processed_files):
# Ensure we only visit each file once.
if filename in processed_files:
if processed_files[filename] is None:
raise Exception("Circular dependency: " + filename)
return processed_files[filename]
processed_files[filename] = None
dirname, name = os.path.split(filename)
name = os.path.splitext(name)[0]
# TODO(darin): There's clearly too many layers of translation here! We can
# at least avoid generating the serialized Mojom IR.
tree = mojo_parser.Parse(filename)
mojom = mojo_translate.Translate(tree, name)
if args.debug_print_intermediate:
pprint.PrettyPrinter().pprint(mojom)
# Process all our imports first and collect the module object for each.
# We use these to generate proper type info.
for import_data in mojom['imports']:
import_filename = os.path.join(dirname, import_data['filename'])
import_data['module'] = ProcessFile(
args, generator_modules, import_filename, processed_files)
module = mojom_data.OrderedModuleFromData(mojom)
for generator_module in generator_modules:
generator = generator_module.Generator(module, args.include_dir,
args.output_dir)
generator.GenerateFiles()
processed_files[filename] = module
return module
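# A note on the processed_files sentinel above (illustrative): while a file
# is being processed its entry is None, so if a.mojom imports b.mojom and
# b.mojom imports a.mojom, the second visit to a.mojom finds the None entry
# and raises the "Circular dependency" exception instead of recursing forever.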
def Main():
parser = argparse.ArgumentParser(
description="Generate bindings from mojom files.")
parser.add_argument("filename", nargs="+",
help="mojom input file")
parser.add_argument("-i", "--include_dir", dest="include_dir", default=".",
help="include path for #includes")
parser.add_argument("-o", "--output_dir", dest="output_dir", default=".",
help="output directory for generated files")
parser.add_argument("-g", "--generators", dest="generators_string",
metavar="GENERATORS", default="c++,javascript",
help="comma-separated list of generators")
parser.add_argument("--debug_print_intermediate", action="store_true",
help="print the intermediate representation")
args = parser.parse_args()
generator_modules = LoadGenerators(args.generators_string)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for filename in args.filename:
ProcessFile(args, generator_modules, filename, {})
return 0
if __name__ == "__main__":
sys.exit(Main())
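# Example invocation (the file names are illustrative; the flags match the
# argparse definitions in Main() above):
#
#     mojom_bindings_generator.py -g c++,javascript -i include -o gen sample.mojom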
|
ChromiumWebApps/chromium
|
mojo/public/bindings/mojom_bindings_generator.py
|
Python
|
bsd-3-clause
| 3,912
|
[
"VisIt"
] |
113932cedcbdb48607b0f02729162b36cdd2c8a34f661efc4037f51bd093c3a7
|
from dirac.lib.base import *
from dirac.lib.diset import getRPCClient
from dirac.lib.credentials import authorizeAction, getUsername, getSelectedGroup
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC import gConfig, gLogger
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.Client.UserProfileClient import UserProfileClient
import json
USER_PROFILE_NAME = "Presenter"
LOAD_LAYOUT_ARGS = [ "name" , "user" , "group" ]
SAVE_LAYOUT_ARGS = [ "name" , "user" , "group" , "permissions" ]
DELETE_LAYOUT_ARGS = [ "name" ]
class PresenterController(BaseController):
################################################################################
def display(self):
gLogger.info( "Running display()" )
msg = "display() for %s@%s" % ( getUsername() , getSelectedGroup() )
if not authorizeAction():
gLogger.info( "Result %s: %s" % ( msg , "Not authorized" ) )
return render( "/login.mako" )
result = self.__convert()
if not result[ "OK" ]:
c.error = result[ "Message" ]
gLogger.error( "Result %s: %s" % ( msg , c.error ) )
return render( "/error.mako" )
c.select = dict()
history = dict()
for i in [ "Save" , "Load" ]:
result = self.__getHistory( i )
if not result[ "OK" ]:
history[ i ] = result[ "Message" ]
else:
history[ i ] = result[ "Value" ]
c.select[ "history" ] = history
result = self.__lastUsed()
if not result[ "OK" ]:
c.select[ "layout" ] = ""
c.select[ "error" ] = result[ "Message" ]
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return render("web/Presenter.mako")
c.select[ "layout" ] = result[ "Value" ]
gLogger.info( "Result %s: %s" % ( msg , c.select ) )
return render("web/Presenter.mako")
################################################################################
def kaboom(self):
uList = [ str( getUsername() ) ]
result = list()
for i in [ USER_PROFILE_NAME , "Default" ]:
upc = UserProfileClient( i , getRPCClient )
tmp = upc.deleteProfiles( uList )
allvar = upc.retrieveAllVars()
result.append( allvar )
return result
################################################################################
def all(self):
result = list()
for i in [ USER_PROFILE_NAME , "Default" ]:
upc = UserProfileClient( i , getRPCClient )
allvar = upc.retrieveAllVars()
result.append( allvar )
return result
################################################################################
@jsonify
def action(self):
if not authorizeAction():
return { "success" : "false" , "error" : "Insufficient rights" }
if request.params.has_key( "getAvailbleLayouts" ):
return self.__getLayout()
if request.params.has_key( "getUserLayouts" ):
return self.__getUserLayout()
elif request.params.has_key( "loadLayout" ):
if request.params.has_key( "loadLast" ):
return self.__loadLast()
return self.__loadLayout()
elif request.params.has_key( "saveLayout" ):
return self.__saveLayout()
elif request.params.has_key( "deleteLayout" ):
return self.__deleteLayout()
else:
return {"success":"false","error":"Action is not defined"}
################################################################################
def __array2obj( self , array ):
if not len( array ) > 3 :
gLogger.error( "Length of array %s should be more then 3" % array )
return {
'user' : 'undefined'
, 'group' : 'undefined'
, 'VO' : 'undefined'
, 'name' : 'undefined'
}
return {
'user' : array[ 0 ]
, 'group' : array[ 1 ]
, 'VO' : array[ 2 ]
, 'name' : array[ 3 ]
}
################################################################################
def __params2string( self , params ):
if not params:
return S_ERROR( "Missing first argument" )
if not isinstance( params , list ):
return S_ERROR( "List expected" )
callback = dict()
for i in params:
if not request.params.has_key( i ):
return S_ERROR( "Request has no key %s" % i )
try:
callback[ i ] = str( request.params[ i ] )
except Exception , x :
return S_ERROR ( x )
return S_OK( callback )
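  # For example (a sketch): with a request carrying "name", "user" and
  # "group" parameters, __params2string( LOAD_LAYOUT_ARGS ) returns S_OK with
  # { "name" : ..., "user" : ..., "group" : ... }; a missing parameter yields
  # S_ERROR( "Request has no key ..." ) instead.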
################################################################################
def __deleteLayout( self ):
gLogger.info( "Running deleteLayout()" )
msg = "delLayout() for %s@%s" % ( getUsername() , getSelectedGroup() )
result = self.__params2string( DELETE_LAYOUT_ARGS )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
args = result[ "Value" ]
name = args[ "name" ]
history = dict()
for i in [ "Save" , "Load" ]:
result = self.__deleteHistory( name , i )
if not result[ "OK" ]:
history[ i ] = result[ "Message" ]
else:
history[ i ] = result[ "Value" ]
upc = UserProfileClient( USER_PROFILE_NAME, getRPCClient )
result = upc.deleteVar( name )
gLogger.debug( result )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
gLogger.info( "Result %s: %s AND %s" % ( msg , "true" , history ) )
return { "success" : "true" , "result" : "true" , "history" : history }
################################################################################
def __saveLayout( self ):
gLogger.info( "Running saveLayout()" )
msg = "saveLayout() for %s@%s" % ( getUsername() , getSelectedGroup() )
result = self.__params2string( SAVE_LAYOUT_ARGS )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
args = result[ "Value" ]
name = args[ "name" ]
user = args[ "user" ]
group = args[ "group" ]
permissions = args[ "permissions" ]
result = self.__parsePermissions( name , permissions )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
permissions = result[ "Value" ]
data = dict()
for key , value in request.params.items():
try:
if len( value ) > 0:
data[ key ] = str( value )
except:
pass
if not len( data ) > 0:
err = "Data to store has zero length"
gLogger.error( "Result %s: %s" % ( msg , err ) )
return { "success" : "false" , "error" : err }
for i in LOAD_LAYOUT_ARGS : # Add vital params to layout if they are absent
if not data.has_key( i ):
data[ i ] = args[ i ]
upc = UserProfileClient( USER_PROFILE_NAME, getRPCClient )
result = upc.storeVar( name , data , permissions )
gLogger.debug( result )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
result = self.__setHistory( args , "Save" )
history = dict()
if not result[ "OK" ]:
history[ "Save" ] = result[ "Message" ]
else:
history[ "Save" ] = result[ "Value" ]
gLogger.info( "Result %s: %s AND %s" % ( msg , data , history ) )
return { "success" : "true" , "result" : data , "history" : history }
################################################################################
def __lastUsed( self ):
gLogger.info( "Running lastUsed()" )
msg = "lastUsed() for %s@%s" % ( getUsername() , getSelectedGroup() )
result = self.__getHistory( "Load" )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
history = result[ "Value" ]
if not len( history ) > 0:
err = "Load history is empty"
gLogger.error( "Result %s: %s" % ( msg , err ) )
return S_ERROR( err )
args = history[ 0 ]
name = args[ "name" ]
user = args[ "user" ]
group = args[ "group" ]
upc = UserProfileClient( USER_PROFILE_NAME, getRPCClient )
result = upc.retrieveVarFromUser( user , group, name )
gLogger.debug( result )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
layout = result[ "Value" ]
for i in LOAD_LAYOUT_ARGS : # Add params to layout if they are absent
if not layout.has_key( i ):
layout[ i ] = args[ i ]
gLogger.info( "Result %s: %s" % ( msg , layout ) )
return S_OK( layout )
################################################################################
def __loadLast( self ):
gLogger.info( "Running loadLast()" )
msg = "loadLast() for %s@%s" % ( getUsername() , getSelectedGroup() )
result = self.__lastUsed()
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
layout = result[ "Value" ]
gLogger.info( "Result %s: %s" % ( msg , layout ) )
return { "success" : "true" , "result" : layout }
################################################################################
def __loadLayout( self ):
gLogger.info( "Running loadLayout()" )
msg = "loadLayout() for %s@%s" % ( getUsername() , getSelectedGroup() )
result = self.__params2string( LOAD_LAYOUT_ARGS )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
args = result[ "Value" ]
name = args[ "name" ]
user = args[ "user" ]
group = args[ "group" ]
upc = UserProfileClient( USER_PROFILE_NAME, getRPCClient )
result = upc.retrieveVarFromUser( user , group, name )
gLogger.debug( result )
if not result[ "OK" ]:
if result[ "Message" ].find( "No data" ) < 0 :
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
err = "No data found for '%s' by %s@%s" % ( name , user , group )
gLogger.error( "Result %s: %s" % ( msg , err ) )
return { "success" : "false" , "error" : err }
layout = result[ "Value" ]
for i in LOAD_LAYOUT_ARGS : # Add params to layout if they are absent
if not layout.has_key( i ):
layout[ i ] = args[ i ]
result = self.__setHistory( args , "Load" )
history = dict()
if not result[ "OK" ]:
history[ "Load" ] = result[ "Message" ]
else:
history[ "Load" ] = result[ "Value" ]
gLogger.info( "Result %s: %s AND %s" % ( msg , layout , history ) )
return { "success" : "true" , "result" : layout , "history" : history }
################################################################################
def __setHistory( self , item , state ):
"""
Insert item to Load or Save history list in first position and checking for
duplications.
Return resulting list
"item" is a dict
"state" should be either "Save" or "Load" but can be any other value
"""
gLogger.info( "Running setHistory( %s , %s )" % ( item , state ) )
msg = "setHistory() for %s@%s" % ( getUsername() , getSelectedGroup() )
opt = "/Website/" + USER_PROFILE_NAME + "/ShowHistory"
    history_length = gConfig.getValue( opt , 5 )
upc = UserProfileClient( "Default" , getRPCClient )
group = str( getSelectedGroup() )
profile_name = USER_PROFILE_NAME + ".History." + state + "." + group
result = upc.retrieveVar( profile_name )
gLogger.info( result )
if not result[ "OK" ]:
if result[ "Message" ].find( "No data" ) < 0 :
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
history = list()
else:
history = result[ "Value" ]
if not isinstance( history , list ):
err = "List expected at: %s" % profile_name
gLogger.error( "Result %s: %s" % ( msg , err ) )
return S_ERROR( err )
    if len( history ) > history_length:
      history = history[ :history_length ]
history.insert( 0 , item )
history = uniqueElements( history )
gLogger.error( "History: %s" % history )
result = upc.storeVar( profile_name , history )
gLogger.info( result )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
gLogger.info( "Result %s: %s" % ( msg , history ) )
return S_OK( history )
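  # Illustrative behaviour (a sketch): loading layout "A" and then "B" gives
  # a Load history of [ B-item, A-item ]; loading "A" again inserts it at the
  # front and uniqueElements() drops the later duplicate, so the list becomes
  # [ A-item, B-item ], capped at history_length entries.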
################################################################################
def __getHistory( self , state ):
"""
Just get the history based on state
Return resulting list
"state" can be either "Save" or "Load"
"""
gLogger.info( "Running getHistory( %s )" % state )
msg = "getHistory() for %s@%s" % ( getUsername() , getSelectedGroup() )
opt = "/Website/" + USER_PROFILE_NAME + "/ShowHistory"
    history_length = gConfig.getValue( opt , 5 )
upc = UserProfileClient( "Default" , getRPCClient )
group = str( getSelectedGroup() )
profile_name = USER_PROFILE_NAME + ".History." + state + "." + group
result = upc.retrieveVar( profile_name )
gLogger.info( result )
if not result[ "OK" ]:
if result[ "Message" ].find( "No data" ) < 0 :
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
history = list()
else:
history = result[ "Value" ]
if not isinstance( history , list ):
err = "List expected at: %s" % profile_name
gLogger.error( "Result %s: %s" % ( msg , err ) )
return S_ERROR( err )
    if len( history ) > history_length:
      history = history[ :history_length ]
gLogger.info( "Result %s: %s" % ( msg , history ) )
return S_OK( history )
################################################################################
def __deleteHistory( self , name , state ):
"""
    Delete an item from the Load or Save history list
Return resulting list
"name" is a string
"state" can be either "Save" or "Load"
"""
gLogger.info( "Running deleteHistory( %s )" % name )
msg = "deleteHistory() for %s@%s" % ( getUsername() , getSelectedGroup() )
opt = "/Website/" + USER_PROFILE_NAME + "/ShowHistory"
    history_length = gConfig.getValue( opt , 5 )
upc = UserProfileClient( "Default" , getRPCClient )
group = str( getSelectedGroup() )
profile_name = USER_PROFILE_NAME + ".History." + state + "." + group
result = upc.retrieveVar( profile_name )
gLogger.info( result )
if not result[ "OK" ]:
if result[ "Message" ].find( "No data" ) < 0 :
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
gLogger.info( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_OK( list() ) # Nothing to delete, return an empty list
else:
result = result[ "Value" ]
if not isinstance( result , list ):
err = "List expected at: %s" % profile_name
gLogger.error( "Result %s: %s" % ( msg , err ) )
return S_ERROR( err )
history = list()
for i in result:
      if i.has_key( "name" ) and i["name"] != name:
history.append( i )
    if len( history ) > history_length:
      history = history[ :history_length ]
gLogger.error( "History: %s" % history )
result = upc.storeVar( profile_name , history )
gLogger.info( result )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
gLogger.info( "Result %s: %s" % ( msg , history ) )
return S_OK( history )
################################################################################
def __getLayout( self ) :
gLogger.info( "Running getLayout()" )
msg = "getLayout() for %s@%s" % ( getUsername() , getSelectedGroup() )
upc = UserProfileClient( USER_PROFILE_NAME, getRPCClient )
result = upc.listAvailableVars()
gLogger.debug( result )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
result = result[ "Value" ]
    available = map( self.__array2obj , result )
    gLogger.debug( available )
    users = list()
    for i in result :
      if len( i ) > 1 :
        users.append( { "user" : i[ 0 ] } )
    users = uniqueElements( users )
    gLogger.info( "Result %s: %s AND %s" % ( msg , available , users ) )
    return { "success" : "true" , "result" : available , "users" : users }
################################################################################
def __getUserLayout( self ) :
gLogger.info( "Running getUserLayout()" )
msg = "getUserLayout() for %s@%s" % ( getUsername() , getSelectedGroup() )
upc = UserProfileClient( USER_PROFILE_NAME, getRPCClient )
result = upc.retrieveAllVars()
gLogger.debug( result )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return { "success" : "false" , "error" : result[ "Message" ] }
layouts = result[ "Value" ]
data = list()
for name , value in layouts.items():
result = self.__getPermissions( name )
if not result[ "OK" ]:
perm = result[ "Message" ]
else:
perm = result[ "Value" ]
if perm.has_key( "ReadAccess" ):
perm = perm[ "ReadAccess" ]
else:
perm = "Undefined"
if value.has_key( "group" ):
group = value[ "group" ]
else:
group = "Undefined"
perm = perm.capitalize()
data.append( { "name" : name , "permission" : perm , "group" : group } )
gLogger.info( "Result %s: %s" % ( msg , data ) )
return { "success" : "true" , "result" : data }
################################################################################
def __getPermissions( self , name = False ):
gLogger.info( "getPermissions( %s )" % name )
msg = "getPermissions() for %s@%s" % ( getUsername() , getSelectedGroup() )
if not name:
err = "'name' argument for getPermissions function is absent"
gLogger.error( "Result %s: %s" % ( msg , err ) )
return S_ERROR( err )
upc = UserProfileClient( USER_PROFILE_NAME, getRPCClient )
result = upc.getVarPermissions( name )
gLogger.debug( result )
if not result[ "OK" ]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
gLogger.info( "Result %s: %s" % ( msg , result[ "Value" ] ) )
return S_OK( result[ "Value" ] )
################################################################################
def __parsePermissions( self , name = False , permissions = False ):
if not name:
err = "'name' argument for parsePermissions function is absent"
return S_ERROR( err )
if not permissions:
err = "'permissions' argument for parsePermissions function is absent"
return S_ERROR( err )
permissions = permissions.strip()
permissions = permissions.upper()
allPermissions = [ "USER" , "GROUP" , "VO" , "ALL" ]
    if permissions not in allPermissions:
err = "Value '%s' should be one of %s" % ( permissions , allPermissions )
return S_ERROR( err )
return S_OK( { "ReadAccess" : permissions } )
################################################################################
def __convert(self):
gLogger.info("Running convert()")
msg = "convert() for %s@%s" % ( getUsername() , getSelectedGroup() )
upc = UserProfileClient( "Summary", getRPCClient )
result = upc.retrieveAllVars()
gLogger.info( result )
if not result["OK"]:
if result[ "Message" ].find( "No data" ) < 0 :
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
result = "No data found, nothing to convert"
gLogger.info( "Result %s: %s" % ( msg , result ) )
return S_OK( result )
result = result[ "Value" ]
if not result.has_key( "Bookmarks" ):
result = "No old Bookmarks found"
gLogger.info( "Result %s: %s" % ( msg , result ) )
return S_OK( result )
data = result[ "Bookmarks" ]
    try:
      layouts = dict( data )
    except Exception:
      result = "Layouts '%s' is not a dictionary, can't convert" % data
      gLogger.info( "Result %s: %s" % ( msg , result ) )
      return S_OK( result )
err = list()
done = list()
gLogger.info( "Saving old data to new place" )
upcnew = UserProfileClient( USER_PROFILE_NAME, getRPCClient )
permissions = "USER"
user = str( getUsername() )
group = str( getSelectedGroup() )
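    # copy each layout to the new profile, then delete it from the old one;
    # failures are collected so a partial migration can be reported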
for i in layouts:
data = dict()
data[ "url" ] = layouts[ i ][ "url" ]
data[ "columns" ] = layouts[ i ][ "columns" ]
data[ "refresh" ] = layouts[ i ][ "refresh" ]
result = upcnew.storeVar( i , data , permissions )
gLogger.debug( result )
if not result[ "OK" ]:
err.append( result[ "Message" ] )
continue
done.append( result[ "Value" ] )
result = upc.deleteVar( i )
if not result["OK"]:
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
err.append( result["Message"] )
continue
gLogger.info( "Is something left?" )
result = upc.retrieveAllVars()
gLogger.info( result )
if result[ "OK" ] and len( result[ "Value" ] ) > 0:
text = "Some data has left at old place. Please remove them manually"
gLogger.info( "Result %s: %s" % ( msg , text ) )
if not result["OK"]:
if result[ "Message" ].find( "No data" ) < 0 :
gLogger.error( "Result %s: %s" % ( msg , result[ "Message" ] ) )
return S_ERROR( result[ "Message" ] )
gLogger.info( "Looks like old data are erased" )
if len( err ) == 0 and len( done ) == 0:
good = "Some magic has happens. Neither errors nor succesfull results"
good = good + " Perhaps there is no old profile to convert"
gLogger.info( "Result %s: %s" % ( msg , good ) )
return S_OK( good )
if len( err ) > 0 and len( done ) == 0:
error = "No succesfull results, only errors:\n"
tmp = "\n".join( err )
error = error + tmp
gLogger.error( "Result %s: %s" % ( msg , error ) )
return S_ERROR( error )
if len( err ) > 0 and len( done ) > 0:
good = "Conversion has finished partially sucessfull"
if len( err ) > 0:
good = good + ". There are some errors though\n"
else:
good = good + ". There is an error though\n"
error = "\n".join( err )
good = good + error
gLogger.info( "Result %s: %s" % ( msg , good ) )
return S_OK( good )
if len( err ) == 0 and len( done ) > 0:
good = "Conversion has finished sucessfully"
gLogger.info( "Result %s: %s" % ( msg , good ) )
return S_OK( good )
|
DIRACGrid/DIRACWeb
|
dirac/controllers/web/Presenter.py
|
Python
|
gpl-3.0
| 23,286
|
[
"DIRAC"
] |
284b14b098630270fba5a7193e54662ffdcb75c4e4f55e034f50d921c13e97ad
|
from __future__ import absolute_import
from datetime import datetime
import six
import pytz
import pytest
from django.utils import timezone
from sentry.testutils import AcceptanceTestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
from sentry.utils.compat.mock import patch
from tests.acceptance.page_objects.issue_list import IssueListPage
from tests.acceptance.page_objects.issue_details import IssueDetailsPage
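# events are backdated a few days so they fall inside the default
# issue-stream time window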
event_time = before_now(days=3).replace(tzinfo=pytz.utc)
class OrganizationGlobalHeaderTest(AcceptanceTestCase, SnubaTestCase):
def setUp(self):
super(OrganizationGlobalHeaderTest, self).setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(
organization=self.org, name="Mariachi Band", members=[self.user]
)
self.project_1 = self.create_project(
organization=self.org, teams=[self.team], name="Bengal"
)
self.project_2 = self.create_project(
organization=self.org, teams=[self.team], name="Sumatra"
)
self.project_3 = self.create_project(
organization=self.org, teams=[self.team], name="Siberian"
)
self.create_environment(name="development", project=self.project_1)
self.create_environment(name="production", project=self.project_1)
self.create_environment(name="visible", project=self.project_1, is_hidden=False)
self.create_environment(name="not visible", project=self.project_1, is_hidden=True)
self.create_environment(name="dev", project=self.project_2)
self.create_environment(name="prod", project=self.project_2)
self.login_as(self.user)
self.issues_list = IssueListPage(self.browser, self.client)
self.issue_details = IssueDetailsPage(self.browser, self.client)
def create_issues(self):
self.issue_1 = self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"timestamp": iso_format(event_time),
"fingerprint": ["group-1"],
},
project_id=self.project_1.id,
)
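        # issue_2 carries the "prod" environment on project_2 so the
        # environment-selector tests below have data to filter on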
self.issue_2 = self.store_event(
data={
"event_id": "b" * 32,
"message": "oh snap",
"timestamp": iso_format(event_time),
"fingerprint": ["group-2"],
"environment": "prod",
},
project_id=self.project_2.id,
)
def test_global_selection_header_dropdown(self):
self.dismiss_assistant()
self.project.update(first_event=timezone.now())
self.issues_list.visit_issue_list(
self.org.slug, query="?query=assigned%3Ame&project=" + six.text_type(self.project_1.id)
)
self.browser.wait_until_test_id("awaiting-events")
self.browser.click('[data-test-id="global-header-project-selector"]')
self.browser.snapshot("globalSelectionHeader - project selector")
self.browser.click('[data-test-id="global-header-environment-selector"]')
self.browser.snapshot("globalSelectionHeader - environment selector")
self.browser.click('[data-test-id="global-header-timerange-selector"]')
self.browser.snapshot("globalSelectionHeader - timerange selector")
@pytest.mark.skip(reason="Has been flaky lately.")
def test_global_selection_header_loads_with_correct_project(self):
"""
Global Selection Header should:
1) load project from URL if it exists
2) enforce a single project if loading issues list with no project in URL
a) last selected project via local storage if it exists
b) otherwise need to just select first project
"""
self.create_issues()
# No project id in URL, selects first project
self.issues_list.visit_issue_list(self.org.slug)
assert u"project={}".format(self.project_1.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_1.slug
# Uses project id in URL
self.issues_list.visit_issue_list(
self.org.slug, query=u"?project={}".format(self.project_2.id)
)
assert u"project={}".format(self.project_2.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_2.slug
# reloads page with no project id in URL, selects first project
self.issues_list.visit_issue_list(self.org.slug)
assert u"project={}".format(self.project_1.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_1.slug
# can select a different project
self.issues_list.global_selection.select_project_by_slug(self.project_3.slug)
self.issues_list.wait_until_loaded()
assert u"project={}".format(self.project_3.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_3.slug
# reloading page with no project id in URL after previously
# selecting an explicit project should load previously selected project
# from local storage
# TODO check environment as well
self.issues_list.visit_issue_list(self.org.slug)
self.issues_list.wait_until_loaded()
assert u"project={}".format(self.project_3.id) in self.browser.current_url
def test_global_selection_header_navigates_with_browser_back_button(self):
"""
Global Selection Header should:
1) load project from URL if it exists
2) enforce a single project if loading issues list with no project in URL
a) last selected project via local storage if it exists
b) otherwise need to just select first project
"""
self.create_issues()
# Issues list with project 1 selected
self.issues_list.visit_issue_list(
self.org.slug, query="?project=" + six.text_type(self.project_1.id)
)
self.issues_list.visit_issue_list(self.org.slug)
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_1.slug
# selects a different project
self.issues_list.global_selection.select_project_by_slug(self.project_3.slug)
self.issues_list.wait_until_loaded()
assert u"project={}".format(self.project_3.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_3.slug
# simulate pressing the browser back button
self.browser.back()
self.issues_list.wait_until_loaded()
assert u"project={}".format(self.project_1.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_1.slug
def test_global_selection_header_updates_environment_with_browser_navigation_buttons(self):
"""
Global Selection Header should:
1) load project from URL if it exists
2) clear the current environment if the user clicks clear
3) reload the environment from URL if it exists on browser navigation
"""
with self.feature("organizations:global-views"):
self.create_issues()
"""
set up workflow:
1) environment=All environments
2) environment=prod
3) environment=All environments
"""
self.issues_list.visit_issue_list(self.org.slug)
self.issues_list.wait_until_loaded()
assert u"environment=" not in self.browser.current_url
assert (
self.issue_details.global_selection.get_selected_environment() == "All Environments"
)
self.browser.click('[data-test-id="global-header-environment-selector"]')
self.browser.click('[data-test-id="environment-prod"]')
self.issues_list.wait_until_loaded()
assert u"environment=prod" in self.browser.current_url
assert self.issue_details.global_selection.get_selected_environment() == "prod"
self.browser.click('[data-test-id="global-header-environment-selector"] > svg')
self.issues_list.wait_until_loaded()
assert u"environment=" not in self.browser.current_url
assert (
self.issue_details.global_selection.get_selected_environment() == "All Environments"
)
"""
navigate back through history to the beginning
1) environment=All Environments -> environment=prod
2) environment=prod -> environment=All Environments
"""
self.browser.back()
self.issues_list.wait_until_loaded()
assert u"environment=prod" in self.browser.current_url
assert self.issue_details.global_selection.get_selected_environment() == "prod"
self.browser.back()
self.issues_list.wait_until_loaded()
assert u"environment=" not in self.browser.current_url
assert (
self.issue_details.global_selection.get_selected_environment() == "All Environments"
)
"""
navigate forward through history to the end
1) environment=All Environments -> environment=prod
2) environment=prod -> environment=All Environments
"""
self.browser.forward()
self.issues_list.wait_until_loaded()
assert u"environment=prod" in self.browser.current_url
assert self.issue_details.global_selection.get_selected_environment() == "prod"
self.browser.forward()
self.issues_list.wait_until_loaded()
assert u"environment=" not in self.browser.current_url
assert (
self.issue_details.global_selection.get_selected_environment() == "All Environments"
)
def test_global_selection_header_loads_with_correct_project_with_multi_project(self):
"""
Global Selection Header should:
1) load project from URL if it exists
2) load last selected projects via local storage if it exists
3) otherwise can search within "my projects"
"""
with self.feature("organizations:global-views"):
self.create_issues()
# No project id in URL, is "my projects"
self.issues_list.visit_issue_list(self.org.slug)
assert u"project=" not in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == "My Projects"
assert (
self.browser.get_local_storage_item(u"global-selection:{}".format(self.org.slug))
is None
)
# Uses project id in URL
self.issues_list.visit_issue_list(
self.org.slug, query=u"?project={}".format(self.project_2.id)
)
assert u"project={}".format(self.project_2.id) in self.browser.current_url
assert (
self.issues_list.global_selection.get_selected_project_slug() == self.project_2.slug
)
# should not be in local storage
assert (
self.browser.get_local_storage_item(u"global-selection:{}".format(self.org.slug))
is None
)
# reloads page with no project id in URL, remains "My Projects" because
# there has been no explicit project selection via UI
self.issues_list.visit_issue_list(self.org.slug)
assert u"project=" not in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == "My Projects"
# can select a different project
self.issues_list.global_selection.select_project_by_slug(self.project_3.slug)
self.issues_list.wait_until_loaded()
assert u"project={}".format(self.project_3.id) in self.browser.current_url
assert (
self.issues_list.global_selection.get_selected_project_slug() == self.project_3.slug
)
self.issues_list.global_selection.select_date("Last 24 hours")
self.issues_list.wait_until_loaded()
assert u"statsPeriod=24h" in self.browser.current_url
# This doesn't work because we treat as dynamic data in CI
# assert self.issues_list.global_selection.get_selected_date() == "Last 24 hours"
# reloading page with no project id in URL after previously
# selecting an explicit project should load previously selected project
# from local storage
self.issues_list.visit_issue_list(self.org.slug)
self.issues_list.wait_until_loaded()
# TODO check environment as well
assert u"project={}".format(self.project_3.id) in self.browser.current_url
assert (
self.issues_list.global_selection.get_selected_project_slug() == self.project_3.slug
)
@patch("django.utils.timezone.now")
def test_issues_list_to_details_and_back_with_all_projects(self, mock_now):
"""
If user has access to the `global-views` feature, which allows selecting multiple projects,
they should be able to visit issues list with no project in URL and list issues
for all projects they are members of.
They should also be able to open an issue and then navigate back to still see
"My Projects" in issues list.
"""
with self.feature("organizations:global-views"):
mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)
self.create_issues()
self.issues_list.visit_issue_list(self.org.slug)
self.issues_list.wait_for_issue()
assert u"project=" not in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == "My Projects"
# select the issue
self.issues_list.navigate_to_issue(1)
# going back to issues list should not have the issue's project id in url
self.issues_list.issue_details.go_back_to_issues()
self.issues_list.wait_for_issue()
# project id should remain *NOT* in URL
assert u"project=" not in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == "My Projects"
# can select a different project
self.issues_list.global_selection.select_project_by_slug(self.project_3.slug)
self.issues_list.wait_until_loaded()
assert u"project={}".format(self.project_3.id) in self.browser.current_url
assert (
self.issues_list.global_selection.get_selected_project_slug() == self.project_3.slug
)
@patch("django.utils.timezone.now")
def test_issues_list_to_details_and_back_with_initial_project(self, mock_now):
"""
If user has a project defined in URL, if they visit an issue and then
return back to issues list, that project id should still exist in URL
"""
mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)
self.create_issues()
self.issues_list.visit_issue_list(
self.org.slug, query=u"?project={}".format(self.project_2.id)
)
self.issues_list.wait_for_issue()
assert u"project={}".format(self.project_2.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_2.slug
# select the issue
self.issues_list.navigate_to_issue(1)
# project id should remain in URL
assert u"project={}".format(self.project_2.id) in self.browser.current_url
# going back to issues list should keep project in URL
self.issues_list.issue_details.go_back_to_issues()
self.issues_list.wait_for_issue()
# project id should remain in URL
assert u"project={}".format(self.project_2.id) in self.browser.current_url
# can select a different project
self.issues_list.global_selection.select_project_by_slug(self.project_3.slug)
self.issues_list.wait_until_loaded()
assert u"project={}".format(self.project_3.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_3.slug
@patch("django.utils.timezone.now")
def test_issue_details_to_stream_with_initial_env_no_project(self, mock_now):
"""
Visiting issue details directly with no project but with an environment defined in URL.
When navigating back to issues stream, should keep environment and project in context.
"""
mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)
self.create_issues()
self.issue_details.visit_issue_in_environment(self.org.slug, self.issue_2.group.id, "prod")
# Make sure issue's project is in URL and in header
assert u"project={}".format(self.project_2.id) in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_2.slug
# environment should be in URL and header
assert u"environment=prod" in self.browser.current_url
assert self.issue_details.global_selection.get_selected_environment() == "prod"
# going back to issues list should keep project and environment in URL
self.issue_details.go_back_to_issues()
self.issues_list.wait_for_issue()
# project id should remain in URL
assert u"project={}".format(self.project_2.id) in self.browser.current_url
assert u"environment=prod" in self.browser.current_url
assert self.issues_list.global_selection.get_selected_project_slug() == self.project_2.slug
assert self.issue_details.global_selection.get_selected_environment() == "prod"
@patch("django.utils.timezone.now")
def test_issue_details_to_stream_with_initial_env_no_project_with_multi_project_feature(
self, mock_now
):
"""
Visiting issue details directly with no project but with an environment defined in URL.
When navigating back to issues stream, should keep environment and project in context.
"""
with self.feature("organizations:global-views"):
mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)
self.create_issues()
self.issue_details.visit_issue_in_environment(
self.org.slug, self.issue_2.group.id, "prod"
)
# Make sure issue's project is in URL and in header
assert u"project={}".format(self.project_2.id) in self.browser.current_url
assert (
self.issues_list.global_selection.get_selected_project_slug() == self.project_2.slug
)
# environment should be in URL and header
assert u"environment=prod" in self.browser.current_url
assert self.issue_details.global_selection.get_selected_environment() == "prod"
# can change environment so that when you navigate back to issues stream,
# it keeps environment as selected
# going back to issues list should keep project and environment in URL
self.issue_details.go_back_to_issues()
self.issues_list.wait_for_issue()
# project id should remain in URL
assert u"project={}".format(self.project_2.id) in self.browser.current_url
assert u"environment=prod" in self.browser.current_url
assert (
self.issues_list.global_selection.get_selected_project_slug() == self.project_2.slug
)
assert self.issue_details.global_selection.get_selected_environment() == "prod"
|
beeftornado/sentry
|
tests/acceptance/test_organization_global_selection_header.py
|
Python
|
bsd-3-clause
| 20,258
|
[
"VisIt"
] |
271b5a49759628b5ca5bca8e41bc739b9151e9d35e0c1700e81f99c5002dba26
|
## \file
## \ingroup tutorial_roofit
## \notebook
## Addition and convolution: options for plotting components of composite pdfs.
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Set up composite pdf
# --------------------------------------
# Declare observable x
x = ROOT.RooRealVar("x", "x", 0, 10)
# Create two Gaussian PDFs g1(x,mean,sigma1) and g2(x,mean,sigma2) and
# their parameters
mean = ROOT.RooRealVar("mean", "mean of gaussians", 5)
sigma1 = ROOT.RooRealVar("sigma1", "width of gaussians", 0.5)
sigma2 = ROOT.RooRealVar("sigma2", "width of gaussians", 1)
sig1 = ROOT.RooGaussian("sig1", "Signal component 1", x, mean, sigma1)
sig2 = ROOT.RooGaussian("sig2", "Signal component 2", x, mean, sigma2)
# Sum the signal components into a composite signal pdf
sig1frac = ROOT.RooRealVar(
"sig1frac", "fraction of component 1 in signal", 0.8, 0., 1.)
sig = ROOT.RooAddPdf(
"sig", "Signal", ROOT.RooArgList(sig1, sig2), ROOT.RooArgList(sig1frac))
# Build Chebychev polynomial pdf
a0 = ROOT.RooRealVar("a0", "a0", 0.5, 0., 1.)
a1 = ROOT.RooRealVar("a1", "a1", -0.2, 0., 1.)
bkg1 = ROOT.RooChebychev("bkg1", "Background 1",
x, ROOT.RooArgList(a0, a1))
# Build exponential pdf
alpha = ROOT.RooRealVar("alpha", "alpha", -1)
bkg2 = ROOT.RooExponential("bkg2", "Background 2", x, alpha)
# Sum the background components into a composite background pdf
bkg1frac = ROOT.RooRealVar(
    "bkg1frac", "fraction of component 1 in background", 0.2, 0., 1.)
bkg = ROOT.RooAddPdf(
    "bkg", "Background", ROOT.RooArgList(bkg1, bkg2), ROOT.RooArgList(bkg1frac))
# Sum the composite signal and background
bkgfrac = ROOT.RooRealVar("bkgfrac", "fraction of background", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model", "g1+g2+a", ROOT.RooArgList(bkg, sig), ROOT.RooArgList(bkgfrac))
# Set up basic plot with data and full pdf
# ------------------------------------------------------------------------------
# Generate a data sample of 1000 events in x from model
data = model.generate(ROOT.RooArgSet(x), 1000)
# Plot data and complete PDF overlaid
xframe = x.frame(ROOT.RooFit.Title(
"Component plotting of pdf=(sig1+sig2)+(bkg1+bkg2)"))
data.plotOn(xframe)
model.plotOn(xframe)
# Clone xframe for use below
xframe2 = xframe.Clone("xframe2")
# Make component by object reference
# --------------------------------------------------------------------
# Plot single background component specified by object reference
ras_bkg = ROOT.RooArgSet(bkg)
model.plotOn(xframe, ROOT.RooFit.Components(
ras_bkg), ROOT.RooFit.LineColor(ROOT.kRed))
# Plot single background component specified by object reference
ras_bkg2 = ROOT.RooArgSet(bkg2)
model.plotOn(xframe, ROOT.RooFit.Components(ras_bkg2), ROOT.RooFit.LineStyle(
ROOT.kDashed), ROOT.RooFit.LineColor(ROOT.kRed))
# Plot multiple background components specified by object reference
# Note that specified components may occur at any level in the object tree
# (e.g. bkg is a component of 'model' and 'sig2' is a component of 'sig')
ras_bkg_sig2 = ROOT.RooArgSet(bkg, sig2)
model.plotOn(xframe, ROOT.RooFit.Components(ras_bkg_sig2),
ROOT.RooFit.LineStyle(ROOT.kDotted))
# Make component by name/regexp
# ------------------------------------------------------------
# Plot single background component specified by name
model.plotOn(xframe2, ROOT.RooFit.Components(
"bkg"), ROOT.RooFit.LineColor(ROOT.kCyan))
# Plot multiple background components specified by name
model.plotOn(
xframe2,
ROOT.RooFit.Components("bkg1,sig2"),
ROOT.RooFit.LineStyle(
ROOT.kDotted),
ROOT.RooFit.LineColor(
ROOT.kCyan))
# Plot multiple background components specified by regular expression on
# name
model.plotOn(
xframe2,
ROOT.RooFit.Components("sig*"),
ROOT.RooFit.LineStyle(
ROOT.kDashed),
ROOT.RooFit.LineColor(
ROOT.kCyan))
# Plot multiple background components specified by multiple regular
# expressions on name
model.plotOn(
xframe2,
ROOT.RooFit.Components("bkg1,sig*"),
ROOT.RooFit.LineStyle(
ROOT.kDashed),
ROOT.RooFit.LineColor(
ROOT.kYellow),
ROOT.RooFit.Invisible())
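# RooFit.Invisible() adds the curve to the frame without drawing it, so it
# can still be retrieved from the frame later.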
# Draw the frame on the canvas
c = ROOT.TCanvas("rf205_compplot", "rf205_compplot", 800, 400)
c.Divide(2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
xframe.GetYaxis().SetTitleOffset(1.4)
xframe.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
xframe2.GetYaxis().SetTitleOffset(1.4)
xframe2.Draw()
c.SaveAs("rf205_compplot.png")
|
root-mirror/root
|
tutorials/roofit/rf205_compplot.py
|
Python
|
lgpl-2.1
| 4,541
|
[
"Gaussian"
] |
dfad4d164d8be79ab5887d26905da075207cde072902162863618e1002c61c00
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.orchestration.airflow.service_v1.services.environments import (
EnvironmentsAsyncClient,
)
from google.cloud.orchestration.airflow.service_v1.services.environments import (
EnvironmentsClient,
)
from google.cloud.orchestration.airflow.service_v1.services.environments import pagers
from google.cloud.orchestration.airflow.service_v1.services.environments import (
transports,
)
from google.cloud.orchestration.airflow.service_v1.types import environments
from google.cloud.orchestration.airflow.service_v1.types import operations
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
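# dummy client certificate source used to exercise the mTLS code paths in
# the tests below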
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert EnvironmentsClient._get_default_mtls_endpoint(None) is None
assert (
EnvironmentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert EnvironmentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [EnvironmentsClient, EnvironmentsAsyncClient,])
def test_environments_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "composer.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.EnvironmentsGrpcTransport, "grpc"),
(transports.EnvironmentsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_environments_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [EnvironmentsClient, EnvironmentsAsyncClient,])
def test_environments_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "composer.googleapis.com:443"
def test_environments_client_get_transport_class():
transport = EnvironmentsClient.get_transport_class()
available_transports = [
transports.EnvironmentsGrpcTransport,
]
assert transport in available_transports
transport = EnvironmentsClient.get_transport_class("grpc")
assert transport == transports.EnvironmentsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
EnvironmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EnvironmentsClient)
)
@mock.patch.object(
EnvironmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EnvironmentsAsyncClient),
)
def test_environments_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(EnvironmentsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(EnvironmentsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc", "true"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc", "false"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
EnvironmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EnvironmentsClient)
)
@mock.patch.object(
EnvironmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EnvironmentsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_environments_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [EnvironmentsClient, EnvironmentsAsyncClient])
@mock.patch.object(
EnvironmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EnvironmentsClient)
)
@mock.patch.object(
EnvironmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EnvironmentsAsyncClient),
)
def test_environments_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_environments_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
EnvironmentsClient,
transports.EnvironmentsGrpcTransport,
"grpc",
grpc_helpers,
),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_environments_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_environments_client_client_options_from_dict():
with mock.patch(
"google.cloud.orchestration.airflow.service_v1.services.environments.transports.EnvironmentsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = EnvironmentsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
EnvironmentsClient,
transports.EnvironmentsGrpcTransport,
"grpc",
grpc_helpers,
),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_environments_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"composer.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="composer.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [environments.CreateEnvironmentRequest, dict,])
def test_create_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CreateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
client.create_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CreateEnvironmentRequest()
@pytest.mark.asyncio
async def test_create_environment_async(
transport: str = "grpc_asyncio", request_type=environments.CreateEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CreateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_environment_async_from_dict():
await test_create_environment_async(request_type=dict)
def test_create_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.CreateEnvironmentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.CreateEnvironmentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_environment_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_environment(
parent="parent_value",
environment=environments.Environment(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].environment
mock_val = environments.Environment(name="name_value")
assert arg == mock_val
def test_create_environment_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_environment(
environments.CreateEnvironmentRequest(),
parent="parent_value",
environment=environments.Environment(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_environment_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_environment(
parent="parent_value",
environment=environments.Environment(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].environment
mock_val = environments.Environment(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_environment_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_environment(
environments.CreateEnvironmentRequest(),
parent="parent_value",
environment=environments.Environment(name="name_value"),
)
@pytest.mark.parametrize("request_type", [environments.GetEnvironmentRequest, dict,])
def test_get_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = environments.Environment(
name="name_value",
uuid="uuid_value",
state=environments.Environment.State.CREATING,
)
response = client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.GetEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environments.Environment)
assert response.name == "name_value"
assert response.uuid == "uuid_value"
assert response.state == environments.Environment.State.CREATING
def test_get_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
client.get_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.GetEnvironmentRequest()
@pytest.mark.asyncio
async def test_get_environment_async(
transport: str = "grpc_asyncio", request_type=environments.GetEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.Environment(
name="name_value",
uuid="uuid_value",
state=environments.Environment.State.CREATING,
)
)
response = await client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.GetEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environments.Environment)
assert response.name == "name_value"
assert response.uuid == "uuid_value"
assert response.state == environments.Environment.State.CREATING
@pytest.mark.asyncio
async def test_get_environment_async_from_dict():
await test_get_environment_async(request_type=dict)
def test_get_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.GetEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
call.return_value = environments.Environment()
client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.GetEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.Environment()
)
await client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_environment_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = environments.Environment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_environment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_environment_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_environment(
environments.GetEnvironmentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_environment_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.Environment()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_environment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_environment_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_environment(
environments.GetEnvironmentRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [environments.ListEnvironmentsRequest, dict,])
def test_list_environments(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environments.ListEnvironmentsResponse(
next_page_token="next_page_token_value",
)
response = client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.ListEnvironmentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEnvironmentsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_environments_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
client.list_environments()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.ListEnvironmentsRequest()
@pytest.mark.asyncio
async def test_list_environments_async(
transport: str = "grpc_asyncio", request_type=environments.ListEnvironmentsRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.ListEnvironmentsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.ListEnvironmentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEnvironmentsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_environments_async_from_dict():
await test_list_environments_async(request_type=dict)
def test_list_environments_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.ListEnvironmentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
call.return_value = environments.ListEnvironmentsResponse()
client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_environments_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.ListEnvironmentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.ListEnvironmentsResponse()
)
await client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_environments_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environments.ListEnvironmentsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_environments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_environments_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_environments(
environments.ListEnvironmentsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_environments_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.ListEnvironmentsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_environments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_environments_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_environments(
environments.ListEnvironmentsRequest(), parent="parent_value",
)
def test_list_environments_pager(transport_name: str = "grpc"):
client = EnvironmentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
environments.ListEnvironmentsResponse(
environments=[
environments.Environment(),
environments.Environment(),
environments.Environment(),
],
next_page_token="abc",
),
environments.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(),], next_page_token="ghi",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(), environments.Environment(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
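        # For reference, to_grpc_metadata((("parent", ""),)) is expected to
        # produce the routing header pair ("x-goog-request-params", "parent=");
        # the exact rendering is an assumption based on gapic_v1's documented
        # behavior.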
pager = client.list_environments(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, environments.Environment) for i in results)
def test_list_environments_pages(transport_name: str = "grpc"):
client = EnvironmentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
environments.ListEnvironmentsResponse(
environments=[
environments.Environment(),
environments.Environment(),
environments.Environment(),
],
next_page_token="abc",
),
environments.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(),], next_page_token="ghi",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(), environments.Environment(),],
),
RuntimeError,
)
pages = list(client.list_environments(request={}).pages)
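        # The final fake response above sets no next_page_token, so proto3
        # reports its default value "" for the last page.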
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_environments_async_pager():
    client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
environments.ListEnvironmentsResponse(
environments=[
environments.Environment(),
environments.Environment(),
environments.Environment(),
],
next_page_token="abc",
),
environments.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(),], next_page_token="ghi",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(), environments.Environment(),],
),
RuntimeError,
)
async_pager = await client.list_environments(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, environments.Environment) for i in responses)
@pytest.mark.asyncio
async def test_list_environments_async_pages():
    client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
environments.ListEnvironmentsResponse(
environments=[
environments.Environment(),
environments.Environment(),
environments.Environment(),
],
next_page_token="abc",
),
environments.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(),], next_page_token="ghi",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(), environments.Environment(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_environments(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [environments.UpdateEnvironmentRequest, dict,])
def test_update_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.UpdateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
client.update_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.UpdateEnvironmentRequest()
@pytest.mark.asyncio
async def test_update_environment_async(
transport: str = "grpc_asyncio", request_type=environments.UpdateEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.UpdateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_environment_async_from_dict():
await test_update_environment_async(request_type=dict)
def test_update_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.UpdateEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.UpdateEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_update_environment_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_environment(
name="name_value",
environment=environments.Environment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].environment
mock_val = environments.Environment(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_environment_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_environment(
environments.UpdateEnvironmentRequest(),
name="name_value",
environment=environments.Environment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_environment_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_environment(
name="name_value",
environment=environments.Environment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].environment
mock_val = environments.Environment(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_environment_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_environment(
environments.UpdateEnvironmentRequest(),
name="name_value",
environment=environments.Environment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [environments.DeleteEnvironmentRequest, dict,])
def test_delete_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.DeleteEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
client.delete_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.DeleteEnvironmentRequest()
@pytest.mark.asyncio
async def test_delete_environment_async(
transport: str = "grpc_asyncio", request_type=environments.DeleteEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.DeleteEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_environment_async_from_dict():
await test_delete_environment_async(request_type=dict)
def test_delete_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.DeleteEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.DeleteEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_environment_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_environment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_environment_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_environment(
environments.DeleteEnvironmentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_environment_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_environment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_environment_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_environment(
environments.DeleteEnvironmentRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = EnvironmentsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = EnvironmentsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = EnvironmentsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.EnvironmentsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.EnvironmentsGrpcTransport,
transports.EnvironmentsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.EnvironmentsGrpcTransport,)
def test_environments_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.EnvironmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_environments_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.orchestration.airflow.service_v1.services.environments.transports.EnvironmentsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.EnvironmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_environment",
"get_environment",
"list_environments",
"update_environment",
"delete_environment",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_environments_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.orchestration.airflow.service_v1.services.environments.transports.EnvironmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EnvironmentsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_environments_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.orchestration.airflow.service_v1.services.environments.transports.EnvironmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EnvironmentsTransport()
adc.assert_called_once()
def test_environments_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EnvironmentsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EnvironmentsGrpcTransport,
transports.EnvironmentsGrpcAsyncIOTransport,
],
)
def test_environments_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.EnvironmentsGrpcTransport, grpc_helpers),
(transports.EnvironmentsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_environments_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"composer.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="composer.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_environments_host_no_port():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="composer.googleapis.com"
),
)
assert client.transport._host == "composer.googleapis.com:443"
def test_environments_host_with_port():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="composer.googleapis.com:8000"
),
)
assert client.transport._host == "composer.googleapis.com:8000"
def test_environments_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EnvironmentsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_environments_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EnvironmentsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_environments_grpc_lro_client():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_environments_grpc_lro_async_client():
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_environment_path():
project = "squid"
location = "clam"
environment = "whelk"
expected = "projects/{project}/locations/{location}/environments/{environment}".format(
project=project, location=location, environment=environment,
)
actual = EnvironmentsClient.environment_path(project, location, environment)
assert expected == actual
def test_parse_environment_path():
expected = {
"project": "octopus",
"location": "oyster",
"environment": "nudibranch",
}
path = EnvironmentsClient.environment_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_environment_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = EnvironmentsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = EnvironmentsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = EnvironmentsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = EnvironmentsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = EnvironmentsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = EnvironmentsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = EnvironmentsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = EnvironmentsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = EnvironmentsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = EnvironmentsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.EnvironmentsTransport, "_prep_wrapped_messages"
) as prep:
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.EnvironmentsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = EnvironmentsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport),
(EnvironmentsAsyncClient, transports.EnvironmentsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-orchestration-airflow
|
tests/unit/gapic/service_v1/test_environments.py
|
Python
|
apache-2.0
| 97,014
|
[
"Octopus"
] |
63a9a998af4c754f213305e030ec9023e5d8a3dea035d8876b1fd969a8c1f2ca
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing DIRAC, implemented as an easyblock
"""
import os
import re
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
class EB_DIRAC(CMakeMake):
"""Support for building/installing DIRAC."""
def configure_step(self):
"""Custom configuration procedure for DIRAC."""
        # make very sure the install directory isn't there yet, since it may cause problems if it is used (forced rebuild)
if os.path.exists(self.installdir):
self.log.warning("Found existing install directory %s, removing it to avoid problems", self.installdir)
try:
shutil.rmtree(self.installdir)
except OSError as err:
raise EasyBuildError("Failed to remove existing install directory %s: %s", self.installdir, err)
self.cfg['separate_build_dir'] = True
self.cfg.update('configopts', "-DENABLE_MPI=ON -DCMAKE_BUILD_TYPE=release")
# complete configuration with configure_method of parent
super(EB_DIRAC, self).configure_step()
def test_step(self):
"""Custom built-in test procedure for DIRAC."""
if self.cfg['runtest']:
# set up test environment
# see http://diracprogram.org/doc/release-14/installation/testing.html
env.setvar('DIRAC_TMPDIR', tempfile.mkdtemp(prefix='dirac-test-'))
env.setvar('DIRAC_MPI_COMMAND', self.toolchain.mpi_cmd_for('', self.cfg['parallel']))
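            # mpi_cmd_for('', N) returns the bare MPI launcher prefix for N
            # ranks, e.g. roughly "mpirun -n 4" with an OpenMPI-based toolchain
            # (illustrative; the exact command depends on the toolchain).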
# run tests (may take a while, especially if some tests take a while to time out)
self.log.info("Running tests may take a while, especially if some tests timeout (default timeout is 1500s)")
cmd = "make test"
out, ec = run_cmd(cmd, simple=False, log_all=False, log_ok=False)
# check that majority of tests pass
# some may fail due to timeout, but that's acceptable
# cfr. https://groups.google.com/forum/#!msg/dirac-users/zEd5-xflBnY/OQ1pSbuX810J
# over 90% of tests should pass
passed_regex = re.compile('^(9|10)[0-9.]+% tests passed', re.M)
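            # e.g. this matches a ctest summary line like
            #   "97% tests passed, 6 tests failed out of 200"
            # but not one starting with "85% tests passed" (illustrative output)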
if not passed_regex.search(out) and not self.dry_run:
raise EasyBuildError("Too many failed tests; '%s' not found in test output: %s",
passed_regex.pattern, out)
# extract test results
test_result_regex = re.compile(r'^\s*[0-9]+/[0-9]+ Test \s*#[0-9]+: .*', re.M)
test_results = test_result_regex.findall(out)
if test_results:
self.log.info("Found %d test results: %s", len(test_results), test_results)
elif self.dry_run:
# dummy test result
test_results = ["1/1 Test #1: dft_alda_xcfun ............................. Passed 72.29 sec"]
else:
raise EasyBuildError("Couldn't find *any* test results?")
test_count_regex = re.compile(r'^\s*[0-9]+/([0-9]+)')
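            # the denominator of the first result line gives the total test
            # count, e.g. "1/250 Test #1: ..." -> 250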
res = test_count_regex.search(test_results[0])
if res:
test_count = int(res.group(1))
elif self.dry_run:
# a single dummy test result
test_count = 1
else:
raise EasyBuildError("Failed to determine total test count from %s using regex '%s'",
test_results[0], test_count_regex.pattern)
if len(test_results) != test_count:
raise EasyBuildError("Expected to find %s test results, but found %s", test_count, len(test_results))
# check test results, only 'Passed' or 'Timeout' are acceptable outcomes
faulty_tests = []
for test_result in test_results:
if ' Passed ' not in test_result:
self.log.warning("Found failed test: %s", test_result)
if '***Timeout' not in test_result:
faulty_tests.append(test_result)
if faulty_tests:
raise EasyBuildError("Found tests failing due to something else than timeout: %s", faulty_tests)
def sanity_check_step(self):
"""Custom sanity check for DIRAC."""
custom_paths = {
'files': ['bin/pam-dirac'],
'dirs': ['share/dirac'],
}
super(EB_DIRAC, self).sanity_check_step(custom_paths=custom_paths)
|
hpcleuven/easybuild-easyblocks
|
easybuild/easyblocks/d/dirac.py
|
Python
|
gpl-2.0
| 5,737
|
[
"DIRAC"
] |
415905ee4144768533ac57fc5243ba6535ffd5cb2a4c9136f4142b3ce334ab4a
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
import matplotlib
import pytest
import MDAnalysis as mda
from MDAnalysisTests.datafiles import (GRO, XTC, TPR, DihedralArray,
DihedralsArray, RamaArray, GLYRamaArray,
JaninArray, LYSJaninArray, PDB_rama,
PDB_janin)
from MDAnalysis.analysis.dihedrals import Dihedral, Ramachandran, Janin
class TestDihedral(object):
@pytest.fixture()
def atomgroup(self):
u = mda.Universe(GRO, XTC)
ag = u.select_atoms("(resid 4 and name N CA C) or (resid 5 and name N)")
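        # these four backbone atoms (N, CA, C of residue 4 plus N of residue 5)
        # span a single psi-like dihedral; any ordered set of four atoms works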
return ag
def test_dihedral(self, atomgroup):
dihedral = Dihedral([atomgroup]).run()
test_dihedral = np.load(DihedralArray)
assert_almost_equal(dihedral.results.angles, test_dihedral, 5,
err_msg="error: dihedral angles should "
"match test values")
def test_dihedral_single_frame(self, atomgroup):
dihedral = Dihedral([atomgroup]).run(start=5, stop=6)
test_dihedral = [np.load(DihedralArray)[5]]
assert_almost_equal(dihedral.results.angles, test_dihedral, 5,
err_msg="error: dihedral angles should "
"match test vales")
def test_atomgroup_list(self, atomgroup):
dihedral = Dihedral([atomgroup, atomgroup]).run()
test_dihedral = np.load(DihedralsArray)
assert_almost_equal(dihedral.results.angles, test_dihedral, 5,
err_msg="error: dihedral angles should "
"match test values")
def test_enough_atoms(self, atomgroup):
with pytest.raises(ValueError):
dihedral = Dihedral([atomgroup[:2]]).run()
def test_dihedral_attr_warning(self, atomgroup):
dihedral = Dihedral([atomgroup]).run(stop=2)
wmsg = "The `angle` attribute was deprecated in MDAnalysis 2.0.0"
with pytest.warns(DeprecationWarning, match=wmsg):
assert_equal(dihedral.angles, dihedral.results.angles)
class TestRamachandran(object):
@pytest.fixture()
def universe(self):
return mda.Universe(GRO, XTC)
@pytest.fixture()
def rama_ref_array(self):
return np.load(RamaArray)
def test_ramachandran(self, universe, rama_ref_array):
rama = Ramachandran(universe.select_atoms("protein")).run()
assert_almost_equal(rama.results.angles, rama_ref_array, 5,
err_msg="error: dihedral angles should "
"match test values")
def test_ramachandran_single_frame(self, universe, rama_ref_array):
rama = Ramachandran(universe.select_atoms("protein")).run(
start=5, stop=6)
assert_almost_equal(rama.results.angles[0], rama_ref_array[5], 5,
err_msg="error: dihedral angles should "
"match test values")
def test_ramachandran_residue_selections(self, universe):
rama = Ramachandran(universe.select_atoms("resname GLY")).run()
test_rama = np.load(GLYRamaArray)
assert_almost_equal(rama.results.angles, test_rama, 5,
err_msg="error: dihedral angles should "
"match test values")
def test_outside_protein_length(self, universe):
with pytest.raises(ValueError):
rama = Ramachandran(universe.select_atoms("resid 220"),
check_protein=True).run()
def test_outside_protein_unchecked(self, universe):
rama = Ramachandran(universe.select_atoms("resid 220"),
check_protein=False).run()
def test_protein_ends(self, universe):
with pytest.warns(UserWarning) as record:
rama = Ramachandran(universe.select_atoms("protein"),
check_protein=True).run()
assert len(record) == 1
def test_None_removal(self):
with pytest.warns(UserWarning):
u = mda.Universe(PDB_rama)
rama = Ramachandran(u.select_atoms("protein").residues[1:-1])
def test_plot(self, universe):
ax = Ramachandran(universe.select_atoms("resid 5-10")).run().plot(ref=True)
assert isinstance(ax, matplotlib.axes.Axes), \
"Ramachandran.plot() did not return and Axes instance"
def test_ramachandran_attr_warning(self, universe):
rama = Ramachandran(universe.select_atoms("protein")).run(stop=2)
wmsg = "The `angle` attribute was deprecated in MDAnalysis 2.0.0"
with pytest.warns(DeprecationWarning, match=wmsg):
assert_equal(rama.angles, rama.results.angles)
class TestJanin(object):
@pytest.fixture()
def universe(self):
return mda.Universe(GRO, XTC)
@pytest.fixture()
def universe_tpr(self):
return mda.Universe(TPR, XTC)
@pytest.fixture()
def janin_ref_array(self):
return np.load(JaninArray)
def test_janin(self, universe, janin_ref_array):
self._test_janin(universe, janin_ref_array)
def test_janin_tpr(self, universe_tpr, janin_ref_array):
"""Test that CYSH are filtered (#2898)"""
self._test_janin(universe_tpr, janin_ref_array)
def _test_janin(self, u, ref_array):
janin = Janin(u.select_atoms("protein")).run()
# Test precision lowered to account for platform differences with osx
assert_almost_equal(janin.results.angles, ref_array, 3,
err_msg="error: dihedral angles should "
"match test values")
def test_janin_single_frame(self, universe, janin_ref_array):
janin = Janin(universe.select_atoms("protein")).run(start=5, stop=6)
assert_almost_equal(janin.results.angles[0], janin_ref_array[5], 3,
err_msg="error: dihedral angles should "
"match test values")
def test_janin_residue_selections(self, universe):
janin = Janin(universe.select_atoms("resname LYS")).run()
test_janin = np.load(LYSJaninArray)
assert_almost_equal(janin.results.angles, test_janin, 3,
err_msg="error: dihedral angles should "
"match test values")
def test_outside_protein_length(self, universe):
with pytest.raises(ValueError):
janin = Janin(universe.select_atoms("resid 220")).run()
def test_remove_residues(self, universe):
with pytest.warns(UserWarning):
janin = Janin(universe.select_atoms("protein")).run()
def test_atom_selection(self):
with pytest.raises(ValueError):
u = mda.Universe(PDB_janin)
janin = Janin(u.select_atoms("protein and not resname ALA CYS GLY "
"PRO SER THR VAL"))
def test_plot(self, universe):
ax = Janin(universe.select_atoms("resid 5-10")).run().plot(ref=True)
assert isinstance(ax, matplotlib.axes.Axes), \
"Ramachandran.plot() did not return and Axes instance"
def test_janin_attr_warning(self, universe):
janin = Janin(universe.select_atoms("protein")).run(stop=2)
wmsg = "The `angle` attribute was deprecated in MDAnalysis 2.0.0"
with pytest.warns(DeprecationWarning, match=wmsg):
assert_equal(janin.angles, janin.results.angles)
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_dihedrals.py
|
Python
|
gpl-2.0
| 8,616
|
[
"MDAnalysis"
] |
4748a2f0274a77770053298b278c8af7c9f22742d61dfab09f332a0b8848da4f
|
"""
Script to verify all examples in the readme.
Simply execute
python test_readme_examples.py
"""
from __future__ import print_function, division
import numpy as np
from scipy import misc
def main():
example_standard_situation()
example_heavy_augmentations()
example_show()
#example_grayscale()
example_determinism()
example_keypoints()
example_single_augmenters()
example_withchannels()
example_unusual_distributions()
example_hooks()
example_background_augment_batches()
example_background_classes()
def example_standard_situation():
print("Example: Standard Situation")
# -------
# dummy functions to make the example runnable here
def load_batch(batch_idx):
return np.random.randint(0, 255, (1, 16, 16, 3), dtype=np.uint8)
def train_on_images(images):
pass
# -------
from imgaug import augmenters as iaa
seq = iaa.Sequential([
iaa.Crop(px=(0, 16)), # crop images from each side by 0 to 16px (randomly chosen)
iaa.Fliplr(0.5), # horizontally flip 50% of the images
iaa.GaussianBlur(sigma=(0, 3.0)) # blur images with a sigma of 0 to 3.0
])
for batch_idx in range(1000):
# 'images' should be either a 4D numpy array of shape (N, height, width, channels)
# or a list of 3D numpy arrays, each having shape (height, width, channels).
# Grayscale images must have shape (height, width, 1) each.
# All images must have numpy's dtype uint8. Values are expected to be in
# range 0-255.
images = load_batch(batch_idx)
images_aug = seq.augment_images(images)
train_on_images(images_aug)
# -----
# Make sure that the example really does something
if batch_idx == 0:
assert not np.array_equal(images, images_aug)
def example_heavy_augmentations():
print("Example: Heavy Augmentations")
import imgaug as ia
from imgaug import augmenters as iaa
# random example images
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
st = lambda aug: iaa.Sometimes(0.5, aug)
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.5), # vertically flip 50% of all images
st(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
st(iaa.GaussianBlur((0, 3.0))), # blur images with a sigma between 0 and 3.0
st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5)), # add gaussian noise to images
st(iaa.Dropout((0.0, 0.1), per_channel=0.5)), # randomly remove up to 10% of the pixels
st(iaa.Add((-10, 10), per_channel=0.5)), # change brightness of images (by -10 to 10 of original value)
st(iaa.Multiply((0.5, 1.5), per_channel=0.5)), # change brightness of images (50-150% of original value)
st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)), # improve or worsen the contrast
st(iaa.Grayscale((0.0, 1.0))), # blend with grayscale image
st(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
translate_px={"x": (-16, 16), "y": (-16, 16)}, # translate by -16 to +16 pixels (per axis)
rotate=(-45, 45), # rotate by -45 to +45 degrees
shear=(-16, 16), # shear by -16 to +16 degrees
order=[0, 1], # use scikit-image's interpolation orders 0 (nearest neighbour) and 1 (bilinear)
            cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)) # apply elastic transformations with random strengths
],
random_order=True # do all of the above in random order
)
images_aug = seq.augment_images(images)
# -----
# Make sure that the example really does something
assert not np.array_equal(images, images_aug)
def example_show():
print("Example: Show")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])
# show an image with 8*8 augmented versions of image 0
seq.show_grid(images[0], cols=8, rows=8)
# Show an image with 8*8 augmented versions of image 0 and 8*8 augmented
# versions of image 1. The identical augmentations will be applied to
# image 0 and 1.
seq.show_grid([images[0], images[1]], cols=8, rows=8)
# this example is no longer necessary as the library can now handle 2D images
"""
def example_grayscale():
print("Example: Grayscale")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128), dtype=np.uint8)
seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])
# The library expects a list of images (3D inputs) or a single array (4D inputs).
# So we add an axis to our grayscale array to convert it to shape (16, 128, 128, 1).
images_aug = seq.augment_images(images[:, :, :, np.newaxis])
# -----
# Make sure that the example really does something
assert not np.array_equal(images, images_aug)
"""
def example_determinism():
print("Example: Determinism")
from imgaug import augmenters as iaa
# Standard scenario: You have N RGB-images and additionally 21 heatmaps per image.
# You want to augment each image and its heatmaps identically.
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
heatmaps = np.random.randint(0, 255, (16, 128, 128, 21), dtype=np.uint8)
seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(translate_px={"x": (-40, 40)})])
# Convert the stochastic sequence of augmenters to a deterministic one.
# The deterministic sequence will always apply the exactly same effects to the images.
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
images_aug = seq_det.augment_images(images)
heatmaps_aug = seq_det.augment_images(heatmaps)
# -----
# Make sure that the example really does something
import imgaug as ia
assert not np.array_equal(images, images_aug)
assert not np.array_equal(heatmaps, heatmaps_aug)
images_show = []
for img_idx in range(len(images)):
images_show.extend([images[img_idx], images_aug[img_idx], heatmaps[img_idx][..., 0:3], heatmaps_aug[img_idx][..., 0:3]])
ia.show_grid(images_show, cols=4)
def example_keypoints():
print("Example: Keypoints")
import imgaug as ia
from imgaug import augmenters as iaa
from scipy import misc
import random
images = np.random.randint(0, 50, (4, 128, 128, 3), dtype=np.uint8)
# Generate random keypoints.
# The augmenters expect a list of imgaug.KeypointsOnImage.
keypoints_on_images = []
for image in images:
height, width = image.shape[0:2]
keypoints = []
for _ in range(4):
x = random.randint(0, width-1)
y = random.randint(0, height-1)
keypoints.append(ia.Keypoint(x=x, y=y))
keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=image.shape))
seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(scale=(0.5, 0.7))])
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
# augment keypoints and images
images_aug = seq_det.augment_images(images)
keypoints_aug = seq_det.augment_keypoints(keypoints_on_images)
# Example code to show each image and print the new keypoints coordinates
for img_idx, (image_before, image_after, keypoints_before, keypoints_after) in enumerate(zip(images, images_aug, keypoints_on_images, keypoints_aug)):
image_before = keypoints_before.draw_on_image(image_before)
image_after = keypoints_after.draw_on_image(image_after)
misc.imshow(np.concatenate((image_before, image_after), axis=1)) # before and after
for kp_idx, keypoint in enumerate(keypoints_after.keypoints):
keypoint_old = keypoints_on_images[img_idx].keypoints[kp_idx]
x_old, y_old = keypoint_old.x, keypoint_old.y
x_new, y_new = keypoint.x, keypoint.y
print("[Keypoints for image #%d] before aug: x=%d y=%d | after aug: x=%d y=%d" % (img_idx, x_old, y_old, x_new, y_new))
def example_single_augmenters():
print("Example: Single Augmenters")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
flipper = iaa.Fliplr(1.0) # always horizontally flip each input image
images[0] = flipper.augment_image(images[0]) # horizontally flip image 0
vflipper = iaa.Flipud(0.9) # vertically flip each input image with 90% probability
images[1] = vflipper.augment_image(images[1]) # probably vertically flip image 1
blurer = iaa.GaussianBlur(3.0)
images[2] = blurer.augment_image(images[2]) # blur image 2 by a sigma of 3.0
images[3] = blurer.augment_image(images[3]) # blur image 3 by a sigma of 3.0 too
translater = iaa.Affine(translate_px={"x": -16}) # move each input image by 16px to the left
images[4] = translater.augment_image(images[4]) # move image 4 to the left
scaler = iaa.Affine(scale={"y": (0.8, 1.2)}) # scale each input image to 80-120% on the y axis
images[5] = scaler.augment_image(images[5]) # scale image 5 by 80-120% on the y axis
def example_withchannels():
print("Example: WithChannels")
from imgaug import augmenters as iaa
import numpy as np
# fake RGB images
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# add a random value from the range (-30, 30) to the first two channels of
# input images (e.g. to the R and G channels)
aug = iaa.WithChannels(
channels=[0, 1],
children=iaa.Add((-30, 30))
)
images_aug = aug.augment_images(images)
def example_unusual_distributions():
print("Example: Unusual Distributions")
from imgaug import augmenters as iaa
from imgaug import parameters as iap
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# Blur by a value sigma which is sampled from a uniform distribution
# of range 0.1 <= x < 3.0.
# The convenience shortcut for this is: iaa.GaussianBlur((0.1, 3.0))
blurer = iaa.GaussianBlur(iap.Uniform(0.1, 3.0))
images_aug = blurer.augment_images(images)
# Blur by a value sigma which is sampled from a normal distribution N(1.0, 0.1),
# i.e. sample a value that is usually around 1.0.
# Clip the resulting value so that it never gets below 0.1 or above 3.0.
blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0))
images_aug = blurer.augment_images(images)
# Same again, but this time the mean of the normal distribution is not constant,
# but comes itself from a uniform distribution between 0.5 and 1.5.
blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(iap.Uniform(0.5, 1.5), 0.1), 0.1, 3.0))
images_aug = blurer.augment_images(images)
# Use for sigma one of exactly three allowed values: 0.5, 1.0 or 1.5.
blurer = iaa.GaussianBlur(iap.Choice([0.5, 1.0, 1.5]))
images_aug = blurer.augment_images(images)
# Sample sigma from a discrete uniform distribution of range 1 <= sigma <= 5,
# i.e. sigma will have any of the following values: 1, 2, 3, 4, 5.
blurer = iaa.GaussianBlur(iap.DiscreteUniform(1, 5))
images_aug = blurer.augment_images(images)
def example_hooks():
print("Example: Hooks")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
# images and heatmaps, just arrays filled with value 30
images = np.ones((16, 128, 128, 3), dtype=np.uint8) * 30
heatmaps = np.ones((16, 128, 128, 21), dtype=np.uint8) * 30
# add vertical lines to see the effect of flip
images[:, 16:128-16, 120:124, :] = 120
heatmaps[:, 16:128-16, 120:124, :] = 120
seq = iaa.Sequential([
iaa.Fliplr(0.5, name="Flipper"),
iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
iaa.Dropout(0.02, name="Dropout"),
iaa.AdditiveGaussianNoise(scale=0.01*255, name="MyLittleNoise"),
iaa.AdditiveGaussianNoise(loc=32, scale=0.0001*255, name="SomeOtherNoise"),
iaa.Affine(translate_px={"x": (-40, 40)}, name="Affine")
])
# change the activated augmenters for heatmaps
def activator_heatmaps(images, augmenter, parents, default):
if augmenter.name in ["GaussianBlur", "Dropout", "MyLittleNoise"]:
return False
else:
# default value for all other augmenters
return default
hooks_heatmaps = ia.HooksImages(activator=activator_heatmaps)
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
images_aug = seq_det.augment_images(images)
heatmaps_aug = seq_det.augment_images(heatmaps, hooks=hooks_heatmaps)
# -----------
ia.show_grid(images_aug)
ia.show_grid(heatmaps_aug[..., 0:3])
def example_background_augment_batches():
print("Example: Background Augmentation via augment_batches()")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from skimage import data
# Number of batches and batch size for this example
nb_batches = 10
batch_size = 32
# Example augmentation sequence to run in the background
augseq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.CoarseDropout(p=0.1, size_percent=0.1)
])
# For simplicity, we use the same image here many times
astronaut = data.astronaut()
astronaut = ia.imresize_single_image(astronaut, (64, 64))
# Make batches out of the example image (here: 10 batches, each 32 times
# the example image)
batches = []
for _ in range(nb_batches):
batches.append(
np.array(
[astronaut for _ in range(batch_size)],
dtype=np.uint8
)
)
# Show the augmented images.
# Note that augment_batches() returns a generator.
for images_aug in augseq.augment_batches(batches, background=True):
misc.imshow(ia.draw_grid(images_aug, cols=8))
def example_background_classes():
print("Example: Background Augmentation via Classes")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from skimage import data
# Example augmentation sequence to run in the background.
augseq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.CoarseDropout(p=0.1, size_percent=0.1)
])
# A generator that loads batches from the hard drive.
def load_batches():
# Here, load 10 batches of size 4 each.
# You can also load an infinite amount of batches, if you don't train
# in epochs.
batch_size = 4
nb_batches = 10
# Here, for simplicity we just always use the same image.
astronaut = data.astronaut()
astronaut = ia.imresize_single_image(astronaut, (64, 64))
for i in range(nb_batches):
# A list containing all images of the batch.
batch_images = []
# A list containing IDs per image. This is not necessary for the
# background augmentation and here just used to showcase that you
# can transfer additional information.
batch_data = []
# Add some images to the batch.
for b in range(batch_size):
batch_images.append(astronaut)
batch_data.append((i, b))
# Create the batch object to send to the background processes.
batch = ia.Batch(
images=np.array(batch_images, dtype=np.uint8),
data=batch_data
)
yield batch
# background augmentation consists of two components:
    # (1) BatchLoader, which runs in a Thread and repeatedly calls a user-defined
# function (here: load_batches) to load batches (optionally with keypoints
# and additional information) and sends them to a queue of batches.
# (2) BackgroundAugmenter, which runs several background processes (on other
# CPU cores). Each process takes batches from the queue defined by (1),
# augments images/keypoints and sends them to another queue.
# The main process can then read augmented batches from the queue defined
# by (2).
batch_loader = ia.BatchLoader(load_batches)
bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
# Run until load_batches() returns nothing anymore. This also allows infinite
# training.
while True:
print("Next batch...")
batch = bg_augmenter.get_batch()
if batch is None:
print("Finished epoch.")
break
images_aug = batch.images_aug
print("Image IDs: ", batch.data)
misc.imshow(np.hstack(list(images_aug)))
batch_loader.terminate()
bg_augmenter.terminate()
if __name__ == "__main__":
main()
|
nektor211/imgaug
|
tests/test_readme_examples.py
|
Python
|
mit
| 17,754
|
[
"Gaussian"
] |
11c233333ef9c45938066a3773b7623b8ce72008b00acda0961b8030f7009413
|
"""
Unit tests for masquerade.
"""
import json
import pickle
from datetime import datetime
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from pytz import UTC
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware.masquerade import CourseMasquerade, MasqueradingKeyValueStore, get_masquerading_user_group
from courseware.tests.factories import StaffFactory
from courseware.tests.helpers import LoginEnrollmentTestCase, masquerade_as_group_member
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from nose.plugins.attrib import attr
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference, set_user_preference
from student.tests.factories import UserFactory
from xblock.runtime import DictKeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
class MasqueradeTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Base class for masquerade tests that sets up a test course and enrolls a user in the course.
"""
@classmethod
def setUpClass(cls):
super(MasqueradeTestCase, cls).setUpClass()
cls.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC)})
cls.info_page = ItemFactory.create(
category="course_info", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="updates"
)
cls.chapter = ItemFactory.create(
parent_location=cls.course.location,
category="chapter",
display_name="Test Section",
)
cls.sequential_display_name = "Test Masquerade Subsection"
cls.sequential = ItemFactory.create(
parent_location=cls.chapter.location,
category="sequential",
display_name=cls.sequential_display_name,
)
cls.vertical = ItemFactory.create(
parent_location=cls.sequential.location,
category="vertical",
display_name="Test Unit",
)
problem_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=2,
weight=2,
options=['Correct', 'Incorrect'],
correct_option='Correct'
)
cls.problem_display_name = "TestMasqueradeProblem"
cls.problem = ItemFactory.create(
parent_location=cls.vertical.location,
category='problem',
data=problem_xml,
display_name=cls.problem_display_name
)
def setUp(self):
super(MasqueradeTestCase, self).setUp()
self.test_user = self.create_user()
self.login(self.test_user.email, 'test')
self.enroll(self.course, True)
def get_courseware_page(self):
"""
Returns the server response for the courseware page.
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.block_id,
'section': self.sequential.location.block_id,
}
)
return self.client.get(url)
def get_course_info_page(self):
"""
Returns the server response for course info page.
"""
url = reverse(
'info',
kwargs={
'course_id': unicode(self.course.id),
}
)
return self.client.get(url)
def get_progress_page(self):
"""
Returns the server response for progress page.
"""
url = reverse(
'progress',
kwargs={
'course_id': unicode(self.course.id),
}
)
return self.client.get(url)
def verify_staff_debug_present(self, staff_debug_expected):
"""
Verifies that the staff debug control visibility is as expected (for staff only).
"""
content = self.get_courseware_page().content
self.assertIn(self.sequential_display_name, content, "Subsection should be visible")
self.assertEqual(staff_debug_expected, 'Staff Debug Info' in content)
def get_problem(self):
"""
Returns the JSON content for the problem in the course.
"""
problem_url = reverse(
'xblock_handler',
kwargs={
'course_id': unicode(self.course.id),
'usage_id': unicode(self.problem.location),
'handler': 'xmodule_handler',
'suffix': 'problem_get'
}
)
return self.client.get(problem_url)
def verify_show_answer_present(self, show_answer_expected):
"""
Verifies that "Show Answer" is only present when expected (for staff only).
"""
problem_html = json.loads(self.get_problem().content)['html']
self.assertIn(self.problem_display_name, problem_html)
self.assertEqual(show_answer_expected, "Show Answer" in problem_html)
def ensure_masquerade_as_group_member(self, partition_id, group_id):
"""
Installs a masquerade for the test_user and test course, to enable the
user to masquerade as belonging to the specific partition/group combination.
Also verifies that the call to install the masquerade was successful.
Arguments:
partition_id (int): the integer partition id, referring to partitions already
configured in the course.
        group_id (int): the integer group id, within the specified partition.
"""
self.assertEqual(200, masquerade_as_group_member(self.test_user, self.course, partition_id, group_id))
@attr(shard=1)
class NormalStudentVisibilityTest(MasqueradeTestCase):
"""
Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
"""
def create_user(self):
"""
Creates a normal student user.
"""
return UserFactory()
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_not_visible(self):
"""
Tests that staff debug control is not present for a student.
"""
self.verify_staff_debug_present(False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_not_visible(self):
"""
Tests that "Show Answer" is not visible for a student.
"""
self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
"""
Base class for tests of the masquerade behavior for a staff member.
"""
def create_user(self):
"""
Creates a staff user.
"""
return StaffFactory(course_key=self.course.id)
def update_masquerade(self, role, group_id=None, user_name=None):
"""
Toggle masquerade state.
"""
masquerade_url = reverse(
'masquerade_update',
kwargs={
'course_key_string': unicode(self.course.id),
}
)
response = self.client.post(
masquerade_url,
json.dumps({"role": role, "group_id": group_id, "user_name": user_name}),
"application/json"
)
self.assertEqual(response.status_code, 200)
return response
@attr(shard=1)
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as student.
"""
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_with_masquerade(self):
"""
Tests that staff debug control is not visible when masquerading as a student.
"""
# Verify staff initially can see staff debug
self.verify_staff_debug_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_staff_debug_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_staff_debug_present(True)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_for_staff(self):
"""
Tests that "Show Answer" is not visible when masquerading as a student.
"""
# Verify that staff initially can see "Show Answer".
self.verify_show_answer_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_show_answer_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_show_answer_present(True)
@attr(shard=1)
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
"""
Check for staff being able to masquerade as a specific student.
"""
def setUp(self):
super(TestStaffMasqueradeAsSpecificStudent, self).setUp()
self.student_user = self.create_user()
self.login_student()
self.enroll(self.course, True)
def login_staff(self):
""" Login as a staff user """
self.logout()
self.login(self.test_user.email, 'test')
def login_student(self):
""" Login as a student """
self.logout()
self.login(self.student_user.email, 'test')
def submit_answer(self, response1, response2):
"""
Submit an answer to the single problem in our test course.
"""
return self.submit_question_answer(
self.problem_display_name,
{'2_1': response1, '2_2': response2}
)
def get_progress_detail(self):
"""
Return the reported progress detail for the problem in our test course.
The return value is a string like u'1/2'.
"""
json_data = json.loads(self.look_at_question(self.problem_display_name).content)
progress = '%s/%s' % (str(json_data['current_score']), str(json_data['total_possible']))
return progress
def assertExpectedLanguageInPreference(self, user, expected_language_code):
"""
        This method is a custom assertion that verifies that a given user has the
        expected language code in their preferences and in their cookies.
Arguments:
user: User model instance
expected_language_code: string indicating a language code
"""
self.assertEqual(
get_user_preference(user, LANGUAGE_KEY), expected_language_code
)
self.assertEqual(
self.client.cookies[settings.LANGUAGE_COOKIE].value, expected_language_code
)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_user_on_self_paced(self):
"""
        Test masquerading as a specific user on the course info page when the self-paced
        configuration flag "enable_course_home_improvements" is set.
        Log in as a staff user and visit the course info page, then masquerade as a
        specific student and revisit the page.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
        # Masquerade as the student, enable the self-paced configuration, and check we can see the info page.
SelfPacedConfiguration(enable_course_home_improvements=True).save()
self.update_masquerade(role='student', user_name=self.student_user.username)
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student(self):
"""
Test masquerading as a specific user.
        We answer the problem in our test course as the student and as the staff user, and we
        use the progress as a proxy to determine whose state we currently see.
"""
# Answer correctly as the student, and check progress.
self.login_student()
self.submit_answer('Correct', 'Correct')
self.assertEqual(self.get_progress_detail(), u'2/2')
# Log in as staff, and check the problem is unanswered.
self.login_staff()
self.assertEqual(self.get_progress_detail(), u'0/2')
# Masquerade as the student, and check we can see the student state.
self.update_masquerade(role='student', user_name=self.student_user.username)
self.assertEqual(self.get_progress_detail(), u'2/2')
# Temporarily override the student state.
self.submit_answer('Correct', 'Incorrect')
self.assertEqual(self.get_progress_detail(), u'1/2')
# Reload the page and check we see the student state again.
self.get_courseware_page()
self.assertEqual(self.get_progress_detail(), u'2/2')
# Become the staff user again, and check the problem is still unanswered.
self.update_masquerade(role='staff')
self.assertEqual(self.get_progress_detail(), u'0/2')
# Verify the student state did not change.
self.login_student()
self.assertEqual(self.get_progress_detail(), u'2/2')
def test_masquerading_with_language_preference(self):
"""
        Tests that masquerading as a specific user in the course does not update the
        staff user's language preference.
        Log in as a staff user, set the user's language preference to English, and visit
        the courseware page. Then masquerade as a specific student with a different
        language preference and revisit the courseware page.
"""
english_language_code = 'en'
set_user_preference(self.test_user, preference_key=LANGUAGE_KEY, preference_value=english_language_code)
self.login_staff()
# Reload the page and check we have expected language preference in system and in cookies.
self.get_courseware_page()
self.assertExpectedLanguageInPreference(self.test_user, english_language_code)
# Set student language preference and set masquerade to view same page the student.
set_user_preference(self.student_user, preference_key=LANGUAGE_KEY, preference_value='es-419')
self.update_masquerade(role='student', user_name=self.student_user.username)
# Reload the page and check we have expected language preference in system and in cookies.
self.get_courseware_page()
self.assertExpectedLanguageInPreference(self.test_user, english_language_code)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student_course_info(self):
"""
Test masquerading as a specific user for course info page.
        We log in via login_staff and check that the course info page renders its content,
        then masquerade as a specific student and verify the same page still works.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
# Masquerade as the student, and check we can see the info page.
self.update_masquerade(role='student', user_name=self.student_user.username)
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
def test_masquerade_as_specific_student_progress(self):
"""
Test masquerading as a specific user for progress page.
"""
# Give the student some correct answers, check their progress page
self.login_student()
self.submit_answer('Correct', 'Correct')
student_progress = self.get_progress_page().content
self.assertNotIn("1 of 2 possible points", student_progress)
self.assertIn("2 of 2 possible points", student_progress)
# Staff answers are slightly different
self.login_staff()
self.submit_answer('Incorrect', 'Correct')
staff_progress = self.get_progress_page().content
self.assertNotIn("2 of 2 possible points", staff_progress)
self.assertIn("1 of 2 possible points", staff_progress)
# Should now see the student's scores
self.update_masquerade(role='student', user_name=self.student_user.username)
masquerade_progress = self.get_progress_page().content
self.assertNotIn("1 of 2 possible points", masquerade_progress)
self.assertIn("2 of 2 possible points", masquerade_progress)
@attr(shard=1)
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as belonging to a group.
"""
def setUp(self):
super(TestGetMasqueradingGroupId, self).setUp()
self.user_partition = UserPartition(
0, 'Test User Partition', '',
[Group(0, 'Group 1'), Group(1, 'Group 2')],
scheme_id='cohort'
)
self.course.user_partitions.append(self.user_partition)
modulestore().update_item(self.course, self.test_user.id)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_get_masquerade_group(self):
"""
Tests that a staff member can masquerade as being in a group in a user partition
"""
# Verify there is no masquerading group initially
group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
self.assertIsNone(group)
# Install a masquerading group
self.ensure_masquerade_as_group_member(0, 1)
# Verify that the masquerading group is returned
group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
self.assertEqual(group.id, 1)
class ReadOnlyKeyValueStore(DictKeyValueStore):
"""
A KeyValueStore that raises an exception on attempts to modify it.
Used to make sure MasqueradingKeyValueStore does not try to modify the underlying KeyValueStore.
"""
def set(self, key, value):
assert False, "ReadOnlyKeyValueStore may not be modified."
def delete(self, key):
assert False, "ReadOnlyKeyValueStore may not be modified."
def set_many(self, update_dict): # pylint: disable=unused-argument
assert False, "ReadOnlyKeyValueStore may not be modified."
class FakeSession(dict):
""" Mock for Django session object. """
modified = False # We need dict semantics with a writable 'modified' property
class MasqueradingKeyValueStoreTest(TestCase):
"""
Unit tests for the MasqueradingKeyValueStore class.
"""
def setUp(self):
super(MasqueradingKeyValueStoreTest, self).setUp()
self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
self.session = FakeSession()
self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)
def test_all(self):
self.assertEqual(self.kvs.get('a'), 42)
self.assertEqual(self.kvs.get('b'), None)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
with self.assertRaises(KeyError):
self.kvs.get('d')
self.assertTrue(self.kvs.has('a'))
self.assertTrue(self.kvs.has('b'))
self.assertTrue(self.kvs.has('c'))
self.assertFalse(self.kvs.has('d'))
self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
self.kvs.set('b', 7)
self.assertEqual(self.kvs.get('a'), 'Norwegian Blue')
self.assertEqual(self.kvs.get('b'), 7)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
self.assertEqual(self.kvs.get('d'), 'Giraffe')
for key in 'abd':
self.assertTrue(self.kvs.has(key))
self.kvs.delete(key)
with self.assertRaises(KeyError):
self.kvs.get(key)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
class CourseMasqueradeTest(TestCase):
"""
Unit tests for the CourseMasquerade class.
"""
def test_unpickling_sets_all_attributes(self):
"""
Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
the session.
"""
cmasq = CourseMasquerade(7)
del cmasq.user_name
pickled_cmasq = pickle.dumps(cmasq)
unpickled_cmasq = pickle.loads(pickled_cmasq)
self.assertEqual(unpickled_cmasq.user_name, None)
|
proversity-org/edx-platform
|
lms/djangoapps/courseware/tests/test_masquerade.py
|
Python
|
agpl-3.0
| 21,271
|
[
"VisIt"
] |
c0d8fea871c153758c6e1e28875770802f029d73f6fb4de35bc3d1dc1ad46484
|
#!/usr/bin/python
# guess the number in the palm of your friend.
# break - break is a way of coming out of the loop abruptly.
# sys.exit - takes you out of the program.
# task: restrict the player to a maximum of 3 tries
import sys
number = 7
#test = True # boolean
answer = raw_input("Do you want to play the game: y/n ?")
if answer == 'n':
sys.exit()
# while always works on the truth of a statement/condition
#while test:
while True:
guess_num = int(raw_input("please enter your number:"))
if guess_num > number:
print "Buddy!! the number you guessed is slightly larger !!"
elif guess_num < number:
print "Buddy!! the number you guessed is slightly smaller !!"
elif guess_num == number:
print "congo!!! buddy you guessed the right number !!"
#test=False # boolean
break
print "Thanks for playing the game!! please visit us again!!"
|
tuxfux-hlp-notes/python-batches
|
archieves/Batch-63/04-guess_number.py
|
Python
|
gpl-3.0
| 851
|
[
"VisIt"
] |
cb7d3362a31cb35fdf7d6b7ff2e133ef0c969a368fa0abf8a8099a5cf323e986
|
from mdtraj.geometry import alignment
# NOTE (added): sgd() below calls `grad`, which the original file never
# imports; it presumably comes from the autograd package, in which case
# numpy should be imported as autograd.numpy so that objectives built from
# these numpy operations can be differentiated.
from autograd import grad
import autograd.numpy as np
def compute_atomwise_deviation_xyz(X_xyz,Y_xyz):
''' given two sets of coordinates as numpy arrays,
align them and return the vector of distances between
corresponding pairs of atoms'''
X_prime = alignment.transform(X_xyz, Y_xyz)
delta = X_prime - Y_xyz
deviation = ((delta**2).sum(1))**0.5
return deviation
def compute_atomwise_deviation(X,Y):
''' given trajectory frames, compute atomwise deviations'''
return compute_atomwise_deviation_xyz(X.xyz[0],Y.xyz[0])
def wRMSD(X,Y,w='unweighted'):
''' compute weighted RMSD using a weight vector'''
dev = compute_atomwise_deviation(X,Y)
if w == 'unweighted':
wdev = sum(dev)
else:
wdev = w.dot(dev)
return np.sqrt(wdev) / len(X.xyz.T)
def wRMSD_xyz(X,Y,w='unweighted'):
''' compute weighted RMSD using a weight vector'''
dev = compute_atomwise_deviation_xyz(X,Y)
if w == 'unweighted':
wdev = sum(dev)
else:
wdev = w.dot(dev)
return np.sqrt(wdev) / len(X.T)
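# Hedged usage sketch (an addition): wRMSD_xyz only needs raw coordinate
# arrays, so it can be exercised without loading a trajectory. The sizes and
# weights below are arbitrary illustration values.
def _demo_wrmsd_xyz():
    X = np.random.randn(10, 3)                # 10 atoms, xyz coordinates
    Y = X + 0.01 * np.random.randn(10, 3)     # slightly perturbed copy
    w = np.ones(10) / 10.0                    # uniform weight vector
    return wRMSD_xyz(X, Y, w)                 # small positive scalar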
def compute_kinetic_weights(traj,tau=10):
''' for all tau-lagged pairs of observations in traj, compute their
atomwise_deviations. Return (the mean over the observation pairs)**-1'''
n_frames=len(traj)-tau
n_atoms = traj.n_atoms
atomwise_deviations = np.zeros((n_frames,n_atoms))
for i in range(n_frames):
atomwise_deviations[i] = compute_atomwise_deviation(traj[i],traj[i+tau])
means = np.mean(atomwise_deviations,0)
weights = 1/means
return weights/sum(weights)
def sgd(objective,dataset,init_point,batch_size=20,n_iter=100,step_size=0.01,seed=0):
''' objective takes in a parameter vector and an array of data'''
np.random.seed(seed)
testpoints = np.zeros((n_iter,len(init_point)))
testpoints[0] = init_point
shuffled = np.array(dataset)
np.random.shuffle(shuffled)
accept_frac = 1.0*batch_size/dataset.shape[0]
ind=0
for i in range(1,n_iter):
#max_ind = ind+batch_size
#if max_ind<len(dataset):
# subset = dataset[ind:max_ind]
# ind = (ind + batch_size)
#else:
# new_ind = (max_ind-len(dataset))
# subset = np.vstack([dataset[ind:],dataset[:new_ind]])
subset = dataset[np.random.rand(len(dataset))<accept_frac]
obj_grad = grad(lambda p:objective(p,subset))
gradient = np.nan_to_num(obj_grad(testpoints[i-1]))
#print(gradient)
testpoints[i] = testpoints[i-1] - gradient*step_size
return testpoints
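# NOTE (added): the original file never defines `penalty`, which
# near_far_triplet_loss() below depends on. A standard choice, assumed here,
# is a hinge-style triplet penalty: the temporally near frame should be
# closer under `metric` than the temporally far frame, up to a margin.
def penalty(metric, x, x_near, x_far, margin=0.0):
    return max(0.0, metric(x, x_near) - metric(x, x_far) + margin)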
def near_far_triplet_loss(metric,batch,tau_1=1,tau_2=10):
''' batch is a numpy array of time-ordered observations'''
#triplets = np.array([(batch[i],batch[i+tau_1],batch[i+tau_2]) for i in range(len(batch)-tau_2)])
cost=0
n_triplets = len(batch)-tau_2
for i in range(n_triplets):
cost+=penalty(metric,batch[i],batch[i+tau_1],batch[i+tau_2])
return cost / n_triplets
def triplet_wrmsd_loss(weights,dataset,tau_1=1,tau_2=10):
    ''' given a weight vector and a dataset of time-ordered frames, compute
    the near/far triplet loss under the induced wRMSD metric '''
metric = lambda x,y:wRMSD(x,y,weights)
return near_far_triplet_loss(metric,dataset,tau_1,tau_2)
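# Hedged usage sketch (an addition): minimize a simple quadratic objective
# with the sgd() helper above. This only illustrates the calling convention;
# the data and objective are arbitrary.
def _demo_sgd():
    data = np.random.randn(200, 1) + 3.0                  # samples around 3
    objective = lambda p, batch: np.mean((batch - p[0]) ** 2)
    path = sgd(objective, data, init_point=np.zeros(1),
               batch_size=20, n_iter=200, step_size=0.05)
    return path[-1]                                       # approaches [3.]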
|
maxentile/msm-learn
|
projects/metric-learning/weighted_rmsd.py
|
Python
|
mit
| 3,197
|
[
"MDTraj"
] |
8c9c95c4e8f5ba954b5d0fe87da97c677e063abdaaa2ca6d0c8b5a90ba6455b9
|
#!/usr/bin/env python
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Authors: Georg Winkler, Dominik Gresch <greschd@gmx.ch>
import numpy as np
import scipy.linalg as la
import tbmodels as tb
import pymatgen as mg
import pymatgen.symmetry.analyzer
import symmetry_representation as sr
def spin_reps(prep):
"""
Calculates the spin rotation matrices. The formulas to determine the rotation axes and angles
are taken from `here <http://scipp.ucsc.edu/~haber/ph116A/rotation_11.pdf>`_.
:param prep: List that contains 3d rotation matrices.
:type prep: list(array)
"""
# general representation of the D1/2 rotation about the axis (l,m,n) around the
# angle phi
D12 = lambda l, m, n, phi: np.array(
[
[
np.cos(phi / 2.0) - 1j * n * np.sin(phi / 2.0),
(-1j * l - m) * np.sin(phi / 2.0),
],
[
(-1j * l + m) * np.sin(phi / 2.0),
np.cos(phi / 2.0) + 1j * n * np.sin(phi / 2.0),
],
]
)
n = np.zeros(3)
tr = np.trace(prep)
det = np.round(np.linalg.det(prep), 5)
if det == 1.0: # rotations
theta = np.arccos(0.5 * (tr - 1.0))
if theta != 0:
n[0] = prep[2, 1] - prep[1, 2]
n[1] = prep[0, 2] - prep[2, 0]
n[2] = prep[1, 0] - prep[0, 1]
if (
np.round(np.linalg.norm(n), 5) == 0.0
): # theta = pi, that is C2 rotations
e, v = la.eig(prep)
n = v[:, list(np.round(e, 10)).index(1.0)]
spin = np.round(D12(n[0], n[1], n[2], np.pi), 15)
else:
n /= np.linalg.norm(n)
spin = np.round(D12(n[0], n[1], n[2], theta), 15)
        else: # case of unity
spin = D12(0, 0, 0, 0)
elif det == -1.0: # improper rotations and reflections
theta = np.arccos(0.5 * (tr + 1.0))
if np.round(theta, 5) != np.round(np.pi, 5):
n[0] = prep[2, 1] - prep[1, 2]
n[1] = prep[0, 2] - prep[2, 0]
n[2] = prep[1, 0] - prep[0, 1]
if np.round(np.linalg.norm(n), 5) == 0.0: # theta = 0 (reflection)
e, v = la.eig(prep)
# normal vector is eigenvector to eigenvalue -1
n = v[:, list(np.round(e, 10)).index(-1.0)]
# spin is a pseudovector!
spin = np.round(D12(n[0], n[1], n[2], np.pi), 15)
else:
n /= np.linalg.norm(n)
# rotation followed by reflection:
spin = np.round(
np.dot(D12(n[0], n[1], n[2], np.pi), D12(n[0], n[1], n[2], theta)),
15,
)
else: # case of inversion (does not do anything to spin)
spin = D12(0, 0, 0, 0)
return np.array(spin)
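# Hedged sanity check (an addition, not part of the original script): for the
# identity rotation the spin-1/2 representation is the 2x2 identity, while a
# C2 rotation about z maps to a matrix proportional to the Pauli matrix
# sigma_z, up to the sign convention of the double cover.
def _check_spin_reps():
    assert np.allclose(spin_reps(np.eye(3)), np.eye(2))
    c2z = np.diag([-1.0, -1.0, 1.0])
    assert np.allclose(spin_reps(c2z), np.array([[-1j, 0.0], [0.0, 1j]]))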
if __name__ == "__main__":
# For this example we already changed the order of the orbitals (unlike the
# other symmetrization example).
model_nosym = tb.Model.from_hdf5_file("data/model_nosym.hdf5")
# set up symmetry operations
time_reversal = sr.SymmetryOperation(
rotation_matrix=np.eye(3),
repr_matrix=np.kron([[0, -1j], [1j, 0]], np.eye(7)),
repr_has_cc=True,
)
structure = mg.Structure(
lattice=model_nosym.uc,
species=["In", "As"],
coords=np.array([[0, 0, 0], [0.25, 0.25, 0.25]]),
)
# get real-space representations
analyzer = mg.symmetry.analyzer.SpacegroupAnalyzer(structure)
symops = analyzer.get_symmetry_operations(cartesian=False)
symops_cart = analyzer.get_symmetry_operations(cartesian=True)
rots = [x.rotation_matrix for x in symops]
taus = [x.translation_vector for x in symops]
# get corresponding represesentations in the Hamiltonian basis
reps = []
for n, (rot, tau) in enumerate(zip(rots, taus)):
C = symops_cart[n].rotation_matrix
tauc = symops_cart[n].translation_vector
prep = C
spinrep = spin_reps(C)
R = np.kron(spinrep, la.block_diag(1.0, prep, prep))
reps.append(R)
# set up the space group symmetries
symmetries = [
sr.SymmetryOperation(
# r-space and k-space matrices are related by transposing and inverting
rotation_matrix=rot,
repr_matrix=repr_mat,
repr_has_cc=False,
)
for rot, repr_mat in zip(rots, reps)
]
point_group = sr.SymmetryGroup(symmetries=symmetries, full_group=True)
sr.io.save([time_reversal, point_group], "results/symmetries.hdf5")
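    # NOTE (added, hedged): assuming symmetry_representation ships the matching
    # loader, the saved objects can later be read back with
    #   time_reversal, point_group = sr.io.load("results/symmetries.hdf5")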
|
Z2PackDev/TBmodels
|
examples/cmdline/symmetrization/generate_symmetries.py
|
Python
|
apache-2.0
| 4,636
|
[
"pymatgen"
] |
f334e48ef8f24c3e7c4140ffb3712cb92bf060ee842d7cad68f62d0f5fe89e47
|