| code (string, 3–1.05M) | repo_name (string, 5–104) | path (string, 4–251) | language (1 class) | license (15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
#
# Copyright John Reid 2008
#
"""
Code to build single gap PSSM models using HMMs.
"""
import hmm
class MotifModelPositionMap(object):
"""
Maps between positions in a motif and positions in a model representing the motif.
"""
def __init__(self, K):
"""
@arg K: The length of the motif.
"""
self.K = K
"The length of the ungapped motif."
self.N = self.K*2
"The number of states in the model."
def model_idx(self, k, positive):
"@return: The index into the model's states that represents the k'th base in the given orientation."
if positive:
return k
else:
return k+self.N/2
def motif_position(self, n):
"@return: (k, positive) where k is the index into the motif and positive represents the orientation."
if n < self.K:
return n, True
else:
return n-self.K, False
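# Illustrative round-trip (an added example, not part of the original
# module): for a motif of length 3, model states 0-2 hold the positive
# strand and states 3-5 the reverse complement.
#
#   m = MotifModelPositionMap(3)
#   m.model_idx(1, positive=False)   # -> 4
#   m.motif_position(4)              # -> (1, False)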
class SingleGappedPssmBuilder(object):
"""
Knows how to build a single gapped pssm.
"""
def __init__(
self,
K,
gap_index,
markov_order=0,
M=4
):
"""
@arg K: The number of positions in the gapped pssm.
@arg gap_index: The index of the position in the motif which is the gap. I.e. 1 would
place the gap after the first base.
@arg markov_order: the Markov order of the model.
@arg M: the output alphabet size.
"""
self.map = MotifModelPositionMap(K)
"Maps positions in the pssm to the model and back again."
self.K = K
"The number of positions in the gapped pssm."
self.gap_index = gap_index
"""
The index of the position in the motif which is the gap. I.e. 1 would
place the gap after the first base.
"""
if self.gap_index < 1 or self.gap_index > self.K-2:
raise RuntimeError('Gap must be in middle of motif.')
self.markov_order = markov_order
"The Markov order of the model."
self.M = M
"The output alphabet size."
def num_states(self):
"The number of states in a model of the shape defined by this builder."
return self.map.N
def _set_emissions(self, model, positive_state, negative_state, emissions):
"""
Set the emissions of the states to be the complement of each other.
"""
assert len(emissions) == self.M
emission_params = [model.add_parameter(p_e) for p_e in emissions]
for m, p in enumerate(emission_params):
positive_state.b[m] = p
negative_state.b[-m-1] = p
def get_emissions_and_gap_probabilities(self, model, offset=0):
"@return: emissions, gap_probabilities"
import numpy
emissions = model.B[offset:offset+self.K]
gap_probabilities = numpy.ones(self.K)
gap_probabilities[self.gap_index] = model.A[offset+self.gap_index-1, offset+self.gap_index]
return emissions, gap_probabilities
def create(
self,
p_gap,
emissions
):
"""
@arg p_gap: the probability of a gap.
@arg emissions: the emission distributions of the bases (including the gap).
        @returns: A tuple (model, in_states, out_states). The model is defined by its states and
        includes both the motif and its reverse complement. in_states holds the indices of the
        entry states of the positive and negative strand motifs and out_states the indices of
        their exit states.
"""
# create the model
model = hmm.ModelByStates(M=self.M, markov_order=self.markov_order)
# add enough states to the models
for k in xrange(self.num_states()):
model.add_state()
# link the states
transition_param_one = model.add_parameter(1.)
transition_param_gap = model.add_parameter(p_gap)
transition_param_not_gap = model.add_parameter(1. - p_gap)
# positive transitions
for k in xrange(self.K-1):
if k+1 != self.gap_index:
# this is not the base before the gap
# so just connect to next base
model.states[self.map.model_idx(k, True)].add_successor(
model.states[self.map.model_idx(k+1, True)],
transition_param_one
)
else:
# this is the base before the gap
# so connect to the gap and the base after the gap
model.states[self.map.model_idx(k, True)].add_successor(
model.states[self.map.model_idx(k+1, True)],
transition_param_gap
)
model.states[self.map.model_idx(k, True)].add_successor(
model.states[self.map.model_idx(k+2, True)],
transition_param_not_gap
)
# negative transitions
for k in xrange(self.K-1):
if k != self.gap_index:
# this is not the base before the gap
# so just connect to next base
model.states[self.map.model_idx(k+1, False)].add_successor(
model.states[self.map.model_idx(k, False)],
transition_param_one
)
else:
# this is the base before the gap
# so connect to the gap and the base after the gap
model.states[self.map.model_idx(k+1, False)].add_successor(
model.states[self.map.model_idx(k, False)],
transition_param_gap
)
model.states[self.map.model_idx(k+1, False)].add_successor(
model.states[self.map.model_idx(k-1, False)],
transition_param_not_gap
)
# fill in the emission distributions
assert len(emissions) == self.K
for k, base_emissions in enumerate(emissions):
self._set_emissions(
model,
model.states[self.map.model_idx(k, True)],
model.states[self.map.model_idx(k, False)],
base_emissions
)
return (
model,
[
self.map.model_idx(k=0, positive=True),
self.map.model_idx(k=self.K-1, positive=False),
],
[
self.map.model_idx(k=self.K-1, positive=True),
self.map.model_idx(k=0, positive=False),
]
)
def extend_model(model, extension):
"""
Copies all the states, and emission and transition parameters from the extension model into
the model.
@arg model: The model to be extended.
@arg extension: The model that is the extension.
"""
assert model.M == extension.M
# add a parameter to model for each parameter in extension
extended_parameters = [
model.add_parameter(p)
for p in extension.parameters
]
# add a state to model for each state in extension
extended_states = [
model.add_state()
for state in extension.states
]
state_map = dict((state, i) for i, state in enumerate(extension.states))
# add the transitions to the model
for state in extension.states:
extended_state = extended_states[state_map[state]]
for successor in state.successors:
extended_state.add_successor(
extended_states[state_map[successor.state]],
extended_parameters[successor.a.idx]
)
# add the emissions to the model
for state in extension.states:
extended_state = extended_states[state_map[state]]
for m, b in enumerate(state.b):
extended_state.b[m] = extended_parameters[b.idx]
def simplest_background_model(markov_order=0, M=4):
model = hmm.ModelByStates(M=M, markov_order=markov_order)
state = model.add_state()
state.add_successor(state, model.add_parameter(1.))
for m in xrange(M):
state.b[m] = model.add_parameter(1./M)
return model
def add_to_simple_background_model(model, in_states, out_states, p_binding_site):
"""
Create a simple background model and extend it with a copy of the given model.
@arg model: The model to extend the background model with.
@arg in_states: Indices of those states the background model should transition to.
    @arg out_states: Indices of those states that should transition back to the background model.
    @arg p_binding_site: The total probability of transitioning from the background into the binding site.
"""
complete_model = simplest_background_model(model.converter.n, model.M)
extend_model(complete_model, model)
# link the background model to the positive and negative parts of the single gapped pssm
binding_site_transition_param = complete_model.add_parameter(p_binding_site/len(in_states))
back_to_bg_transition_param = complete_model.add_parameter(1.)
for in_state in in_states:
complete_model.states[0].add_successor(
complete_model.states[1+in_state],
binding_site_transition_param
)
for out_state in out_states:
complete_model.states[1+out_state].add_successor(
complete_model.states[0],
back_to_bg_transition_param
)
complete_model.states[0].pi = complete_model.add_parameter(1.)
return complete_model
if '__main__' == __name__:
import numpy
# build a single gapped pssm with some random emissions
builder = SingleGappedPssmBuilder(K=6, gap_index=1, markov_order=0, M=4)
emissions = numpy.array(
[
hmm.dirichlet_draw(numpy.ones(builder.M) * .1)
for k in xrange(builder.K)
]
)
emissions[builder.gap_index] = hmm.dirichlet_draw(numpy.ones(builder.M) * .3)
model_by_states, in_states, out_states = builder.create(
p_gap=.6,
emissions=emissions
)
# create a background model and add the single gapped pssm to it
complete_model = add_to_simple_background_model(
model_by_states,
in_states,
out_states,
p_binding_site=.01)
# convert to other type of model
model = hmm.as_model(complete_model)
# write as a graph
hmm.graph_as_svg(
model,
'single-gapped-hmm',
graphing_keywords={'include_emissions':False},
neato_properties={'-Elen':2}
)
# get the emissions and gap probabilities and write a logo
emissions_copy, gap_probs = builder.get_emissions_and_gap_probabilities(model, offset=1)
assert (emissions_copy - emissions).sum() < 1e-10
import hmm.pssm.logo as logo
image = logo.pssm_as_image(emissions, transparencies=gap_probs)
image.save("single-gapped-pssm-logo.png", "PNG")
| JohnReid/biopsy | Python/hmm/pssm/single_gap.py | Python | mit | 10,682 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
new_nums = []
last = None
for n in nums:
if n != last:
new_nums.append(n)
last = n
return new_nums
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
for idx1, n1 in enumerate(list1):
for n2 in list2:
if n1 > n2:
list1.insert(idx1, list2.pop(0))
else:
break
list1.extend(list2)
return list1
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
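# A sketch of that alternate approach (an added illustration, not part of the
# original exercise solution): pop the larger tail element with pop(-1),
# building the merged list backwards, then reverse it once at the end.
# Every element is popped exactly once, so this runs in linear time.
def linear_merge_backwards(list1, list2):
    merged = []
    while list1 and list2:
        # pop(-1) is O(1), unlike pop(0)
        if list1[-1] >= list2[-1]:
            merged.append(list1.pop())
        else:
            merged.append(list2.pop())
    # one list is now empty; the other holds the smallest elements, ascending
    merged.extend(reversed(list1 or list2))
    merged.reverse()
    return merged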
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'remove_adjacent'
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print
print 'linear_merge'
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| g-sobral/google-python-exercises | basic/list2.py | Python | apache-2.0 | 2,495 |
from setuptools import setup, find_packages
readme = open('README.rst').read()
changes = open('CHANGES.rst').read()
setup(
name='pydocker-tools',
version='0.0.1',
description='pydocker-tools is a set of tools to work around lengthy or piped command line tools for docker',
license='MIT',
url='https://github.com/jojees/pydocker-tools',
author='Joji Vithayathil Johny',
author_email='joji@jojees.net',
long_description=readme + '\n\n' + changes,
packages=find_packages(
exclude=['tests']
),
install_requires=[
'requests[security]',
'argparse'
],
test_suite='tests',
entry_points={
'console_scripts': [
'jpy-dtools = pydockertools.jpydtools:main'
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Shells',
'Topic :: System :: Systems Administration',
'Topic :: Text Processing :: Indexing',
'Topic :: Utilities'
],
keywords='additional docker commandline tools'
)
| jojees/pydocker-tools | setup.py | Python | mit | 1,691 |
#!/usr/bin/env python
import sys, os
from stat import *
from distutils.core import setup
from distutils.command.install import install as _install
INSTALLED_FILES = '.installed_files'
#stolen from ccsm
class install (_install):
def run (self):
_install.run(self)
outputs = self.get_outputs()
data = '\n'.join(outputs)
try:
f = open(INSTALLED_FILES, 'w')
except:
self.warn ('Could not write installed files list %s' %INSTALLED_FILES)
return
f.write(data)
f.close()
class uninstall(_install):
def run(self):
try:
files = file(INSTALLED_FILES, 'r').readlines()
except:
self.warn('Could not read installed files list %s' %INSTALLED_FILES)
return
for f in files:
print 'Uninstalling %s' %f.strip()
try:
os.unlink(f.strip())
except:
self.warn('Could not remove file %s' %f)
os.remove(INSTALLED_FILES)
version = open('VERSION', 'r').read().strip()
packages = ['UshareGui']
data_files = [
('share/icons/hicolor/22x22/apps',['images/22x22/usharegui.png']),
('share/icons/hicolor/24x24/apps',['images/24x24/usharegui.png']),
('share/icons/hicolor/48x48/apps',['images/48x48/usharegui.png']),
('share/applications',['ushare-gui.desktop']),
('share/ushare-gui/data/glade',['data/glade/gui.glade']),
('share/ushare-gui/data/img',['data/img/usharegui.png','data/img/no.png','data/img/usable.png']),
]
setup(
name='ushare-gui',
version=version,
description='Gui for ushare upnp server',
author='Laguillaumie sylvain',
author_email='s.lagui@free.fr',
url='http://penguincape.org',
packages=packages,
scripts=['ushare-gui','ushare-start'],
data_files=data_files,
cmdclass={
'uninstall': uninstall,
'install': install},
)
#Stolen from ccsm's setup.py
if sys.argv[1] == 'install':
prefix = None
if len (sys.argv) > 2:
i = 0
for o in sys.argv:
if o.startswith ("--prefix"):
if o == "--prefix":
                    if len (sys.argv) > i + 1:
prefix = sys.argv[i + 1]
sys.argv.remove (prefix)
elif o.startswith ("--prefix=") and len (o[9:]):
prefix = o[9:]
sys.argv.remove (o)
break
i += 1
if not prefix:
prefix = '/usr'
gtk_update_icon_cache = '''gtk-update-icon-cache -f -t \
%s/share/icons/hicolor''' % prefix
root_specified = [s for s in sys.argv if s.startswith('--root')]
if not root_specified or root_specified[0] == '--root=/':
print 'Updating Gtk icon cache.'
os.system(gtk_update_icon_cache)
else:
print '''*** Icon cache not updated. After install, run this:
*** %s''' % gtk_update_icon_cache
os.system('xdg-desktop-menu install --novendor ushare-gui.desktop')
| smolleyes/Ushare-gui | setup.py | Python | gpl-2.0 | 2,609 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import sys
import glob
import yaml
import argparse
import tempfile
import logging
import re
import shutil
import pprint
from fermipy.utils import mkdir
from fermipy.batch import submit_jobs, add_lsf_args
from fermipy.logger import Logger
from fermipy.gtanalysis import run_gtapp
from fermipy.validate.tools import *
def make_outpath(f, outdir):
filename = os.path.splitext(os.path.basename(f))[0] + '_hist.fits'
if outdir is None:
outdir = os.path.abspath(os.path.dirname(f))
return os.path.join(outdir, filename)
def main():
usage = "usage: %(prog)s [options] "
description = "Run validation analysis"
parser = argparse.ArgumentParser(usage=usage, description=description)
add_lsf_args(parser)
parser.add_argument('--config', default=None, type=str, required=True,
help='Configuration file.')
parser.add_argument('--dataset', default=None, type=str,
help='Key name of data set to analyze. If None then all data '
'sets will be analyzed.')
parser.add_argument('--outdir', default=None, type=str,
help='Path to output directory used when merge=False.')
parser.add_argument('--outfile', default=None, type=str,
help='Path to output file used when merge=True.')
parser.add_argument('--dry_run', default=False, action='store_true')
parser.add_argument('--mode', default='fill', type=str)
parser.add_argument('--overwrite', default=False, action='store_true')
args = parser.parse_args()
# if args.outdir is not None:
# args.outdir = os.path.abspath(args.outdir)
# mkdir(args.outdir)
# if args.mode == 'fill':
# input_files = [[os.path.abspath(x)] for x in args.files]
# output_files = [make_outpath(x,args.outdir) for x in args.files]
# elif args.mode == 'collect':
# input_files = [[os.path.abspath(x) for x in args.files]]
# output_files = [args.outfile]
# print(input_files)
# print(output_files)
config = yaml.safe_load(open(args.config))
if args.batch:
        input_files = [[]] * len(config['datasets'])
        output_files = [v['outfile'] for k, v in config['datasets'].items()]
opts = []
for k, v in config['datasets'].items():
o = vars(args).copy()
del o['batch']
o['dataset'] = k
opts += [o]
submit_jobs('fermipy-validate',
input_files, opts, output_files, overwrite=args.overwrite,
dry_run=args.dry_run)
sys.exit(0)
logger = Logger.get(os.path.basename(__file__), None, logging.INFO)
logger.info('Starting.')
for k, v in config['datasets'].items():
if args.dataset is not None and k != args.dataset:
continue
if v['data_type'] == 'agn':
val = AGNValidator(config['scfile'], 100.)
elif v['data_type'] == 'psr':
val = PSRValidator(config['scfile'], 100.)
elif v['data_type'] == 'ridge':
val = GRValidator(config['scfile'], 100.)
else:
raise Exception('Unknown data type {}'.format(v['data_type']))
infiles = glob.glob(v['files'])
for f in infiles:
print('processing', f)
val.process(f)
val.calc_eff()
if v['data_type'] in ['agn', 'psr']:
val.calc_containment()
print('write', v['outfile'])
val.write(v['outfile'])
logger.info('Done.')
if __name__ == "__main__":
main()
| fermiPy/fermipy | fermipy/scripts/validate.py | Python | bsd-3-clause | 3,711 |
# -*- coding: utf-8 -*-
__title__ = 'djangorestframework-jsonapi'
__version__ = '2.1.1'
__author__ = ''
__license__ = 'MIT'
__copyright__ = ''
# Version synonym
VERSION = __version__
| Instawork/django-rest-framework-json-api | rest_framework_json_api/__init__.py | Python | bsd-2-clause | 185 |
from gi.repository import Gtk
from toga.interface import SplitContainer as SplitContainerInterface
from ..container import Container
from .base import WidgetMixin
class SplitContainer(SplitContainerInterface, WidgetMixin):
_CONTAINER_CLASS = Container
def __init__(self, id=None, style=None, direction=SplitContainerInterface.VERTICAL):
super().__init__(id=id, style=style, direction=direction)
self._create()
self._ratio = None
def create(self):
if self.direction == self.HORIZONTAL:
self._impl = Gtk.VPaned()
else:
self._impl = Gtk.HPaned()
self._impl._interface = self
def _add_content(self, position, container):
if position >= 2:
raise ValueError('SplitContainer content must be a 2-tuple')
if position == 0:
add = self._impl.add1
elif position == 1:
add = self._impl.add2
add(container._impl)
def _set_app(self, app):
if self._content:
self._content[0].app = self.app
self._content[1].app = self.app
def _set_window(self, window):
if self._content:
self._content[0].window = self.window
self._content[1].window = self.window
def _set_direction(self, value):
pass
def rehint(self):
pass
def _update_child_layout(self):
"""Force a layout update on the widget.
"""
if self.content:
if self.direction == SplitContainer.VERTICAL:
                size = self._impl.get_allocation().width
                if self._ratio is None:
                    self._ratio = 0.5
                self._impl.set_position(size * self._ratio)
                self._containers[0]._update_layout(width=size * self._ratio)
                self._containers[1]._update_layout(width=size * (1.0 - self._ratio))
            else:
                size = self._impl.get_allocation().height
                if self._ratio is None:
                    self._ratio = 0.5
                self._impl.set_position(size * self._ratio)
                self._containers[0]._update_layout(height=size * self._ratio)
                self._containers[1]._update_layout(height=size * (1.0 - self._ratio))
| pybee/toga-gtk | toga_gtk/widgets/splitcontainer.py | Python | bsd-3-clause | 2,284 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp.osv import osv, fields
# class workflow_instance(osv.osv):
# """"""
# _inherit = 'workflow.instance'
# _columns = {
# ''
# }
class workflow_instance(osv.osv):
""""""
_inherit = 'workflow.instance'
_columns = {
'workitem_ids': fields.one2many('workflow.workitem', 'inst_id', 'Instances')
}
| jorsea/odoo-addons | workflow_view/workflow.py | Python | agpl-3.0 | 629 |
"""The philharmonic simulator.
Traces geotemporal input data, asks the scheduler to determine actions
and simulates the outcome of the schedule.
(_)(_)
/ \ ssssssimulator
/ | /
/ \ * |
________ / /\__/
_ / \ / /
/ \ / ____ \_/ /
//\ \ / / \ /
V \ \/ / \ /
\___/ \_____/
"""
import os
import pickle
from datetime import datetime
import pprint
from philharmonic import conf
if conf.plotserver:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
else:
import matplotlib.pyplot as plt
import pandas as pd
import philharmonic as ph
from philharmonic.logger import *
import inputgen
from .results import serialise_results
from philharmonic import Schedule
from philharmonic.scheduler.generic.fbf_optimiser import FBFOptimiser
from philharmonic.manager.imanager import IManager
#from philharmonic.cloud.driver import simdriver
from philharmonic.scheduler import NoScheduler
from philharmonic.scheduler.peak_pauser.peak_pauser import PeakPauser
from environment import SimulatedEnvironment, PPSimulatedEnvironment
from philharmonic.utils import loc, common_loc, input_loc
# old scheduler design...
#-------------------------
def geotemporal_inputs():
"""Read time series for el. prices and temperatures
at different locations.
"""
info(" - reading geotemporal inputs")
freq = 'H'
# el. prices
el_prices_pth = 'io/geotemp/el_prices-usa.pkl'
el_prices = pd.read_pickle(el_prices_pth)
# - resample to desired freqency
el_prices = el_prices.resample(freq)
debug(str(el_prices))
# temperatures
temperatures_pth = 'io/geotemp/temperature-usa.pkl'
temperatures = pd.read_pickle(temperatures_pth)
temperatures = temperatures.resample(freq)
debug(str(temperatures))
# common index is actually in temperatures (subset of prices)
return el_prices, temperatures
def server_locations(servers, possible_locations):
"""Change servers by setting a location."""
#Todo: Potentially separate into DCs
for i, s in enumerate(servers):
s.loc = possible_locations[i]
def VM_requests(start, end):
return inputgen.normal_vmreqs(start, end)
def prepare_known_data(dataset, t, future_horizon=None): # TODO: use pd.Panel for dataset
""" @returns a subset of the @param dataset
(a tuple of pd.Series objects)
that is known at moment @param t
"""
future_horizon = future_horizon or pd.offsets.Hour(4)
el_prices, temperatures = dataset # unpack
# known data (past and future up to a point)
known_el_prices = el_prices[:t+future_horizon]
known_temperatures = temperatures[:t+future_horizon]
return known_el_prices, known_temperatures
#TODO:
# - shorthand to access temp, price in server
# new simulator design
#----------------------
class Simulator(IManager):
"""simulates the passage of time and prepares all the data for
the scheduler
"""
factory = {
"scheduler": "PeakPauser",
"environment": "GASimpleSimulatedEnvironment",
"cloud": "peak_pauser_infrastructure",
"driver": "simdriver",
"times": "two_days",
"requests": None, #inputgen.normal_vmreqs,
"servers": None, #inputgen.small_infrastructure,
"el_prices": "simple_el",
"temperature": "simple_temperature",
}
def __init__(self, factory=None, custom_scheduler=None):
if factory is not None:
self.factory = factory
if custom_scheduler is not None:
self.custom_scheduler = custom_scheduler
super(Simulator, self).__init__()
self.environment.el_prices = self._create(inputgen,
self.factory['el_prices'])
self.environment.temperature = self._create(inputgen,
self.factory['temperature'])
SD_el = self.factory['SD_el'] if 'SD_el' in self.factory else 0
SD_temp = self.factory['SD_temp'] if 'SD_temp' in self.factory else 0
self.environment.model_forecast_errors(SD_el, SD_temp)
self.real_schedule = Schedule()
def apply_actions(self, actions):
"""apply actions (or requests) on the cloud (for "real") and log them"""
self.cloud.reset_to_real()
for t, action in actions.iteritems():
#debug('apply %s at time %d'.format(action, t))
self.cloud.apply_real(action)
self.real_schedule.add(action, t)
self.driver.apply_action(action, t)
def prompt(self):
if conf.prompt_show_cloud:
if conf.prompt_ipdb:
import ipdb; ipdb.set_trace()
else:
prompt_res = raw_input('Press enter to continue...')
def show_cloud_usage(self):
self.cloud.show_usage()
self.prompt()
def run(self, steps=None):
"""Run the simulation. Iterate through the times, query for
geotemporal inputs, reevaluate the schedule and simulate actions.
@param steps: number of time steps to make through the input data
(if None, go through the whole input)
"""
if conf.show_cloud_interval is not None:
t_show = conf.start + conf.show_cloud_interval
self.scheduler.initialize()
passed_steps = 0
for t in self.environment.itertimes(): # iterate through all the times
debug('-' * 25 + '\n| t={} |\n'.format(t) + '-' * 25)
passed_steps += 1
if steps is not None and passed_steps > steps:
break
# get requests & update model
# these are the event triggers
# - we find any requests that might arise in this interval
requests = self.environment.get_requests()
# - apply requests on the simulated cloud
self.apply_actions(requests)
if len(requests) > 0:
#import ipdb; ipdb.set_trace()
pass
# call scheduler to decide on actions
schedule = self.scheduler.reevaluate()
self.cloud.reset_to_real()
period = self.environment.get_period()
actions = schedule.filter_current_actions(t, period)
if len(requests) > 0:
debug('Requests:\n{}\n'.format(requests))
if len(actions) > 0:
debug('Applying:\n{}\n'.format(actions))
planned_actions = schedule.filter_current_actions(t + period)
if len(planned_actions) > 0:
debug('Planned:\n{}\n'.format(planned_actions))
self.apply_actions(actions)
if conf.show_cloud_interval is not None and t == t_show:
t_show = t_show + conf.show_cloud_interval
self.show_cloud_usage()
return self.cloud, self.environment, self.real_schedule
# TODO: these other simulator subclasses should not be necessary
class PeakPauserSimulator(Simulator):
def __init__(self, factory=None):
if factory is not None:
self.factory = factory
self.factory["scheduler"] = "PeakPauser"
self.factory["environment"] = "PPSimulatedEnvironment"
super(PeakPauserSimulator, self).__init__()
def run(self): #TODO: use Simulator.run instead
"""go through all the timesteps and call the scheduler to ask for
actions
"""
self.environment.times = range(24)
self.environment._period = pd.offsets.Hour(1)
self.scheduler.initialize()
for hour in self.environment.times:
# TODO: set time in the environment instead of here
timestamp = pd.Timestamp('2013-02-20 {0}:00'.format(hour))
self.environment.set_time(timestamp)
# call scheduler to create new cloud state (if an action is made)
schedule = self.scheduler.reevaluate()
# TODO: when an action is applied to the current state, forward it
# to the driver as well
period = self.environment.get_period()
actions = schedule.filter_current_actions(timestamp, period)
self.apply_actions(actions)
# TODO: use schedule instance
#events = self.cloud.driver.events
from philharmonic.scheduler import FBFScheduler
from philharmonic.simulator.environment import FBFSimpleSimulatedEnvironment
class FBFSimulator(Simulator):
def __init__(self, factory=None):
if factory is not None:
self.factory = factory
self.factory["scheduler"] = "FBFScheduler"
self.factory["environment"] = "FBFSimpleSimulatedEnvironment"
super(FBFSimulator, self).__init__()
class NoSchedulerSimulator(Simulator):
def __init__(self):
self.factory["scheduler"] = "NoScheduler"
super(NoSchedulerSimulator, self).__init__()
#-- common functions --------------------------------
def log_config_info(simulator):
"""Log the essential configuration information."""
info('- output_folder: {}'.format(conf.output_folder))
if conf.factory["times"] == "times_from_conf":
info('- times: {} - {}'.format(conf.start, conf.end))
if conf.factory["el_prices"] == "el_prices_from_conf":
info('- el_price_dataset: {}'.format(conf.el_price_dataset))
if conf.factory["temperature"] == "temperature_from_conf":
info('- temperature_dataset: {}'.format(conf.temperature_dataset))
info('- forecasting:')
info(' * periods: {}'.format(conf.factory['forecast_periods']))
info(' * errors: SD_el={}, SD_temp={}'.format(
conf.factory['SD_el'], conf.factory['SD_temp']
))
info('- power_model: {}'.format(conf.power_model))
info('\n- scheduler: {}'.format(conf.factory['scheduler']))
if conf.factory['scheduler_conf'] is not None:
info(' * conf: \n{}'.format(
pprint.pformat(conf.factory['scheduler_conf'])
))
info('\nServers ({} -> will copy to: {})\n-------\n{}'.format(
common_loc('workload/servers.pkl'),
os.path.relpath(input_loc('servers.pkl')),
simulator.cloud.servers
#pprint.pformat(simulator.cloud.servers)
#simulator.cloud.show_usage()
))
if conf.power_freq_model is not False:
info('\n- freq. scale from {} to {} by {}.'.format(
conf.freq_scale_min, conf.freq_scale_max, conf.freq_scale_delta
))
info('\nRequests ({} -> will copy to: {})\n--------\n{}\n'.format(
common_loc('workload/requests.pkl'),
os.path.relpath(input_loc('requests.pkl')),
simulator.requests)
)
if conf.prompt_configuration:
prompt_res = raw_input('Config good? Press enter to continue...')
def archive_inputs(simulator):
"""copy input files together with the results (for archive reasons)"""
with open(input_loc('servers.pkl'), 'w') as pkl_srv:
pickle.dump(simulator.cloud, pkl_srv)
simulator.requests.to_pickle(input_loc('requests.pkl'))
def before_start(simulator):
log_config_info(simulator)
archive_inputs(simulator)
#-- simulation starter ------------------------------
# schedule.py routes straight to here
# TODO: make run a method of Simulator maybe?
def run(steps=None, custom_scheduler=None):
"""Run the simulation."""
info('\nSETTINGS\n########\n')
# create simulator from the conf
#-------------------------------
simulator = Simulator(conf.get_factory(), custom_scheduler)
before_start(simulator)
# run the simulation
#-------------------
info('\nSIMULATION\n##########\n')
start_time = datetime.now()
info('Simulation started at time: {}'.format(start_time))
cloud, env, schedule = simulator.run(steps)
info('RESULTS\n#######\n')
# serialise and log the results
#------------------------------
results = serialise_results(cloud, env, schedule)
end_time = datetime.now()
info('Simulation finished at time: {}'.format(end_time))
info('Duration: {}\n'.format(end_time - start_time))
return results
if __name__ == "__main__":
run()
#-----------------------------------------------------
| philharmonic/philharmonic | philharmonic/simulator/simulator.py | Python | gpl-3.0 | 12,350 |
import os
import numpy as np
from . import noise
from . import support
from . import circle
def generate_diff(config):
solid_unit = make_3D_duck(shape = config['sample']['shape'])
Solid_unit = np.fft.fftn(solid_unit, config['detector']['shape'])
solid_unit_expanded = np.fft.ifftn(Solid_unit)
diff = np.abs(Solid_unit)**2
# add noise
if config['detector']['photons'] is not None :
diff, edges = noise.add_noise_3d(diff, config['detector']['photons'], \
remove_courners = config['detector']['cut_courners'],\
unit_cell_size = config['sample']['diameter'])
else :
edges = np.ones_like(diff, dtype=np.bool)
# add circle
if config['detector']['add_circle'] is not None :
#print 'adding circular background:'
background_circle = np.max(diff) * 0.001 * ~circle.make_beamstop(diff.shape, config['detector']['add_circle'])
diff += background_circle
else :
background_circle = None
# define the solid_unit support
if config['sample']['support_frac'] is not None :
S = support.expand_region_by(solid_unit_expanded > 0.1, config['sample']['support_frac'])
else :
S = solid_unit_expanded > (solid_unit_expanded.min() + 1.0e-5)
# add a beamstop
if config['detector']['beamstop'] is not None :
beamstop = circle.make_beamstop(diff.shape, config['detector']['beamstop'])
diff *= beamstop
else :
beamstop = np.ones_like(diff, dtype=np.bool)
return diff, beamstop, background_circle, edges, S, solid_unit_expanded
def interp_3d(array, shapeout):
from scipy.interpolate import griddata
ijk = np.indices(array.shape)
points = np.zeros((array.size, 3), dtype=np.float)
points[:, 0] = ijk[0].ravel()
points[:, 1] = ijk[1].ravel()
points[:, 2] = ijk[2].ravel()
values = array.astype(np.float).ravel()
gridout = np.mgrid[0: array.shape[0]-1: shapeout[0]*1j, \
0: array.shape[1]-1: shapeout[1]*1j, \
0: array.shape[2]-1: shapeout[2]*1j]
arrayout = griddata(points, values, (gridout[0], gridout[1], gridout[2]), method='nearest')
return arrayout
def make_3D_duck(shape = (12, 25, 30)):
fnam = os.path.dirname(os.path.realpath(__file__))
fnam = os.path.join(fnam, 'duck_300_211_8bit.raw')
# call in a low res 2d duck image
duck = np.fromfile(fnam, dtype=np.int8).reshape((211, 300))
# convert to bool
duck = duck < 50
# make a 3d volume
duck3d = np.zeros( (100,) + duck.shape , dtype=np.bool)
# loop over the third dimension with an expanding circle
i, j = np.mgrid[0 :duck.shape[0], 0 :duck.shape[1]]
origin = [150, 150]
r = np.sqrt( ((i-origin[0])**2 + (j-origin[1])**2).astype(np.float) )
rs = range(50) + range(50, 0, -1)
rs = np.array(rs) * 200 / 50.
circle = lambda ri : r < ri
for z in range(duck3d.shape[0]):
duck3d[z, :, :] = circle(rs[z]) * duck
# now interpolate the duck onto the required grid
duck3d = interp_3d(duck3d, shape)
# get rid of the crap
duck3d[np.where(duck3d < 0.1)] = 0.0
return duck3d
if __name__ == '__main__':
duck3d = make_3D_duck()
| andyofmelbourne/crappy-crystals | utils/phasing_3d/utils/duck.py | Python | gpl-3.0 | 3,322 |
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
            self.assertTrue(re.match(r'<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
            self.assertTrue(re.match(r'<TestThread\(.*, stopped -?\d+\)>',
                                     repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
    def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
    def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
        self.assertFalse(tstate_lock.acquire(timeout=0))
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
        self.assertTrue(tstate_lock.acquire(timeout=5))
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects; they could be
        # mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_threading.py
|
Python
|
gpl-3.0
| 39,157
|
import argparse
import os
import logging
import numpy as np
import glyph.application
import glyph.assessment
from glyph.utils.argparse import (
positive_int,
non_negative_int,
np_infinity_int,
readable_file,
readable_yaml_file,
)
logger = logging.getLogger(__name__)
try:
import gooey
from gooey import Gooey, GooeyParser
@Gooey(
auto_start=False,
advanced=True,
encoding="utf-8",
language="english",
show_config=True,
default_size=(1200, 1000),
dump_build_config=False,
load_build_config=None,
monospace_display=False,
disable_stop_button=False,
show_stop_warning=True,
force_stop_is_error=True,
show_success_modal=True,
run_validators=True,
poll_external_updates=False,
return_to_config=False,
disable_progress_bar_animation=False,
navigation="SIDEBAR",
tabbed_groups=True,
navigation_title="Actions",
show_sidebar=False,
progress_regex=r"^.*INFO\D+\d+\D+(?P<gen>[0-9]+)\D+\d+[.]{1}\d+\D+\d+[.]{1}\d+.*$",
progress_expr="(gen + 1) % 10 / 10 * 100",
)
def get_gooey(prog="glyph-remote"):
probably_fork = "site-packages" not in gooey.__file__
logger.debug("Gooey located at {}.".format(gooey.__file__))
if not probably_fork:
logger.warning("GUI input validators may have no effect")
parser = GooeyParser(prog=prog)
return parser
GUI_AVAILABLE = True
except ImportError as e:
logger.error(e)
GUI_AVAILABLE = False
    GUI_UNAVAILABLE_MSG = """Could not start gui extension.
You need to install the gui extras.
Use the command 'pip install glyph[gui]' to do so."""
class MyGooeyMixin:
def add_argument(self, *args, **kwargs):
for key in ["widget"]:
if key in kwargs:
del kwargs[key]
super().add_argument(*args, **kwargs)
def add_mutually_exclusive_group(self, *args, **kwargs):
group = MutuallyExclusiveGroup(self, *args, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def add_argument_group(self, *args, **kwargs):
group = ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
class Parser(MyGooeyMixin, argparse.ArgumentParser):
pass
class ArgumentGroup(MyGooeyMixin, argparse._ArgumentGroup):
pass
class MutuallyExclusiveGroup(MyGooeyMixin, argparse._MutuallyExclusiveGroup):
pass
def get_parser(parser=None):
if parser is None:
parser = Parser()
if isinstance(parser, Parser):
parser.add_argument("--gui", action="store_true", default=False)
gui_active = GUI_AVAILABLE and isinstance(parser, GooeyParser)
parser.add_argument(
"--port", type=positive_int, default=5555, help="Port for the zeromq communication (default: 5555)",
)
parser.add_argument("--ip", type=str, default="localhost", help="IP of the client (default: localhost)")
parser.add_argument(
"--send_meta_data", action="store_true", default=False, help="Send metadata after each generation"
)
parser.add_argument(
"-v",
"--verbose",
type=str.upper,
dest="verbosity",
choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"],
default="INFO",
help="Set logging level",
)
parser.add_argument(
"--logging",
"-l",
dest="logging_config",
type=str,
default="logging.yaml",
help="set config file for logging; overrides --verbose (default: logging.yaml)",
widget="FileChooser",
)
config = parser.add_argument_group("config")
group = config.add_mutually_exclusive_group(required=gui_active)
group.add_argument(
"--remote",
action="store_true",
dest="remote",
default=False,
help="Request GP configs from experiment handler.",
)
group.add_argument(
"--cfile",
dest="cfile",
type=readable_yaml_file,
help="Read GP configs from file",
widget="FileChooser",
)
glyph.application.Application.add_options(parser)
cp_group = parser.add_mutually_exclusive_group(required=gui_active)
cp_group.add_argument("--ndim", type=positive_int, default=1)
cp_group.add_argument(
"--resume",
dest="resume_file",
metavar="FILE",
type=readable_file,
help="continue previous run from a checkpoint file",
widget="FileChooser",
)
cp_group.add_argument(
"-o",
dest="checkpoint_file",
metavar="FILE",
type=str,
default=os.path.join(".", "checkpoint.pickle"),
help="checkpoint to FILE (default: ./checkpoint.pickle)",
widget="FileChooser",
)
glyph.application.AlgorithmFactory.add_options(parser.add_argument_group("algorithm"))
group_breeding = parser.add_argument_group("breeding")
glyph.application.MateFactory.add_options(group_breeding)
glyph.application.MutateFactory.add_options(group_breeding)
glyph.application.SelectFactory.add_options(group_breeding)
glyph.application.CreateFactory.add_options(group_breeding)
ass_group = parser.add_argument_group("assessment")
ass_group.add_argument(
"--simplify",
action="store_true",
default=False,
help="Simplify expression before sending them. (default: False)",
)
ass_group.add_argument(
"--complexity_measure",
choices=["None"] + list(glyph.assessment.complexity_measures.keys()),
default=None,
help="Consider the complexity of solutions for MOO (default: None)",
)
ass_group.add_argument(
"--no_caching",
dest="caching",
action="store_false",
default=True,
help="Cache evaluation (default: False)",
)
ass_group.add_argument(
"--persistent_caching",
default=None,
help="Key for persistent data base cache for caching between experiments (default: None)",
)
ass_group.add_argument(
"--max_fev_const_opt",
type=non_negative_int,
default=100,
help="Maximum number of function evaluations for constant optimization (default: 100)",
)
ass_group.add_argument(
"--directions",
type=positive_int,
default=5,
help="Directions for the stochastic hill-climber (default: 5 only used in conjunction with --const_opt_method hill_climb)",
)
ass_group.add_argument(
"--precision", type=non_negative_int, default=3, help="Precision of constants (default: 3)",
)
ass_group.add_argument(
"--const_opt_method",
choices=["hill_climb", "Nelder-Mead"],
default="Nelder-Mead",
help="Algorithm to optimize constants given a structure (default: Nelder-Mead)",
)
ass_group.add_argument(
"--structural_constants",
action="store_true",
default=False,
help="Make use of structural constants. (default: False)",
)
ass_group.add_argument(
"--sc_min", type=float, default=-1, help="Minimum value of sc for scaling. (default: -1)"
)
ass_group.add_argument(
"--sc_max", type=float, default=1, help="Maximum value of sc for scaling. (default: 1)"
)
ass_group.add_argument(
"--smart", action="store_true", default=False, help="Use smart constant optimization. (default: False)"
)
ass_group.add_argument(
"--smart_step_size",
type=non_negative_int,
default=10,
help="Number of fev in iterative function optimization. (default: 10)",
)
ass_group.add_argument(
"--smart_min_stat",
type=non_negative_int,
default=10,
help="Number of samples required prior to stopping (default: 10)",
)
ass_group.add_argument(
"--smart_threshold",
type=non_negative_int,
default=25,
help="Quantile of improvement rate. Abort constant optimization if below (default: 25)",
)
ass_group.add_argument(
"--chunk_size",
type=positive_int,
default=30,
help="Number of individuals send per single request. (default: 30)",
)
ass_group.add_argument(
"--multi_objective",
action="store_true",
default=False,
help="Returned fitness is multi-objective (default: False)",
)
ass_group.add_argument(
"--send_symbolic",
action="store_true",
default=False,
help="Send the expression with symbolic constants (default: False)",
)
ass_group.add_argument(
"--re_evaluate",
action="store_true",
default=False,
help="Re-evaluate old individuals (default: False)",
)
break_condition = parser.add_argument_group("break condition")
break_condition.add_argument(
"--ttl",
type=int,
default=-1,
help="Time to life (in seconds) until soft shutdown. -1 = no ttl (default: -1)",
)
break_condition.add_argument(
"--target", type=float, default=0, help="Target error used in stopping criteria (default: 0)"
)
break_condition.add_argument(
"--max_iter_total",
type=np_infinity_int,
default=np.infty,
help="Maximum number of function evaluations (default: 'inf' [stands for np.infty])",
)
constraints = parser.add_argument_group("constraints")
glyph.application.ConstraintsFactory.add_options(constraints)
observer = parser.add_argument_group("observer")
observer.add_argument(
"--animate",
action="store_true",
default=False,
help="Animate the progress of evolutionary optimization. (default: False)",
)
return parser
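# Hedged usage sketch (not part of the original module): build the parser and
# read back two of the options defined above. Option names come from
# get_parser(); the sample argv is illustrative, and the glyph.application
# factories may register further options, hence parse_known_args.
if __name__ == "__main__":
    _parser = get_parser()
    _args, _unknown = _parser.parse_known_args(["--port", "5556", "--chunk_size", "10"])
    print(_args.port, _args.chunk_size)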
|
Ambrosys/glyph
|
glyph/cli/_parser.py
|
Python
|
lgpl-3.0
| 9,875
|
"""Platform for beewi_smartclim integration."""
from __future__ import annotations
from beewi_smartclim import BeewiSmartClimPoller # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
)
from homeassistant.const import CONF_MAC, CONF_NAME, PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
# Default values
DEFAULT_NAME = "BeeWi SmartClim"
# Sensor config
SENSOR_TYPES = [
[SensorDeviceClass.TEMPERATURE, "Temperature", TEMP_CELSIUS],
[SensorDeviceClass.HUMIDITY, "Humidity", PERCENTAGE],
[SensorDeviceClass.BATTERY, "Battery", PERCENTAGE],
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
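# Illustrative configuration.yaml entry for this platform; per the schema
# above only `mac` is required and `name` defaults to "BeeWi SmartClim".
# The MAC address below is a placeholder.
#
#   sensor:
#     - platform: beewi_smartclim
#       mac: "D0:5F:B8:00:00:00"
#       name: Garden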
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the beewi_smartclim platform."""
mac = config[CONF_MAC]
prefix = config[CONF_NAME]
poller = BeewiSmartClimPoller(mac)
sensors = []
for sensor_type in SENSOR_TYPES:
device = sensor_type[0]
name = sensor_type[1]
unit = sensor_type[2]
# `prefix` is the name configured by the user for the sensor, we're appending
# the device type at the end of the name (garden -> garden temperature)
if prefix:
name = f"{prefix} {name}"
sensors.append(BeewiSmartclimSensor(poller, name, mac, device, unit))
add_entities(sensors)
class BeewiSmartclimSensor(SensorEntity):
"""Representation of a Sensor."""
def __init__(self, poller, name, mac, device, unit):
"""Initialize the sensor."""
self._poller = poller
self._attr_name = name
self._device = device
self._attr_native_unit_of_measurement = unit
self._attr_device_class = self._device
self._attr_unique_id = f"{mac}_{device}"
def update(self):
"""Fetch new state data from the poller."""
self._poller.update_sensor()
self._attr_native_value = None
if self._device == SensorDeviceClass.TEMPERATURE:
self._attr_native_value = self._poller.get_temperature()
if self._device == SensorDeviceClass.HUMIDITY:
self._attr_native_value = self._poller.get_humidity()
if self._device == SensorDeviceClass.BATTERY:
self._attr_native_value = self._poller.get_battery()
|
rohitranjan1991/home-assistant
|
homeassistant/components/beewi_smartclim/sensor.py
|
Python
|
mit
| 2,783
|
#! python
import arcrest.admin
arcrest.admin.cmdline.convertcachestorageformat()
|
jasonbot/arcrest
|
cmdline/convertcachestorageformat.py
|
Python
|
apache-2.0
| 82
|
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
import pandas._period as period
from pandas._period import (Period, IncompatibleFrequency,
get_period_field_arr, _validate_end_alias,
_quarter_to_myear)
from pandas.core.base import _shared_docs
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object, ABCSeries,
is_integer, is_float, is_object_dtype)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util.decorators import Appender, cache_readonly, Substitution
from pandas.lib import Timedelta
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.core.missing as missing
from pandas.compat import zip, u
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self.values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
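# For example, the `year` accessor defined on PeriodIndex further below is
# built as _field_accessor('year', 0, "The year of the period"): reading
# idx.year resolves the frequency via _gfc and delegates to
# get_period_field_arr with alias 0 over the index's ordinal values.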
def _get_ordinals(data, freq):
f = lambda x: Period(x, freq=freq).ordinal
if isinstance(data[0], Period):
return period.extract_ordinals(data, freq)
else:
return lib.map_infer(data, f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self.values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = getattr(self.values, opname)(other.values)
mask = (missing.mask_missing(self.values, tslib.iNaT) |
missing.mask_missing(other.values, tslib.iNaT))
if mask.any():
result[mask] = nat_result
return result
else:
other = Period(other, freq=self.freq)
func = getattr(self.values, opname)
result = func(other.ordinal)
if other.ordinal == tslib.iNaT:
result.fill(nat_result)
mask = self.values == tslib.iNaT
if mask.any():
result[mask] = nat_result
return result
return wrapper
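# For instance, PeriodIndex(...) == Period('2014', freq='A-DEC') goes through
# the Period branch above: frequencies are checked, ordinals are compared, and
# positions holding iNaT come back as `nat_result`.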
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
    Index keys are boxed to Period objects which carry the metadata (e.g.,
    frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
        If periods is None, the generated index will extend to the first
        conforming period on or just past the end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'qyear', 'freq',
'days_in_month', 'daysinmonth',
'to_timestamp', 'asfreq', 'start_time', 'end_time']
_is_numeric_dtype = False
_infer_as_myclass = True
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, **kwargs):
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=False)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if not isinstance(data, (np.ndarray, PeriodIndex,
DatetimeIndex, Int64Index)):
if lib.isscalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = com._ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq).ordinal for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = com._ensure_object(data)
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
data = _get_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data.values,
base1, base2, 1)
else:
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
try:
data = com._ensure_int64(data)
except (TypeError, ValueError):
data = com._ensure_object(data)
data = _get_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not getattr(values, 'dtype', None):
values = np.array(values, copy=False)
if is_object_dtype(values):
return PeriodIndex(values, name=name, freq=freq, **kwargs)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified')
result.freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
def _shallow_copy(self, values=None, **kwargs):
if kwargs.get('freq') is None:
# freq must be provided
kwargs['freq'] = self.freq
return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
        we need to coerce a scalar to a compatible value for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
@property
def _na_value(self):
return self._box_func(tslib.iNaT)
def __contains__(self, key):
if not isinstance(key, Period) or key.freq != self.freq:
if isinstance(key, compat.string_types):
try:
self.get_loc(key)
return True
except Exception:
return False
return False
return key.ordinal in self._engine
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
Replace this to __numpy_ufunc__ in future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
return self._add_delta(context[1][1])
elif (func is np.subtract):
return self._add_delta(-context[1][1])
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
                    # from here because numpy catches it.
raise ValueError(msg.format(func.__name__))
if com.is_bool_dtype(result):
return result
return PeriodIndex(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
"""
return self.asobject.values
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self.values[mask].searchsorted(where_idx.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.values < self.values[first])] = -1
return result
def _array_values(self):
return self.asobject
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return Index(np.array(list(self), dtype), dtype)
elif dtype == _INT64_DTYPE:
return Index(self.values, dtype)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
@Substitution(klass='PeriodIndex', value='key')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, key, side='left', sorter=None):
if isinstance(key, Period):
if key.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, key.freqstr)
raise IncompatibleFrequency(msg)
key = key.ordinal
elif isinstance(key, compat.string_types):
key = Period(key, freq=self.freq).ordinal
return self.values.searchsorted(key, side=side, sorter=sorter)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
        Returns True if there are no missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
def asfreq(self, freq=None, how='E'):
"""
Convert the PeriodIndex to the specified frequency `freq`.
Parameters
----------
freq : str
a frequency
how : str {'E', 'S'}
'E', 'END', or 'FINISH' for end,
'S', 'START', or 'BEGIN' for start.
            Whether the elements should be aligned to the end
            or start within the period. January 31st ('END') vs.
            January 1st ('START') for example.
Returns
-------
new : PeriodIndex with the new frequency
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
<class 'pandas.tseries.period.PeriodIndex'>
[2010, ..., 2015]
Length: 6, Freq: A-DEC
>>> pidx.asfreq('M')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-12, ..., 2015-12]
Length: 6, Freq: M
>>> pidx.asfreq('M', how='S')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-01, ..., 2015-01]
Length: 6, Freq: M
"""
how = _validate_end_alias(how)
freq = frequencies.get_standard_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == 'E'
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period.period_asfreq_arr(ordinal, base1, base2, end)
if self.hasnans:
mask = asi8 == tslib.iNaT
new_data[mask] = tslib.iNaT
return self._simple_new(new_data, self.name, freq=freq)
def to_datetime(self, dayfirst=False):
return self.to_timestamp()
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor(
'dayofweek', 10, "The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
'dayofyear', 9, "The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor(
'days_in_month', 11, "The number of days in the month")
daysinmonth = days_in_month
@property
def start_time(self):
return self.to_timestamp(how='start')
@property
def end_time(self):
return self.to_timestamp(how='end')
def _get_object_array(self):
freq = self.freq
return np.array([Period._from_ordinal(ordinal=x, freq=freq)
for x in self.values], copy=False)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self._get_object_array()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'int64'):
try:
other = PeriodIndex(other)
except:
return False
return np.array_equal(self.asi8, other.asi8)
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
def _maybe_convert_timedelta(self, other):
if isinstance(other, (timedelta, np.timedelta64,
offsets.Tick, Timedelta)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
return nanos // offset_nanos
elif isinstance(other, offsets.DateOffset):
freqstr = frequencies.get_standard_freq(other)
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
elif isinstance(other, np.ndarray):
if com.is_integer_dtype(other):
return other
elif com.is_timedelta64_dtype(other):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
                    if (nanos % offset_nanos == 0).all():
return nanos // offset_nanos
# raise when input doesn't have freq
msg = "Input has different freq from PeriodIndex(freq={0})"
raise IncompatibleFrequency(msg.format(self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self.shift(ordinal_delta)
def shift(self, n):
"""
        Specialized shift which produces a PeriodIndex
Parameters
----------
n : int
Periods to shift by
Returns
-------
shifted : PeriodIndex
"""
values = self.values + n * self.freq.n
if self.hasnans:
values[self._isnan] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
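    # Hedged example: for a daily index, shift(2) adds 2 * self.freq.n to each
    # ordinal, moving every period two days forward; NaT ordinals are restored
    # by the hasnans branch above.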
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return self.inferred_type
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
try:
return _maybe_box(self, super(PeriodIndex, self).get_value(s, key),
series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies.Resolution.get_freq_group(reso)
freqn = frequencies.get_freq_group(self.freq)
vals = self.values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self.values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key),
series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key),
series, key)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
if hasattr(target, 'freq') and target.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr)
raise IncompatibleFrequency(msg)
return Index.get_indexer(self, target, method, limit, tolerance)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
try:
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
# as we have an invalid type
raise KeyError(key)
try:
return Index.get_loc(self, key.ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem']
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, compat.string_types):
try:
_, parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == 'left' else 1]
except Exception:
raise KeyError(label)
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice', label)
return label
def _parsed_string_to_bounds(self, reso, parsed):
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second, freq='S')
else:
raise KeyError(reso)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
key, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies.Resolution.get_freq_group(reso)
freqn = frequencies.get_freq_group(self.freq)
if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
raise KeyError(key)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
def _convert_tolerance(self, tolerance):
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance)
return self._maybe_convert_timedelta(tolerance)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
super(PeriodIndex, self)._assert_can_do_setop(other)
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex(rawarr, freq=self.freq)
return rawarr
def __getitem__(self, key):
getitem = self._data.__getitem__
if lib.isscalar(key):
val = getitem(key)
return Period(ordinal=val, freq=self.freq)
else:
if com.is_bool_indexer(key):
key = np.asarray(key)
result = getitem(key)
if result.ndim > 1:
# MPL kludge
# values = np.asarray(list(values), dtype=object)
# return values.reshape(result.shape)
return PeriodIndex(result, name=self.name, freq=self.freq)
return PeriodIndex(result, name=self.name, freq=self.freq)
def _format_native_types(self, na_rep=u('NaT'), date_format=None,
**kwargs):
values = np.array(list(self), dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
imask = ~mask
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: u('%s') % dt
values[imask] = np.array([formatter(dt) for dt in values[imask]])
return values
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
if isinstance(to_concat[0], PeriodIndex):
if len(set([x.freq for x in to_concat])) > 1:
# box
to_concat = [x.asobject.values for x in to_concat]
else:
cat_values = np.concatenate([x.values for x in to_concat])
return PeriodIndex(cat_values, freq=self.freq, name=name)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
def repeat(self, n, *args, **kwargs):
"""
Return a new Index of the values repeated `n` times.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
# overwrites method from DatetimeIndexOpsMixin
return self._shallow_copy(self.values.repeat(n))
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(PeriodIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backcompat
self.freq = Period._maybe_convert_freq(own_state[1])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(self, state)
self._data = data
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using
pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using
pytz/dateutil), or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
infer_dst : boolean, default False
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify 2 of start, end, periods')
if freq is not None:
_, mult = _gfc(freq)
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('Start and end must have same freq')
if ((is_start_per and start.ordinal == tslib.iNaT) or
(is_end_per and end.ordinal == tslib.iNaT)):
raise ValueError('Start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(end.ordinal - periods + mult,
end.ordinal + 1, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = frequencies.FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if base != frequencies.FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(period.period_ordinal(
y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) for x in fields]
return arrays
def pnow(freq=None):
return Period(datetime.now(), freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
    Return a fixed frequency PeriodIndex, with day (calendar) as the default
    frequency
Parameters
----------
start : starting value, period-like, optional
end : ending value, period-like, optional
periods : int, default None
Number of periods in the index
freq : str/DateOffset, default 'D'
Frequency alias
name : str, default None
Name for the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
"""
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
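# Hedged usage sketch (the repr shown is indicative of this pandas vintage and
# may differ across versions):
#
#   >>> period_range(start='2014-01-01', periods=3, freq='D')
#   PeriodIndex(['2014-01-01', '2014-01-02', '2014-01-03'], dtype='int64', freq='D')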
|
BigDataforYou/movie_recommendation_workshop_1
|
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tseries/period.py
|
Python
|
mit
| 38,437
|
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from django.shortcuts import render as dj_render
from .. import site
from ..auth import is_admin_session, update_admin_session
from .auth import login
__ALL__ = ['get_protected_namespace', 'render', 'protected_admin_view']
def get_protected_namespace(request):
for namespace in settings.MISAGO_ADMIN_NAMESPACES:
try:
admin_path = reverse('%s:index' % namespace)
if request.path_info.startswith(admin_path):
return namespace
except NoReverseMatch:
pass
else:
return None
def render(request, template, context=None, error_page=False):
context = context or {}
navigation = site.visible_branches(request)
sections = navigation[0]
try:
actions = navigation[1]
except IndexError:
actions = []
try:
pages = navigation[2]
except IndexError:
pages = []
context.update({'sections': sections, 'actions': actions, 'pages': pages})
if error_page:
        # admittedly hacky solution for displaying navs on error pages
context['actions'] = []
context['pages'] = []
for item in navigation[0]:
item['is_active'] = False
else:
context['active_link'] = None
for item in navigation[-1]:
if item['is_active']:
context['active_link'] = item
break
return dj_render(request, template, context)
# Decorator for views
def protected_admin_view(f):
def decorator(request, *args, **kwargs):
protected_view = get_protected_namespace(request)
if protected_view:
if is_admin_session(request):
update_admin_session(request)
return f(request, *args, **kwargs)
else:
request.admin_namespace = protected_view
return login(request)
else:
return f(request, *args, **kwargs)
return decorator
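# Hedged usage sketch: `dashboard` is a hypothetical view; decorating it makes
# requests inside a protected admin namespace pass the admin-session check (or
# be redirected to the login view) before the view body runs.
#
#   @protected_admin_view
#   def dashboard(request):
#       return render(request, 'misago/admin/dashboard.html')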
|
1905410/Misago
|
misago/admin/views/__init__.py
|
Python
|
gpl-2.0
| 2,051
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Packaging module for snf-cyclades-gtools"""
import os
from imp import load_source
from setuptools import setup
HERE = os.path.abspath(os.path.normpath(os.path.dirname(__file__)))
VERSION_PY = os.path.join(HERE, 'synnefo', 'versions', 'ganeti.py')
setup(
name="snf-cyclades-gtools",
version=getattr(load_source('VERSION', VERSION_PY), '__version__'),
description="Synnefo tools for interaction with Ganeti",
url="http://www.synnefo.org/",
author='Synnefo development team',
author_email='synnefo-devel@googlegroups.com',
maintainer='Synnefo development team',
maintainer_email='synnefo-devel@googlegroups.com',
license="GNU GPLv3",
namespace_packages=["synnefo", "synnefo.versions"],
packages=["synnefo", "synnefo.ganeti", "synnefo.versions"],
dependency_links=['http://www.synnefo.org/packages/pypi'],
install_requires=[
'snf-common',
'python-daemon>=1.5.5',
'pyinotify>=0.8.9',
'puka',
'setproctitle>=1.0.1'
],
entry_points={
'console_scripts': [
'snf-ganeti-eventd = synnefo.ganeti.eventd:main',
'snf-progress-monitor = synnefo.ganeti.progress_monitor:main'
],
'synnefo': [
'default_settings = synnefo.ganeti.settings'
]
},
)
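# The console_scripts entry points above install shell commands, e.g.
# `snf-ganeti-eventd`, which dispatches to synnefo.ganeti.eventd:main.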
|
grnet/synnefo
|
snf-cyclades-gtools/setup.py
|
Python
|
gpl-3.0
| 2,033
|
import json
with open('raw.json') as json_file:
    data = json.load(json_file)
elements = {}
for elt in data['PERIODIC_TABLE']['ATOM']:
symbol = elt['SYMBOL']
Z = elt['ATOMIC_NUMBER']
elements[symbol] = {'Z': Z}
with open('elements.json', 'w') as f:
    f.write(json.dumps(elements))
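# Assumed shape of raw.json (inferred from the keys accessed above, not from
# the actual GRIFFIN data):
#
#   {"PERIODIC_TABLE": {"ATOM": [{"SYMBOL": "H", "ATOMIC_NUMBER": 1}, ...]}}
#
# which would yield elements.json along the lines of {"H": {"Z": 1}, ...}.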
|
GRIFFINCollaboration/beamCompanionExplorer
|
munging/elements/munge.py
|
Python
|
mit
| 279
|
# -*- coding: utf-8 -*-
"""
Tests for the user interface elements of Mu.
"""
from PyQt5.QtWidgets import QApplication, QMessageBox, QLabel
from PyQt5.QtChart import QChart, QLineSeries, QValueAxis
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QTextCursor
from unittest import mock
import sys
import os
import signal
import mu
import platform
from collections import deque
import mu.interface.panes
# Required so the QWidget tests don't abort with the message:
# "QWidget: Must construct a QApplication before a QWidget"
# The QApplication need only be instantiated once.
app = QApplication([])
def test_PANE_ZOOM_SIZES():
"""
Ensure the expected entries define font sizes in PANE_ZOOM_SIZES.
"""
expected_sizes = ('xs', 's', 'm', 'l', 'xl', 'xxl', 'xxxl')
for size in expected_sizes:
assert size in mu.interface.panes.PANE_ZOOM_SIZES
assert len(expected_sizes) == len(mu.interface.panes.PANE_ZOOM_SIZES)
def test_MicroPythonREPLPane_init_default_args():
"""
Ensure the MicroPython REPLPane object is instantiated as expected.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
assert rp.serial == mock_serial
def test_MicroPythonREPLPane_paste():
"""
Pasting into the REPL should send bytes via the serial connection.
"""
mock_serial = mock.MagicMock()
mock_clipboard = mock.MagicMock()
mock_clipboard.text.return_value = 'paste me!'
mock_application = mock.MagicMock()
mock_application.clipboard.return_value = mock_clipboard
with mock.patch('mu.interface.panes.QApplication', mock_application):
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.paste()
mock_serial.write.assert_called_once_with(bytes('paste me!', 'utf8'))
def test_MicroPythonREPLPane_paste_handle_unix_newlines():
"""
Pasting into the REPL should handle '\n' properly.
'\n' -> '\r'
"""
mock_serial = mock.MagicMock()
mock_clipboard = mock.MagicMock()
mock_clipboard.text.return_value = 'paste\nme!'
mock_application = mock.MagicMock()
mock_application.clipboard.return_value = mock_clipboard
with mock.patch('mu.interface.panes.QApplication', mock_application):
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.paste()
mock_serial.write.assert_called_once_with(bytes('paste\rme!', 'utf8'))
def test_MicroPythonREPLPane_paste_handle_windows_newlines():
"""
Pasting into the REPL should handle '\r\n' properly.
'\r\n' -> '\r'
"""
mock_serial = mock.MagicMock()
mock_clipboard = mock.MagicMock()
mock_clipboard.text.return_value = 'paste\r\nme!'
mock_application = mock.MagicMock()
mock_application.clipboard.return_value = mock_clipboard
with mock.patch('mu.interface.panes.QApplication', mock_application):
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.paste()
mock_serial.write.assert_called_once_with(bytes('paste\rme!', 'utf8'))
def test_MicroPythonREPLPane_paste_only_works_if_there_is_something_to_paste():
"""
Pasting into the REPL should send bytes via the serial connection.
"""
mock_serial = mock.MagicMock()
mock_clipboard = mock.MagicMock()
mock_clipboard.text.return_value = ''
mock_application = mock.MagicMock()
mock_application.clipboard.return_value = mock_clipboard
with mock.patch('mu.interface.panes.QApplication', mock_application):
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.paste()
assert mock_serial.write.call_count == 0
def test_MicroPythonREPLPane_context_menu():
"""
Ensure the context menu for the REPL is configured correctly for non-OSX
platforms.
"""
mock_serial = mock.MagicMock()
mock_platform = mock.MagicMock()
mock_platform.system.return_value = 'WinNT'
mock_qmenu = mock.MagicMock()
mock_qmenu_class = mock.MagicMock(return_value=mock_qmenu)
with mock.patch('mu.interface.panes.platform', mock_platform), \
mock.patch('mu.interface.panes.QMenu', mock_qmenu_class), \
mock.patch('mu.interface.panes.QCursor'):
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.context_menu()
assert mock_qmenu.addAction.call_count == 2
copy_action = mock_qmenu.addAction.call_args_list[0][0]
assert copy_action[0] == 'Copy'
assert copy_action[1] == rp.copy
assert copy_action[2].toString() == 'Ctrl+Shift+C'
paste_action = mock_qmenu.addAction.call_args_list[1][0]
assert paste_action[0] == 'Paste'
assert paste_action[1] == rp.paste
assert paste_action[2].toString() == 'Ctrl+Shift+V'
assert mock_qmenu.exec_.call_count == 1
def test_MicroPythonREPLPane_context_menu_darwin():
"""
    Ensure the context menu for the REPL is configured correctly for OSX
    platforms.
"""
mock_serial = mock.MagicMock()
mock_platform = mock.MagicMock()
mock_platform.system.return_value = 'Darwin'
mock_qmenu = mock.MagicMock()
mock_qmenu_class = mock.MagicMock(return_value=mock_qmenu)
with mock.patch('mu.interface.panes.platform', mock_platform), \
mock.patch('mu.interface.panes.QMenu', mock_qmenu_class), \
mock.patch('mu.interface.panes.QCursor'):
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.context_menu()
assert mock_qmenu.addAction.call_count == 2
copy_action = mock_qmenu.addAction.call_args_list[0][0]
assert copy_action[0] == 'Copy'
assert copy_action[1] == rp.copy
assert copy_action[2].toString() == 'Ctrl+C'
paste_action = mock_qmenu.addAction.call_args_list[1][0]
assert paste_action[0] == 'Paste'
assert paste_action[1] == rp.paste
assert paste_action[2].toString() == 'Ctrl+V'
assert mock_qmenu.exec_.call_count == 1
def test_MicroPythonREPLPane_keyPressEvent():
"""
Ensure key presses in the REPL are handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
    data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_A)
data.text = mock.MagicMock(return_value='a')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(bytes('a', 'utf-8'))
def test_MicroPythonREPLPane_keyPressEvent_backspace():
"""
Ensure backspaces in the REPL are handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_Backspace)
data.text = mock.MagicMock(return_value='\b')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(b'\b')
def test_MicroPythonREPLPane_keyPressEvent_delete():
"""
Ensure delete in the REPL is handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_Delete)
data.text = mock.MagicMock(return_value='\b')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(b'\x1B[\x33\x7E')
def test_MicroPythonREPLPane_keyPressEvent_up():
"""
Ensure up arrows in the REPL are handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_Up)
data.text = mock.MagicMock(return_value='1b')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(b'\x1B[A')
def test_MicroPythonREPLPane_keyPressEvent_down():
"""
Ensure down arrows in the REPL are handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_Down)
data.text = mock.MagicMock(return_value='1b')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(b'\x1B[B')
def test_MicroPythonREPLPane_keyPressEvent_right():
"""
Ensure right arrows in the REPL are handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_Right)
data.text = mock.MagicMock(return_value='1b')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(b'\x1B[C')
def test_MicroPythonREPLPane_keyPressEvent_left():
"""
Ensure left arrows in the REPL are handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_Left)
data.text = mock.MagicMock(return_value='1b')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(b'\x1B[D')
def test_MicroPythonREPLPane_keyPressEvent_home():
"""
Ensure home key in the REPL is handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_Home)
data.text = mock.MagicMock(return_value='1b')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(b'\x1B[H')
def test_MicroPythonREPLPane_keyPressEvent_end():
"""
Ensure end key in the REPL is handled correctly.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_End)
data.text = mock.MagicMock(return_value='1b')
data.modifiers = mock.MagicMock(return_value=None)
rp.keyPressEvent(data)
mock_serial.write.assert_called_once_with(b'\x1B[F')
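# For reference, the byte sequences asserted in the cursor-key tests above
# are standard VT100 escape codes. A summary table (for documentation only;
# the names mirror the Qt keys used in the tests):
VT100_KEY_CODES = {
    Qt.Key_Up: b'\x1B[A',
    Qt.Key_Down: b'\x1B[B',
    Qt.Key_Right: b'\x1B[C',
    Qt.Key_Left: b'\x1B[D',
    Qt.Key_Home: b'\x1B[H',
    Qt.Key_End: b'\x1B[F',
    Qt.Key_Delete: b'\x1B[\x33\x7E',  # i.e. ESC [ 3 ~
}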
def test_MicroPythonREPLPane_keyPressEvent_CTRL_C_Darwin():
"""
Ensure Ctrl-Shift-C in the REPL invokes the copy method.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.copy = mock.MagicMock()
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_C)
data.text = mock.MagicMock(return_value='1b')
data.modifiers.return_value = Qt.ControlModifier | Qt.ShiftModifier
rp.keyPressEvent(data)
rp.copy.assert_called_once_with()
def test_MicroPythonREPLPane_keyPressEvent_CTRL_V_Darwin():
"""
Ensure Ctrl-Shift-V in the REPL invokes the paste method.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.paste = mock.MagicMock()
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_V)
data.text = mock.MagicMock(return_value='1b')
data.modifiers.return_value = Qt.ControlModifier | Qt.ShiftModifier
rp.keyPressEvent(data)
rp.paste.assert_called_once_with()
def test_MicroPythonREPLPane_keyPressEvent_meta():
"""
Ensure Ctrl/Meta + letter key presses are translated into the equivalent
ASCII control byte and sent via the serial connection.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_M)
data.text = mock.MagicMock(return_value='a')
if platform.system() == 'Darwin':
data.modifiers = mock.MagicMock(return_value=Qt.MetaModifier)
else:
data.modifiers = mock.MagicMock(return_value=Qt.ControlModifier)
rp.keyPressEvent(data)
expected = 1 + Qt.Key_M - Qt.Key_A
mock_serial.write.assert_called_once_with(bytes([expected]))
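# The expected byte above follows the classic terminal rule: Ctrl+<letter>
# maps onto ASCII control codes 1..26 (Ctrl+A == 0x01 ... Ctrl+Z == 0x1A),
# so Ctrl+M yields 13, i.e. b'\r'. A minimal sketch of the arithmetic;
# _control_byte is a hypothetical helper, not Mu's implementation.
def _control_byte(key):
    """Return the ASCII control byte for a Ctrl+<letter> key press."""
    return bytes([1 + key - Qt.Key_A])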
def test_MicroPythonREPLPane_process_bytes():
"""
Ensure bytes coming from the device to the application are processed as
expected: backspace is enacted, carriage-return is ignored, newline moves
the cursor position to the end of the line before being enacted, and all
other bytes are simply inserted.
"""
mock_serial = mock.MagicMock()
mock_tc = mock.MagicMock()
mock_tc.movePosition = mock.MagicMock(side_effect=[True, False, True,
True])
mock_tc.deleteChar = mock.MagicMock(return_value=None)
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.textCursor = mock.MagicMock(return_value=mock_tc)
rp.setTextCursor = mock.MagicMock(return_value=None)
rp.insertPlainText = mock.MagicMock(return_value=None)
rp.ensureCursorVisible = mock.MagicMock(return_value=None)
bs = bytes([8, 13, 10, 65, ]) # \b, \r, \n, 'A'
rp.process_bytes(bs)
rp.textCursor.assert_called_once_with()
assert mock_tc.movePosition.call_count == 4
assert mock_tc.movePosition.call_args_list[0][0][0] == QTextCursor.Down
assert mock_tc.movePosition.call_args_list[1][0][0] == QTextCursor.Down
assert mock_tc.movePosition.call_args_list[2][0][0] == QTextCursor.Left
assert mock_tc.movePosition.call_args_list[3][0][0] == QTextCursor.End
assert rp.setTextCursor.call_count == 3
assert rp.setTextCursor.call_args_list[0][0][0] == mock_tc
assert rp.setTextCursor.call_args_list[1][0][0] == mock_tc
assert rp.setTextCursor.call_args_list[2][0][0] == mock_tc
assert rp.insertPlainText.call_count == 2
assert rp.insertPlainText.call_args_list[0][0][0] == chr(10)
assert rp.insertPlainText.call_args_list[1][0][0] == chr(65)
rp.ensureCursorVisible.assert_called_once_with()
def test_MicroPythonREPLPane_process_bytes_VT100():
"""
Ensure bytes coming from the device to the application are processed as
expected. In this case, make sure VT100 related codes are handled properly.
"""
mock_serial = mock.MagicMock()
mock_tc = mock.MagicMock()
mock_tc.movePosition = mock.MagicMock(return_value=False)
mock_tc.removeSelectedText = mock.MagicMock()
mock_tc.deleteChar = mock.MagicMock(return_value=None)
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.textCursor = mock.MagicMock(return_value=mock_tc)
rp.setTextCursor = mock.MagicMock(return_value=None)
rp.insertPlainText = mock.MagicMock(return_value=None)
rp.ensureCursorVisible = mock.MagicMock(return_value=None)
bs = bytes([
27, 91, ord('1'), ord('A'), # <Esc>[1A
27, 91, ord('1'), ord('B'), # <Esc>[1B
27, 91, ord('1'), ord('C'), # <Esc>[1C
27, 91, ord('1'), ord('D'), # <Esc>[1D
27, 91, ord('K'), # <Esc>[K
])
rp.process_bytes(bs)
rp.textCursor.assert_called_once_with()
assert mock_tc.movePosition.call_count == 6
assert mock_tc.movePosition.call_args_list[0][0][0] == QTextCursor.Down
assert mock_tc.movePosition.call_args_list[1][0][0] == QTextCursor.Up
assert mock_tc.movePosition.call_args_list[2][0][0] == QTextCursor.Down
assert mock_tc.movePosition.call_args_list[3][0][0] == \
QTextCursor.Right
assert mock_tc.movePosition.call_args_list[4][0][0] == QTextCursor.Left
assert mock_tc.movePosition.call_args_list[5][0][0] == \
QTextCursor.EndOfLine
assert mock_tc.movePosition.call_args_list[5][1]['mode'] == \
QTextCursor.KeepAnchor
assert rp.setTextCursor.call_count == 5
assert rp.setTextCursor.call_args_list[0][0][0] == mock_tc
assert rp.setTextCursor.call_args_list[1][0][0] == mock_tc
assert rp.setTextCursor.call_args_list[2][0][0] == mock_tc
assert rp.setTextCursor.call_args_list[3][0][0] == mock_tc
assert rp.setTextCursor.call_args_list[4][0][0] == mock_tc
mock_tc.removeSelectedText.assert_called_once_with()
rp.ensureCursorVisible.assert_called_once_with()
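# The incoming <Esc>[nA/B/C/D sequences handled above are VT100 cursor moves
# (up/down/right/left by n), and <Esc>[K erases from the cursor to the end
# of the line, which the pane models as a KeepAnchor move to EndOfLine
# followed by removeSelectedText().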
def test_MicroPythonREPLPane_clear():
"""
Ensure setText is called with an empty string.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.setText = mock.MagicMock(return_value=None)
rp.clear()
rp.setText.assert_called_once_with('')
def test_MicroPythonREPLPane_set_font_size():
"""
Ensure the font is updated to the expected point size.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
mock_font = mock.MagicMock()
rp.font = mock.MagicMock(return_value=mock_font)
rp.setFont = mock.MagicMock()
rp.set_font_size(123)
mock_font.setPointSize.assert_called_once_with(123)
rp.setFont.assert_called_once_with(mock_font)
def test_MicroPythonREPLPane_set_zoom():
"""
Ensure the font size is correctly set from the t-shirt size.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.set_font_size = mock.MagicMock()
rp.set_zoom('xxl')
expected = mu.interface.panes.PANE_ZOOM_SIZES['xxl']
rp.set_font_size.assert_called_once_with(expected)
def test_MicroPythonREPLPane_send_commands():
"""
Ensure the list of commands is correctly encoded and bound by control
commands to put the board into and out of raw mode.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
rp.execute = mock.MagicMock()
commands = [
"import os",
"print(os.listdir())",
]
rp.send_commands(commands)
expected = [
b'\x02', # Ctrl-B: ensure the board is not already in raw mode.
b'\r\x03', # Ctrl-C (three times): interrupt any running program.
b'\r\x03',
b'\r\x03',
b'\r\x01', # Ctrl-A: put the board into raw mode.
b'print("\\n")\r', # Ensure a newline at the start of output.
b'import os\r', # The commands to run.
b'print(os.listdir())\r',
b'\r', # Ensure newline after commands.
b'\x04', # Evaluate the commands.
b'\x02', # Leave raw mode.
]
rp.execute.assert_called_once_with(expected)
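# For reference, the MicroPython REPL control characters exercised above:
# Ctrl-A (0x01) enters raw mode, Ctrl-B (0x02) leaves it, Ctrl-C (0x03)
# interrupts a running program and Ctrl-D (0x04) evaluates the buffered
# input while in raw mode.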
def test_MicroPythonREPLPane_execute():
"""
Ensure the first command is sent via serial to the connected device, and
further commands are scheduled for the future.
"""
mock_serial = mock.MagicMock()
rp = mu.interface.panes.MicroPythonREPLPane(mock_serial)
commands = [b'A', b'B', ]
with mock.patch('mu.interface.panes.QTimer') as mock_timer:
rp.execute(commands)
mock_serial.write.assert_called_once_with(b'A')
assert mock_timer.singleShot.call_count == 1
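# execute() deliberately drip-feeds the commands: the first is written
# immediately and the rest are scheduled with QTimer.singleShot, giving the
# Qt event loop a chance to process output from the device between writes.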
def test_MuFileList_show_confirm_overwrite_dialog():
"""
Ensure the user is notified of an existing file.
"""
mfl = mu.interface.panes.MuFileList()
mock_qmb = mock.MagicMock()
mock_qmb.setIcon = mock.MagicMock(return_value=None)
mock_qmb.setText = mock.MagicMock(return_value=None)
mock_qmb.setWindowTitle = mock.MagicMock(return_value=None)
mock_qmb.exec_ = mock.MagicMock(return_value=QMessageBox.Ok)
mock_qmb_class = mock.MagicMock(return_value=mock_qmb)
mock_qmb_class.Ok = QMessageBox.Ok
mock_qmb_class.Information = QMessageBox.Information
with mock.patch('mu.interface.panes.QMessageBox', mock_qmb_class):
assert mfl.show_confirm_overwrite_dialog()
msg = 'File already exists; overwrite it?'
mock_qmb.setText.assert_called_once_with(msg)
mock_qmb.setWindowTitle.assert_called_once_with('File already exists')
mock_qmb.setIcon.assert_called_once_with(QMessageBox.Information)
def test_MicroPythonDeviceFileList_init():
"""
Check the widget references the user's home and allows drag and drop.
"""
mfs = mu.interface.panes.MicroPythonDeviceFileList('home/path')
assert mfs.home == 'home/path'
assert mfs.dragDropMode() == mfs.DragDrop
def test_MicroPythonDeviceFileList_dropEvent():
"""
Ensure a valid drop event is handled as expected.
"""
mock_event = mock.MagicMock()
source = mu.interface.panes.LocalFileList('homepath')
mock_item = mock.MagicMock()
mock_item.text.return_value = 'foo.py'
source.currentItem = mock.MagicMock(return_value=mock_item)
mock_event.source.return_value = source
mfs = mu.interface.panes.MicroPythonDeviceFileList('homepath')
mfs.disable = mock.MagicMock()
mfs.set_message = mock.MagicMock()
mfs.put = mock.MagicMock()
# Test
mfs.dropEvent(mock_event)
fn = os.path.join('homepath', 'foo.py')
assert mfs.set_message.emit.call_count == 1
mfs.put.emit.assert_called_once_with(fn)
def test_MicroPythonDeviceFileList_dropEvent_wrong_source():
"""
Ensure that only drop events whose origins are LocalFileList objects are
handled.
"""
mock_event = mock.MagicMock()
source = mock.MagicMock()
mock_event.source.return_value = source
mfs = mu.interface.panes.MicroPythonDeviceFileList('homepath')
mfs.findItems = mock.MagicMock()
mfs.dropEvent(mock_event)
assert mfs.findItems.call_count == 0
def test_MicroPythonDeviceFileList_on_put():
"""
A message and list_files signal should be emitted.
"""
mfs = mu.interface.panes.MicroPythonDeviceFileList('homepath')
mfs.set_message = mock.MagicMock()
mfs.list_files = mock.MagicMock()
mfs.on_put('my_file.py')
msg = "'my_file.py' successfully copied to micro:bit."
mfs.set_message.emit.assert_called_once_with(msg)
mfs.list_files.emit.assert_called_once_with()
def test_MicroPythonDeviceFileList_contextMenuEvent():
"""
Ensure that the menu displayed when a file on the micro:bit is
right-clicked works as expected when activated.
"""
mock_menu = mock.MagicMock()
mock_action = mock.MagicMock()
mock_menu.addAction.return_value = mock_action
mock_menu.exec_.return_value = mock_action
mfs = mu.interface.panes.MicroPythonDeviceFileList('homepath')
mock_current = mock.MagicMock()
mock_current.text.return_value = 'foo.py'
mfs.currentItem = mock.MagicMock(return_value=mock_current)
mfs.disable = mock.MagicMock()
mfs.set_message = mock.MagicMock()
mfs.delete = mock.MagicMock()
mfs.mapToGlobal = mock.MagicMock()
mock_event = mock.MagicMock()
with mock.patch('mu.interface.panes.QMenu', return_value=mock_menu):
mfs.contextMenuEvent(mock_event)
mfs.disable.emit.assert_called_once_with()
assert mfs.set_message.emit.call_count == 1
mfs.delete.emit.assert_called_once_with('foo.py')
def test_MicroPythonFileList_on_delete():
"""
On delete should emit a message and list_files signal.
"""
mfs = mu.interface.panes.MicroPythonDeviceFileList('homepath')
mfs.set_message = mock.MagicMock()
mfs.list_files = mock.MagicMock()
mfs.on_delete('my_file.py')
msg = "'my_file.py' successfully deleted from micro:bit."
mfs.set_message.emit.assert_called_once_with(msg)
mfs.list_files.emit.assert_called_once_with()
def test_LocalFileList_init():
"""
Ensure the class instantiates with the expected state.
"""
lfl = mu.interface.panes.LocalFileList('home/path')
assert lfl.home == 'home/path'
assert lfl.dragDropMode() == lfl.DragDrop
def test_LocalFileList_dropEvent():
"""
Ensure a valid drop event is handled as expected.
"""
mock_event = mock.MagicMock()
source = mu.interface.panes.MicroPythonDeviceFileList('homepath')
mock_item = mock.MagicMock()
mock_item.text.return_value = 'foo.py'
source.currentItem = mock.MagicMock(return_value=mock_item)
mock_event.source.return_value = source
lfs = mu.interface.panes.LocalFileList('homepath')
lfs.disable = mock.MagicMock()
lfs.set_message = mock.MagicMock()
lfs.get = mock.MagicMock()
# Test
lfs.dropEvent(mock_event)
fn = os.path.join('homepath', 'foo.py')
lfs.disable.emit.assert_called_once_with()
assert lfs.set_message.emit.call_count == 1
lfs.get.emit.assert_called_once_with('foo.py', fn)
def test_LocalFileList_dropEvent_wrong_source():
"""
Ensure that only drop events whose origins are MicroPythonDeviceFileList
objects are handled.
"""
mock_event = mock.MagicMock()
source = mock.MagicMock()
mock_event.source.return_value = source
lfs = mu.interface.panes.LocalFileList('homepath')
lfs.findItems = mock.MagicMock()
lfs.dropEvent(mock_event)
assert lfs.findItems.call_count == 0
def test_LocalFileList_on_get():
"""
On get should emit two signals: a message and list_files.
"""
lfs = mu.interface.panes.LocalFileList('homepath')
lfs.set_message = mock.MagicMock()
lfs.list_files = mock.MagicMock()
lfs.on_get('my_file.py')
msg = ("Successfully copied 'my_file.py' from the micro:bit "
"to your computer.")
lfs.set_message.emit.assert_called_once_with(msg)
lfs.list_files.emit.assert_called_once_with()
def test_LocalFileList_contextMenuEvent():
"""
Ensure that the menu displayed when a local file is
right-clicked works as expected when activated.
"""
mock_menu = mock.MagicMock()
mock_action_first = mock.MagicMock()
mock_action_second = mock.MagicMock()
mock_menu.addAction.side_effect = [mock_action_first,
mock_action_second]
mock_menu.exec_.return_value = mock_action_first
mfs = mu.interface.panes.LocalFileList('homepath')
mock_open = mock.MagicMock()
mfs.open_file = mock.MagicMock()
mfs.open_file.emit = mock_open
mock_current = mock.MagicMock()
mock_current.text.return_value = 'foo.py'
mfs.currentItem = mock.MagicMock(return_value=mock_current)
mfs.set_message = mock.MagicMock()
mfs.mapToGlobal = mock.MagicMock()
mock_event = mock.MagicMock()
with mock.patch('mu.interface.panes.QMenu', return_value=mock_menu):
mfs.contextMenuEvent(mock_event)
assert mfs.set_message.emit.call_count == 0
mock_open.assert_called_once_with(os.path.join('homepath', 'foo.py'))
def test_LocalFileList_contextMenuEvent_external():
"""
Ensure that right-clicking a local file Mu cannot open (i.e. not a Python
file) results in a message rather than the open_file signal being emitted.
"""
mock_menu = mock.MagicMock()
mock_action = mock.MagicMock()
mock_menu.addAction.side_effect = [mock_action, mock.MagicMock()]
mock_menu.exec_.return_value = mock_action
mfs = mu.interface.panes.LocalFileList('homepath')
mock_open = mock.MagicMock()
mfs.open_file = mock.MagicMock()
mfs.open_file.emit = mock_open
mock_current = mock.MagicMock()
mock_current.text.return_value = 'foo.qwerty'
mfs.currentItem = mock.MagicMock(return_value=mock_current)
mfs.set_message = mock.MagicMock()
mfs.mapToGlobal = mock.MagicMock()
mock_event = mock.MagicMock()
with mock.patch('mu.interface.panes.QMenu', return_value=mock_menu):
mfs.contextMenuEvent(mock_event)
assert mfs.set_message.emit.call_count == 1
assert mock_open.call_count == 0
def test_FileSystemPane_init():
"""
Check things are set up as expected.
"""
home = 'homepath'
test_microbit_fs = mu.interface.panes.MicroPythonDeviceFileList(home)
test_microbit_fs.disable = mock.MagicMock()
test_microbit_fs.set_message = mock.MagicMock()
test_local_fs = mu.interface.panes.LocalFileList(home)
test_local_fs.disable = mock.MagicMock()
test_local_fs.set_message = mock.MagicMock()
mock_mfl = mock.MagicMock(return_value=test_microbit_fs)
mock_lfl = mock.MagicMock(return_value=test_local_fs)
with mock.patch('mu.interface.panes.MicroPythonDeviceFileList',
mock_mfl), \
mock.patch('mu.interface.panes.LocalFileList', mock_lfl):
fsp = mu.interface.panes.FileSystemPane('homepath')
assert isinstance(fsp.microbit_label, QLabel)
assert isinstance(fsp.local_label, QLabel)
assert fsp.microbit_fs == test_microbit_fs
assert fsp.local_fs == test_local_fs
test_microbit_fs.disable.connect.assert_called_once_with(fsp.disable)
test_microbit_fs.set_message.connect.\
assert_called_once_with(fsp.show_message)
test_local_fs.disable.connect.assert_called_once_with(fsp.disable)
test_local_fs.set_message.connect.\
assert_called_once_with(fsp.show_message)
def test_FileSystemPane_disable():
"""
The child list widgets are disabled correctly.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.microbit_fs = mock.MagicMock()
fsp.local_fs = mock.MagicMock()
fsp.disable()
fsp.microbit_fs.setDisabled.assert_called_once_with(True)
fsp.local_fs.setDisabled.assert_called_once_with(True)
fsp.microbit_fs.setAcceptDrops.assert_called_once_with(False)
fsp.local_fs.setAcceptDrops.assert_called_once_with(False)
def test_FileSystemPane_enable():
"""
The child list widgets are enabled correctly.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.microbit_fs = mock.MagicMock()
fsp.local_fs = mock.MagicMock()
fsp.enable()
fsp.microbit_fs.setDisabled.assert_called_once_with(False)
fsp.local_fs.setDisabled.assert_called_once_with(False)
fsp.microbit_fs.setAcceptDrops.assert_called_once_with(True)
fsp.local_fs.setAcceptDrops.assert_called_once_with(True)
def test_FileSystemPane_set_theme():
"""
Ensure setting the theme does not raise an error.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.set_theme('test')
def test_FileSystemPane_show_message():
"""
Ensure the expected message signal is emitted.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.set_message = mock.MagicMock()
fsp.show_message('Hello')
fsp.set_message.emit.assert_called_once_with('Hello')
def test_FileSystemPane_show_warning():
"""
Ensure the expected warning signal is emitted.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.set_warning = mock.MagicMock()
fsp.show_warning('Hello')
fsp.set_warning.emit.assert_called_once_with('Hello')
def test_FileSystemPane_on_ls():
"""
When lists of files have been obtained from the micro:bit and local
filesystem, make sure they're properly processed by the on_ls event
handler.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
microbit_files = ['foo.py', 'bar.py', ]
fsp.microbit_fs = mock.MagicMock()
fsp.local_fs = mock.MagicMock()
fsp.enable = mock.MagicMock()
local_files = ['qux.py', 'baz.py', ]
mock_listdir = mock.MagicMock(return_value=local_files)
mock_isfile = mock.MagicMock(return_value=True)
with mock.patch('mu.interface.panes.os.listdir', mock_listdir),\
mock.patch('mu.interface.panes.os.path.isfile', mock_isfile):
fsp.on_ls(microbit_files)
fsp.microbit_fs.clear.assert_called_once_with()
fsp.local_fs.clear.assert_called_once_with()
assert fsp.microbit_fs.addItem.call_count == 2
assert fsp.local_fs.addItem.call_count == 2
fsp.enable.assert_called_once_with()
def test_FileSystemPane_on_ls_fail():
"""
A warning is emitted and the widget disabled if listing files fails.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.show_warning = mock.MagicMock()
fsp.disable = mock.MagicMock()
fsp.on_ls_fail()
assert fsp.show_warning.call_count == 1
fsp.disable.assert_called_once_with()
def test_FileSystem_Pane_on_put_fail():
"""
A warning is emitted if putting files on the micro:bit fails.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.show_warning = mock.MagicMock()
fsp.on_put_fail('foo.py')
assert fsp.show_warning.call_count == 1
def test_FileSystem_Pane_on_delete_fail():
"""
A warning is emitted if deleting files on the micro:bit fails.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.show_warning = mock.MagicMock()
fsp.on_delete_fail('foo.py')
assert fsp.show_warning.call_count == 1
def test_FileSystem_Pane_on_get_fail():
"""
A warning is emitted if getting files from the micro:bit fails.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.show_warning = mock.MagicMock()
fsp.on_get_fail('foo.py')
assert fsp.show_warning.call_count == 1
def test_FileSystemPane_set_font_size():
"""
Ensure the right size is set as the point size and the text based UI child
widgets are updated.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.font = mock.MagicMock()
fsp.microbit_label = mock.MagicMock()
fsp.local_label = mock.MagicMock()
fsp.microbit_fs = mock.MagicMock()
fsp.local_fs = mock.MagicMock()
fsp.set_font_size(22)
fsp.font.setPointSize.assert_called_once_with(22)
fsp.microbit_label.setFont.assert_called_once_with(fsp.font)
fsp.local_label.setFont.assert_called_once_with(fsp.font)
fsp.microbit_fs.setFont.assert_called_once_with(fsp.font)
fsp.local_fs.setFont.assert_called_once_with(fsp.font)
def test_FileSystemPane_open_file():
"""
FileSystemPane should propagate the open_file signal.
"""
fsp = mu.interface.panes.FileSystemPane('homepath')
fsp.open_file = mock.MagicMock()
mock_open_emit = mock.MagicMock()
fsp.open_file.emit = mock_open_emit
fsp.local_fs.open_file.emit('test')
mock_open_emit.assert_called_once_with('test')
def test_JupyterREPLPane_init():
"""
Ensure the widget is setup with the correct defaults.
"""
jw = mu.interface.panes.JupyterREPLPane()
assert jw.console_height == 10
def test_JupyterREPLPane_append_plain_text():
"""
Ensure signal and expected bytes are emitted when _append_plain_text is
called.
"""
jw = mu.interface.panes.JupyterREPLPane()
jw.on_append_text = mock.MagicMock()
jw._append_plain_text('hello')
jw.on_append_text.emit.assert_called_once_with('hello'.encode('utf-8'))
def test_JupyterREPLPane_set_font_size():
"""
Check the new point size is successfully applied.
"""
jw = mu.interface.panes.JupyterREPLPane()
jw.set_font_size(16)
assert jw.font.pointSize() == 16
def test_JupyterREPLPane_set_zoom():
"""
Ensure the expected font point size is set from the zoom size.
"""
jw = mu.interface.panes.JupyterREPLPane()
jw.set_font_size = mock.MagicMock()
jw.set_zoom('xxl')
jw.set_font_size.\
assert_called_once_with(mu.interface.panes.PANE_ZOOM_SIZES['xxl'])
def test_JupyterREPLPane_set_theme_day():
"""
Make sure the theme is correctly set for day.
"""
jw = mu.interface.panes.JupyterREPLPane()
jw.set_default_style = mock.MagicMock()
jw.set_theme('day')
jw.set_default_style.assert_called_once_with()
def test_JupyterREPLPane_set_theme_night():
"""
Make sure the theme is correctly set for night.
"""
jw = mu.interface.panes.JupyterREPLPane()
jw.set_default_style = mock.MagicMock()
jw.set_theme('night')
jw.set_default_style.assert_called_once_with(colors='nocolor')
def test_JupyterREPLPane_set_theme_contrast():
"""
Make sure the theme is correctly set for high contrast.
"""
jw = mu.interface.panes.JupyterREPLPane()
jw.set_default_style = mock.MagicMock()
jw.set_theme('contrast')
jw.set_default_style.assert_called_once_with(colors='nocolor')
def test_JupyterREPLPane_setFocus():
"""
Ensures setFocus actually occurs to the _control containing the REPL.
"""
jw = mu.interface.panes.JupyterREPLPane()
jw._control = mock.MagicMock()
jw.setFocus()
jw._control.setFocus.assert_called_once_with()
def test_PythonProcessPane_init():
"""
Check the font, input_buffer and other initial state is set as expected.
"""
ppp = mu.interface.panes.PythonProcessPane()
assert ppp.font()
assert ppp.process is None
assert ppp.input_history == []
assert ppp.start_of_current_line == 0
assert ppp.history_position == 0
assert ppp.running is False
assert ppp.stdout_buffer == b''
assert ppp.reading_stdout is False
def test_PythonProcessPane_start_process():
"""
Ensure the default arguments for starting a new process work as expected.
Interactive mode is True, no debugger flag nor additional arguments.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
with mock.patch('mu.interface.panes.QProcess', mock_process_class):
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_process('script.py', 'workspace')
assert mock_process_class.call_count == 1
assert ppp.process == mock_process
ppp.process.setProcessChannelMode.assert_called_once_with(mock_merge_chans)
ppp.process.setWorkingDirectory.assert_called_once_with('workspace')
ppp.process.readyRead.connect.\
assert_called_once_with(ppp.try_read_from_stdout)
ppp.process.finished.connect.assert_called_once_with(ppp.finished)
expected_script = os.path.abspath(os.path.normcase('script.py'))
assert ppp.script == expected_script
runner = sys.executable
expected_args = ['-i', expected_script, ] # called with interactive flag.
ppp.process.start.assert_called_once_with(runner, expected_args)
assert ppp.running is True
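# The default invocation asserted above is "<sys.executable> -i <script>":
# CPython's -i flag drops into an interactive prompt once the script ends,
# which is what makes the process pane usable as a REPL afterwards.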
def test_PythonProcessPane_start_process_command_args():
"""
Ensure that the new process is passed the expected command line args.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
with mock.patch('mu.interface.panes.QProcess', mock_process_class):
ppp = mu.interface.panes.PythonProcessPane()
args = ['foo', 'bar', ]
ppp.start_process('script.py', 'workspace', command_args=args)
runner = sys.executable
expected_script = os.path.abspath(os.path.normcase('script.py'))
expected_args = ['-i', expected_script, 'foo', 'bar', ]
ppp.process.start.assert_called_once_with(runner, expected_args)
def test_PythonProcessPane_start_process_debugger():
"""
Ensure starting a new process with the debugger flag set to True uses the
debug runner to execute the script.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
with mock.patch('mu.interface.panes.QProcess', mock_process_class):
ppp = mu.interface.panes.PythonProcessPane()
args = ['foo', 'bar', ]
ppp.start_process('script.py', 'workspace', debugger=True,
command_args=args)
mu_dir = os.path.dirname(os.path.abspath(mu.__file__))
runner = os.path.join(mu_dir, 'mu-debug.py')
python_exec = sys.executable
expected_script = os.path.abspath(os.path.normcase('script.py'))
expected_args = [runner, expected_script, 'foo', 'bar', ]
ppp.process.start.assert_called_once_with(python_exec, expected_args)
def test_PythonProcessPane_start_process_not_interactive():
"""
Ensure that if the interactive flag is unset, the "-i" flag is not passed
into the Python process.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
with mock.patch('mu.interface.panes.QProcess', mock_process_class):
ppp = mu.interface.panes.PythonProcessPane()
args = ['foo', 'bar', ]
ppp.start_process('script.py', 'workspace', interactive=False,
command_args=args)
runner = sys.executable
expected_script = os.path.abspath(os.path.normcase('script.py'))
expected_args = [expected_script, 'foo', 'bar', ]
ppp.process.start.assert_called_once_with(runner, expected_args)
def test_PythonProcessPane_start_process_windows_path():
"""
If running on Windows via the installer ensure that the expected paths
find their way into a temporary mu.pth file.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
mock_sys = mock.MagicMock()
mock_sys.platform = 'win32'
mock_sys.executable = 'C:\\Program Files\\Mu\\Python\\pythonw.exe'
mock_os_p_e = mock.MagicMock(return_value=True)
mock_os_makedirs = mock.MagicMock()
mock_site = mock.MagicMock()
mock_site.ENABLE_USER_SITE = True
mock_site.USER_SITE = ('C:\\Users\\foo\\AppData\\Roaming\\Python\\'
'Python36\\site-packages')
mock_site.getusersitepackages.return_value = mock_site.USER_SITE
mock_open = mock.mock_open()
with mock.patch('mu.interface.panes.QProcess', mock_process_class),\
mock.patch('mu.interface.panes.sys', mock_sys),\
mock.patch('mu.interface.panes.os.path.exists', mock_os_p_e),\
mock.patch('mu.interface.panes.os.makedirs', mock_os_makedirs),\
mock.patch('mu.interface.panes.site', mock_site),\
mock.patch('builtins.open', mock_open):
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_process('script.py', 'workspace', interactive=False)
expected_pth = os.path.join(mock_site.USER_SITE, 'mu.pth')
mock_os_makedirs.assert_called_once_with(mock_site.USER_SITE,
exist_ok=True)
mock_open.assert_called_once_with(expected_pth, 'w')
expected = [
'workspace',
os.path.normcase(os.path.dirname(os.path.abspath('script.py'))),
]
mock_file = mock_open()
added_paths = [call[0][0] for call in mock_file.write.call_args_list]
for e in expected:
assert e + '\n' in added_paths
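# Background for the mu.pth mechanism tested above: CPython's site module
# reads *.pth files found in site-packages directories and appends each
# listed line to sys.path, so writing the workspace and script directory
# into USER_SITE/mu.pth makes them importable from the child process.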
def test_PythonProcessPane_start_process_windows_path_no_user_site():
"""
If running on Windows via the installer, ensure that Mu logs the fact that
it's unable to use the temporary mu.pth file because there is no USER_SITE
enabled.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
mock_sys = mock.MagicMock()
mock_sys.platform = 'win32'
mock_sys.executable = 'C:\\Program Files\\Mu\\Python\\pythonw.exe'
mock_os_p_e = mock.MagicMock(return_value=True)
mock_site = mock.MagicMock()
mock_site.ENABLE_USER_SITE = False
mock_log = mock.MagicMock()
with mock.patch('mu.interface.panes.QProcess', mock_process_class),\
mock.patch('mu.interface.panes.sys', mock_sys),\
mock.patch('mu.interface.panes.os.path.exists', mock_os_p_e),\
mock.patch('mu.interface.panes.site', mock_site),\
mock.patch('mu.interface.panes.logger', mock_log):
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_process('script.py', 'workspace', interactive=False)
logs = [call[0][0] for call in mock_log.info.call_args_list]
expected = ("Unable to set Python paths. Python's USER_SITE not enabled."
" Check configuration with administrator.")
assert expected in logs
def test_PythonProcessPane_start_process_windows_path_with_exception():
"""
If running on Windows via the installer, ensure that any exception raised
while writing the temporary mu.pth file is logged as an error.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
mock_sys = mock.MagicMock()
mock_sys.platform = 'win32'
mock_sys.executable = 'C:\\Program Files\\Mu\\Python\\pythonw.exe'
mock_os_p_e = mock.MagicMock(return_value=True)
mock_site = mock.MagicMock()
mock_site.ENABLE_USER_SITE = True
mock_site.USER_SITE = ('C:\\Users\\foo\\AppData\\Roaming\\Python\\'
'Python36\\site-packages')
mock_open = mock.MagicMock(side_effect=Exception("Boom"))
mock_log = mock.MagicMock()
with mock.patch('mu.interface.panes.QProcess', mock_process_class),\
mock.patch('mu.interface.panes.sys', mock_sys),\
mock.patch('mu.interface.panes.os.path.exists', mock_os_p_e),\
mock.patch('mu.interface.panes.site', mock_site),\
mock.patch('builtins.open', mock_open),\
mock.patch('mu.interface.panes.logger', mock_log):
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_process('script.py', 'workspace', interactive=False)
logs = [call[0][0] for call in mock_log.error.call_args_list]
expected = ("Could not set Python paths with mu.pth file.")
assert expected in logs
def test_PythonProcessPane_start_process_user_environment_variables():
"""
Ensure that if environment variables are set, they are set in the context
of the new child Python process.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
mock_environment = mock.MagicMock()
mock_environment_class = mock.MagicMock()
mock_environment_class.systemEnvironment.return_value = mock_environment
with mock.patch('mu.interface.panes.QProcess', mock_process_class), \
mock.patch('mu.interface.panes.QProcessEnvironment',
mock_environment_class):
ppp = mu.interface.panes.PythonProcessPane()
envars = [['name', 'value'], ]
ppp.start_process('script.py', 'workspace', interactive=False,
envars=envars, runner='foo')
assert mock_environment.insert.call_count == 4
assert mock_environment.insert.call_args_list[0][0] == ('PYTHONUNBUFFERED',
'1')
assert mock_environment.insert.call_args_list[1][0] == ('PYTHONIOENCODING',
'utf-8')
assert mock_environment.insert.call_args_list[2][0] == ('name', 'value')
expected_path = os.pathsep.join(sys.path)
assert mock_environment.insert.call_args_list[3][0] == ('PYTHONPATH',
expected_path)
def test_PythonProcessPane_start_process_custom_runner():
"""
Ensure that if the runner is set, it is used as the command to start the
new child Python process.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
with mock.patch('mu.interface.panes.QProcess', mock_process_class):
ppp = mu.interface.panes.PythonProcessPane()
args = ['foo', 'bar', ]
ppp.start_process('script.py', 'workspace', interactive=False,
command_args=args, runner='foo')
expected_script = os.path.abspath(os.path.normcase('script.py'))
expected_args = [expected_script, 'foo', 'bar', ]
ppp.process.start.assert_called_once_with('foo', expected_args)
def test_PythonProcessPane_start_process_custom_python_args():
"""
Ensure that if there are arguments to be passed into the Python runtime
starting the child process, these are passed on correctly.
"""
mock_process = mock.MagicMock()
mock_process_class = mock.MagicMock(return_value=mock_process)
mock_merge_chans = mock.MagicMock()
mock_process_class.MergedChannels = mock_merge_chans
with mock.patch('mu.interface.panes.QProcess', mock_process_class):
ppp = mu.interface.panes.PythonProcessPane()
py_args = ['-m', 'pgzero', ]
ppp.start_process('script.py', 'workspace', interactive=False,
python_args=py_args)
expected_script = os.path.abspath(os.path.normcase('script.py'))
expected_args = ['-m', 'pgzero', expected_script]
runner = sys.executable
ppp.process.start.assert_called_once_with(runner, expected_args)
def test_PythonProcessPane_finished():
"""
Check the functionality to handle the process finishing is correct.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_cursor = mock.MagicMock()
mock_cursor.insertText = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.setReadOnly = mock.MagicMock()
ppp.setTextCursor = mock.MagicMock()
ppp.finished(0, 1)
assert mock_cursor.insertText.call_count == 2
assert 'exit code: 0' in mock_cursor.insertText.call_args[0][0]
assert 'status: 1' in mock_cursor.insertText.call_args[0][0]
ppp.setReadOnly.assert_called_once_with(True)
ppp.setTextCursor.assert_called_once_with(ppp.textCursor())
def test_PythonProcessPane_context_menu():
"""
Ensure the context menu for the Python process pane is configured correctly
for non-OSX platforms.
"""
mock_platform = mock.MagicMock()
mock_platform.system.return_value = 'WinNT'
mock_qmenu = mock.MagicMock()
mock_qmenu_class = mock.MagicMock(return_value=mock_qmenu)
with mock.patch('mu.interface.panes.platform', mock_platform), \
mock.patch('mu.interface.panes.QMenu', mock_qmenu_class), \
mock.patch('mu.interface.panes.QCursor'):
ppp = mu.interface.panes.PythonProcessPane()
ppp.context_menu()
assert mock_qmenu.addAction.call_count == 2
copy_action = mock_qmenu.addAction.call_args_list[0][0]
assert copy_action[0] == 'Copy'
assert copy_action[1] == ppp.copy
assert copy_action[2].toString() == 'Ctrl+Shift+C'
paste_action = mock_qmenu.addAction.call_args_list[1][0]
assert paste_action[0] == 'Paste'
assert paste_action[1] == ppp.paste
assert paste_action[2].toString() == 'Ctrl+Shift+V'
assert mock_qmenu.exec_.call_count == 1
def test_PythonProcessPane_context_menu_darwin():
"""
Ensure the context menu for the Python process pane is configured correctly
for OSX (Darwin) platforms.
"""
mock_platform = mock.MagicMock()
mock_platform.system.return_value = 'Darwin'
mock_qmenu = mock.MagicMock()
mock_qmenu_class = mock.MagicMock(return_value=mock_qmenu)
with mock.patch('mu.interface.panes.platform', mock_platform), \
mock.patch('mu.interface.panes.QMenu', mock_qmenu_class), \
mock.patch('mu.interface.panes.QCursor'):
ppp = mu.interface.panes.PythonProcessPane()
ppp.context_menu()
assert mock_qmenu.addAction.call_count == 2
copy_action = mock_qmenu.addAction.call_args_list[0][0]
assert copy_action[0] == 'Copy'
assert copy_action[1] == ppp.copy
assert copy_action[2].toString() == 'Ctrl+C'
paste_action = mock_qmenu.addAction.call_args_list[1][0]
assert paste_action[0] == 'Paste'
assert paste_action[1] == ppp.paste
assert paste_action[2].toString() == 'Ctrl+V'
assert mock_qmenu.exec_.call_count == 1
def test_PythonProcessPane_paste():
"""
Ensure pasted text is handed off to the parse_paste method.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.parse_paste = mock.MagicMock()
mock_clipboard = mock.MagicMock()
mock_clipboard.text.return_value = 'Hello'
with mock.patch('mu.interface.panes.QApplication.clipboard',
return_value=mock_clipboard):
ppp.paste()
ppp.parse_paste.assert_called_once_with('Hello')
def test_PythonProcessPane_paste_normalize_windows_newlines():
"""
Ensure that pasted text containing Windows style line-ends is normalised
to '\n'.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.parse_paste = mock.MagicMock()
mock_clipboard = mock.MagicMock()
mock_clipboard.text.return_value = 'h\r\ni'
with mock.patch('mu.interface.panes.QApplication.clipboard',
return_value=mock_clipboard):
ppp.paste()
ppp.parse_paste.assert_called_once_with('h\ni')
def test_PythonProcessPane_parse_paste():
"""
Given some text ensure that the first character is correctly handled and
the remaining text to be processed is scheduled to be parsed in the future.
Essentially parse_paste pretends to be someone typing in the characters of
the pasted text *really fast*, rather than as a single shot dump of data.
This is so the event loop can cycle to handle any output from the child
process.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.parse_input = mock.MagicMock()
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.QTimer', mock_timer):
ppp.parse_paste('hello')
ppp.parse_input.assert_called_once_with(None, 'h', None)
assert mock_timer.singleShot.call_count == 1
def test_PythonProcessPane_parse_paste_non_ascii():
"""
Given some non-ascii yet printable text, ensure that the first character is
correctly handled and the remaining text to be processed is scheduled to be
parsed in the future.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.parse_input = mock.MagicMock()
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.QTimer', mock_timer):
ppp.parse_paste('ÅÄÖ')
ppp.parse_input.assert_called_once_with(None, 'Å', None)
assert mock_timer.singleShot.call_count == 1
def test_PythonProcessPane_parse_paste_newline():
"""
As above, but ensure the correct handling of a newline character.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.parse_input = mock.MagicMock()
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.QTimer', mock_timer):
ppp.parse_paste('\nhello')
ppp.parse_input.assert_called_once_with(Qt.Key_Enter, '\n', None)
assert mock_timer.singleShot.call_count == 1
def test_PythonProcessPane_parse_paste_final_character():
"""
As above, but ensure that if there are no more remaining characters to parse
in the pasted text, then don't schedule any more recursive calls.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.parse_input = mock.MagicMock()
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.QTimer', mock_timer):
ppp.parse_paste('\n')
ppp.parse_input.assert_called_once_with(Qt.Key_Enter, '\n', None)
assert mock_timer.singleShot.call_count == 0
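# Taken together, the parse_paste tests describe a simple recursion: handle
# the first character via parse_input, then, if any text remains, schedule
# parse_paste(remainder) via QTimer.singleShot; an empty remainder ends the
# chain.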
def test_PythonProcessPane_keyPressEvent_a():
"""
A character is typed and passed into parse_input in the expected manner.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.parse_input = mock.MagicMock()
data = mock.MagicMock()
data.key = mock.MagicMock(return_value=Qt.Key_A)
data.text = mock.MagicMock(return_value='a')
data.modifiers = mock.MagicMock(return_value=None)
ppp.keyPressEvent(data)
ppp.parse_input.assert_called_once_with(Qt.Key_A, 'a', None)
def test_PythonProcessPane_on_process_halt():
"""
Ensure the output from the halted process is dumped to the UI.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.process = mock.MagicMock()
ppp.process.readAll().data.return_value = b'halted'
ppp.append = mock.MagicMock()
ppp.on_append_text = mock.MagicMock()
ppp.set_start_of_current_line = mock.MagicMock()
ppp.on_process_halt()
ppp.process.readAll().data.assert_called_once_with()
ppp.append.assert_called_once_with(b'halted')
ppp.on_append_text.emit.assert_called_once_with(b'halted')
ppp.set_start_of_current_line.assert_called_once_with()
def test_PythonProcessPane_on_process_halt_badly_formed_bytes():
"""
If the bytes read from the child process's stdout starts with a badly
formed unicode character (e.g. a fragment of a multi-byte character such as
"𠜎"), then ensure the problem bytes at the start of the data are discarded
until a valid result can be turned into a string.
"""
data = "𠜎Hello, World!".encode('utf-8') # Contains a multi-byte char.
data = data[1:] # Split the multi-byte character (causes UnicodeDecodeError)
ppp = mu.interface.panes.PythonProcessPane()
ppp.process = mock.MagicMock()
ppp.process.readAll().data.return_value = data
ppp.on_append_text = mock.MagicMock()
ppp.set_start_of_current_line = mock.MagicMock()
ppp.on_process_halt()
ppp.process.readAll().data.assert_called_once_with()
ppp.on_append_text.emit.assert_called_once_with(b'Hello, World!')
ppp.set_start_of_current_line.assert_called_once_with()
def test_PythonProcessPane_parse_input_a():
"""
Ensure a regular printable character is inserted into the text area.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.insert = mock.MagicMock()
key = Qt.Key_A
text = 'a'
modifiers = None
ppp.parse_input(key, text, modifiers)
ppp.insert.assert_called_once_with(b'a')
def test_PythonProcessPane_parse_input_non_ascii():
"""
Ensure a non-ascii printable character is inserted into the text area.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.insert = mock.MagicMock()
key = Qt.Key_A
text = 'Å'
modifiers = None
ppp.parse_input(key, text, modifiers)
ppp.insert.assert_called_once_with('Å'.encode('utf-8'))
def test_PythonProcessPane_parse_input_ctrl_c():
"""
Control-C (SIGINT / KeyboardInterrupt) character is typed.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.process = mock.MagicMock()
ppp.process.processId.return_value = 123
ppp.running = True
key = Qt.Key_C
text = ''
modifiers = Qt.ControlModifier
mock_kill = mock.MagicMock()
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.os.kill', mock_kill), \
mock.patch('mu.interface.panes.QTimer', mock_timer), \
mock.patch('mu.interface.panes.platform.system',
return_value='win32'):
ppp.parse_input(key, text, modifiers)
mock_kill.assert_called_once_with(123, signal.SIGINT)
ppp.process.readAll.assert_called_once_with()
mock_timer.singleShot.assert_called_once_with(1, ppp.on_process_halt)
def test_PythonProcessPane_parse_input_ctrl_d():
"""
Control-D (Kill process) character is typed.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.process = mock.MagicMock()
ppp.running = True
key = Qt.Key_D
text = ''
modifiers = Qt.ControlModifier
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.platform.system',
return_value='win32'), \
mock.patch('mu.interface.panes.QTimer', mock_timer):
ppp.parse_input(key, text, modifiers)
ppp.process.kill.assert_called_once_with()
ppp.process.readAll.assert_called_once_with()
mock_timer.singleShot.assert_called_once_with(1, ppp.on_process_halt)
def test_PythonProcessPane_parse_input_ctrl_c_after_process_finished():
"""
Control-C does nothing (no SIGINT is sent) if the child process has already
finished.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.process = mock.MagicMock()
ppp.process.processId.return_value = 123
ppp.running = False
key = Qt.Key_C
text = ''
modifiers = Qt.ControlModifier
mock_kill = mock.MagicMock()
with mock.patch('mu.interface.panes.os.kill', mock_kill), \
mock.patch('mu.interface.panes.platform.system',
return_value='win32'):
ppp.parse_input(key, text, modifiers)
assert mock_kill.call_count == 0
def test_PythonProcessPane_parse_input_ctrl_d_after_process_finished():
"""
Control-D does nothing (the process is not killed) if the child process has
already finished.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.process = mock.MagicMock()
ppp.running = False
key = Qt.Key_D
text = ''
modifiers = Qt.ControlModifier
with mock.patch('mu.interface.panes.platform.system',
return_value='win32'):
ppp.parse_input(key, text, modifiers)
assert ppp.process.kill.call_count == 0
def test_PythonProcessPane_parse_input_up_arrow():
"""
Up Arrow causes the input line to be replaced with movement back in
command history.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.history_back = mock.MagicMock()
key = Qt.Key_Up
text = ''
modifiers = None
ppp.parse_input(key, text, modifiers)
assert ppp.history_back.call_count == 1
def test_PythonProcessPane_parse_input_down_arrow():
"""
Down Arrow causes the input line to be replaced with movement forward
through the command history.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.history_forward = mock.MagicMock()
key = Qt.Key_Down
text = ''
modifiers = None
ppp.parse_input(key, text, modifiers)
assert ppp.history_forward.call_count == 1
def test_PythonProcessPane_parse_input_right_arrow():
"""
Right Arrow causes the cursor to move to the right one place.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_cursor = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.setTextCursor = mock.MagicMock()
key = Qt.Key_Right
text = ''
modifiers = None
ppp.parse_input(key, text, modifiers)
mock_cursor.movePosition.assert_called_once_with(QTextCursor.Right)
ppp.setTextCursor.assert_called_once_with(mock_cursor)
def test_PythonProcessPane_parse_input_left_arrow():
"""
Left Arrow causes the cursor to move to the left one place if not at the
start of the input line.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_cursor = mock.MagicMock()
mock_cursor.position.return_value = 1
ppp.start_of_current_line = 0
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.setTextCursor = mock.MagicMock()
key = Qt.Key_Left
text = ''
modifiers = None
ppp.parse_input(key, text, modifiers)
mock_cursor.movePosition.assert_called_once_with(QTextCursor.Left)
ppp.setTextCursor.assert_called_once_with(mock_cursor)
def test_PythonProcessPane_parse_input_left_arrow_at_start_of_line():
"""
Left Arrow doesn't do anything if the current cursor position is at the
start of the input line.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_cursor = mock.MagicMock()
mock_cursor.position.return_value = 1
ppp.start_of_current_line = 1
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.setTextCursor = mock.MagicMock()
key = Qt.Key_Left
text = ''
modifiers = None
ppp.parse_input(key, text, modifiers)
assert mock_cursor.movePosition.call_count == 0
assert ppp.setTextCursor.call_count == 0
def test_PythonProcessPane_parse_input_home():
"""
Home moves cursor to the start of the input line.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.toPlainText = mock.MagicMock(return_value='hello')
mock_cursor = mock.MagicMock()
ppp.start_of_current_line = 0
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.setTextCursor = mock.MagicMock()
key = Qt.Key_Home
text = ''
modifiers = None
ppp.parse_input(key, text, modifiers)
# Move to the end of the line, then move left len of 'hello'.
assert mock_cursor.movePosition.call_count == 6
ppp.setTextCursor.assert_called_once_with(mock_cursor)
def test_PythonProcessPane_parse_input_end():
"""
End moves cursor to the end of the input line.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_cursor = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.setTextCursor = mock.MagicMock()
key = Qt.Key_End
text = ''
modifiers = None
ppp.parse_input(key, text, modifiers)
mock_cursor.movePosition.assert_called_once_with(QTextCursor.End)
ppp.setTextCursor.assert_called_once_with(mock_cursor)
def test_PythonProcessPane_parse_input_paste():
"""
Control-Shift-V (paste) character causes a paste to happen.
"""
ppp = mu.interface.panes.PythonProcessPane()
key = Qt.Key_V
text = ''
modifiers = Qt.ControlModifier | Qt.ShiftModifier
ppp.paste = mock.MagicMock()
ppp.parse_input(key, text, modifiers)
ppp.paste.assert_called_once_with()
def test_PythonProcessPane_parse_input_copy():
"""
Control-Shift-C (copy) character causes copy to happen.
"""
ppp = mu.interface.panes.PythonProcessPane()
key = Qt.Key_C
text = ''
modifiers = Qt.ControlModifier | Qt.ShiftModifier
ppp.copy = mock.MagicMock()
ppp.parse_input(key, text, modifiers)
ppp.copy.assert_called_once_with()
def test_PythonProcessPane_parse_input_backspace():
"""
Backspace deletes the character to the left of the current cursor
position.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.backspace = mock.MagicMock()
key = Qt.Key_Backspace
text = '\b'
modifiers = None
ppp.parse_input(key, text, modifiers)
ppp.backspace.assert_called_once_with()
def test_PythonProcessPane_parse_input_delete():
"""
Delete deletes the character to the right of the cursor position.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.delete = mock.MagicMock()
key = Qt.Key_Delete
text = '\b'
modifiers = None
ppp.parse_input(key, text, modifiers)
ppp.delete.assert_called_once_with()
def test_PythonProcessPane_parse_input_newline():
"""
Newline causes the input line to be written to the child process's stdin.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.toPlainText = mock.MagicMock(return_value='abc\n')
ppp.start_of_current_line = 0
ppp.textCursor = mock.MagicMock()
ppp.textCursor().position.return_value = 666
ppp.setTextCursor = mock.MagicMock()
ppp.insert = mock.MagicMock()
ppp.write_to_stdin = mock.MagicMock()
key = Qt.Key_Enter
text = '\r'
modifiers = None
ppp.parse_input(key, text, modifiers)
ppp.write_to_stdin.assert_called_once_with(b'abc\n')
assert b'abc' in ppp.input_history
assert ppp.history_position == 0
# On newline, the start of the current line should be set correctly.
assert ppp.start_of_current_line == 4 # len('abc\n')
def test_PythonProcessPane_parse_input_newline_ignore_empty_input_in_history():
"""
Newline causes the input line to be written to the child process's stdin,
but if the resulting line is either empty or only contains whitespace, do
not add it to the input_history.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.toPlainText = mock.MagicMock(return_value=' \n')
ppp.start_of_current_line = 0
ppp.write_to_stdin = mock.MagicMock()
key = Qt.Key_Enter
text = '\r'
modifiers = None
ppp.parse_input(key, text, modifiers)
ppp.write_to_stdin.assert_called_once_with(b' \n')
assert len(ppp.input_history) == 0
assert ppp.history_position == 0
def test_PythonProcessPane_parse_input_newline_with_cursor_midline():
"""
Ensure that when the cursor is placed in the middle of a line and enter is
pressed, the whole line is sent to stdin.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.write_to_stdin = mock.MagicMock()
ppp.parse_input(None, "abc", None)
ppp.parse_input(Qt.Key_Left, None, None)
ppp.parse_input(Qt.Key_Enter, '\r', None)
ppp.write_to_stdin.assert_called_with(b'abc\n')
def test_PythonProcessPane_set_start_of_current_line():
"""
Ensure the start of the current line is set to the current length of the
text in the editor pane.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.toPlainText = mock.MagicMock(return_value="Hello𠜎")
ppp.set_start_of_current_line()
assert ppp.start_of_current_line == len("Hello𠜎")
def test_PythonProcessPane_history_back():
"""
Ensure the current input line is replaced by the next item back in time
from the current history position.
"""
ppp = mu.interface.panes.PythonProcessPane()
# 'a' was typed first, 'c' is the most recent entry.
ppp.input_history = ['a', 'b', 'c', ]
ppp.history_position = 0
ppp.replace_input_line = mock.MagicMock()
ppp.history_back()
ppp.replace_input_line.assert_called_once_with('c')
assert ppp.history_position == -1
def test_PythonProcessPane_history_back_at_first_item():
"""
Ensure that going back when already at the oldest history item re-displays
the oldest item and leaves the history position unchanged.
"""
ppp = mu.interface.panes.PythonProcessPane()
# 'a' was typed first, 'c' is the most recent entry.
ppp.input_history = ['a', 'b', 'c', ]
ppp.history_position = -3
ppp.replace_input_line = mock.MagicMock()
ppp.history_back()
ppp.replace_input_line.assert_called_once_with('a')
assert ppp.history_position == -3
def test_PythonProcessPane_history_forward():
"""
Ensure the current input line is replaced by the next item forward in time
from the current history position.
"""
ppp = mu.interface.panes.PythonProcessPane()
# 'a' was typed first, 'c' is the most recent entry.
ppp.input_history = ['a', 'b', 'c', ]
ppp.history_position = -3
ppp.replace_input_line = mock.MagicMock()
ppp.history_forward()
ppp.replace_input_line.assert_called_once_with('b')
assert ppp.history_position == -2
def test_PythonProcessPane_history_forward_at_last_item():
"""
Ensure the current input line is cleared if the history position was at
the most recent item.
"""
ppp = mu.interface.panes.PythonProcessPane()
# 'a' was typed first, 'c' is the most recent entry.
ppp.input_history = ['a', 'b', 'c', ]
ppp.history_position = -1
ppp.replace_input_line = mock.MagicMock()
ppp.clear_input_line = mock.MagicMock()
ppp.history_forward()
ppp.clear_input_line.assert_called_once_with()
assert ppp.replace_input_line.call_count == 0
assert ppp.history_position == 0
def test_PythonProcessPane_try_read_from_stdout_not_started():
"""
If the process pane is NOT already reading from STDOUT then ensure it
starts to.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.read_from_stdout = mock.MagicMock()
ppp.try_read_from_stdout()
assert ppp.reading_stdout is True
ppp.read_from_stdout.assert_called_once_with()
def test_PythonProcessPane_try_read_from_stdout_has_started():
"""
If the process pane is already reading from STDOUT then ensure it
doesn't keep trying.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.read_from_stdout = mock.MagicMock()
ppp.reading_stdout = True
ppp.try_read_from_stdout()
assert ppp.reading_stdout is True
assert ppp.read_from_stdout.call_count == 0
def test_PythonProcessPane_read_from_stdout():
"""
Ensure incoming bytes from the sub-process's stdout are processed correctly.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.append = mock.MagicMock()
ppp.process = mock.MagicMock()
ppp.process.read.return_value = b'hello world'
ppp.on_append_text = mock.MagicMock()
ppp.set_start_of_current_line = mock.MagicMock()
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.QTimer', mock_timer):
ppp.read_from_stdout()
assert ppp.append.call_count == 1
ppp.process.read.assert_called_once_with(256)
ppp.on_append_text.emit.assert_called_once_with(b'hello world')
ppp.set_start_of_current_line.assert_called_once_with()
mock_timer.singleShot.assert_called_once_with(2, ppp.read_from_stdout)
def test_PythonProcessPane_read_from_stdout_with_stdout_buffer():
"""
Ensure incoming bytes from sub-process's stdout are processed correctly if
there was a split between reads in a multi-byte character (such as "𠜎").
The buffer is prepended to the current read, resulting in bytes that
can be successfully represented in a UTF based string.
"""
msg = "Hello 𠜎 world".encode('utf-8')
ppp = mu.interface.panes.PythonProcessPane()
ppp.stdout_buffer = msg[:7] # Start of msg but split in multi-byte char.
ppp.process = mock.MagicMock()
ppp.process.read.return_value = msg[7:] # Remainder of msg.
ppp.on_append_text = mock.MagicMock()
ppp.set_start_of_current_line = mock.MagicMock()
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.QTimer', mock_timer):
ppp.read_from_stdout()
ppp.process.read.assert_called_once_with(256)
ppp.on_append_text.emit.assert_called_once_with(msg)
ppp.set_start_of_current_line.assert_called_once_with()
mock_timer.singleShot.assert_called_once_with(2, ppp.read_from_stdout)
assert ppp.stdout_buffer == b''
def test_PythonProcessPane_read_from_stdout_with_unicode_error():
"""
Ensure incoming bytes from sub-process's stdout are processed correctly if
there was a split between reads in a multi-byte character (such as "𠜎").
If the read bytes end with a split of a multi-byte character, ensure they
are put into self.stdout_buffer so they can be prepended to the next
bytes read from the child process.
"""
msg = "Hello 𠜎 world".encode('utf-8')
ppp = mu.interface.panes.PythonProcessPane()
ppp.process = mock.MagicMock()
ppp.process.read.return_value = msg[:7] # Split the multi-byte character.
ppp.on_append_text = mock.MagicMock()
ppp.set_start_of_current_line = mock.MagicMock()
mock_timer = mock.MagicMock()
with mock.patch('mu.interface.panes.QTimer', mock_timer):
ppp.read_from_stdout()
ppp.process.read.assert_called_once_with(256)
assert ppp.on_append_text.emit.call_count == 0
assert ppp.set_start_of_current_line.call_count == 0
mock_timer.singleShot.assert_called_once_with(2, ppp.read_from_stdout)
assert ppp.stdout_buffer == msg[:7]
def test_PythonProcessPane_read_from_stdout_no_data():
"""
If no data is returned, ensure the reading_stdout flag is reset to False.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.reading_stdout = True
ppp.process = mock.MagicMock()
ppp.process.read.return_value = b''
ppp.read_from_stdout()
assert ppp.reading_stdout is False
def test_PythonProcessPane_write_to_stdin():
"""
Ensure input from the user is written to the child process.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.process = mock.MagicMock()
ppp.write_to_stdin(b'hello')
ppp.process.write.assert_called_once_with(b'hello')
def test_PythonProcessPane_append():
"""
Ensure the referenced byte_stream is added to the textual content of the
QTextEdit.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_cursor = mock.MagicMock()
ppp.setTextCursor = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.append(b'hello')
mock_cursor.insertText.assert_called_once_with('hello')
assert mock_cursor.movePosition.call_count == 2
def test_PythonProcessPane_insert_within_input_line():
"""
Ensure text is inserted at the end of the document if the current cursor
position is not within the bounds of the input line.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_cursor = mock.MagicMock()
mock_cursor.position.return_value = 1
ppp.start_of_current_line = 100
ppp.setTextCursor = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.insert(b'hello')
mock_cursor.movePosition.assert_called_once_with(QTextCursor.End)
mock_cursor.insertText.assert_called_once_with('hello')
def test_PythonProcessPane_insert():
"""
Ensure text is inserted at the current cursor position.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_cursor = mock.MagicMock()
mock_cursor.position.return_value = 100
ppp.start_of_current_line = 1
ppp.setTextCursor = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.insert(b'hello')
assert mock_cursor.movePosition.call_count == 0
mock_cursor.insertText.assert_called_once_with('hello')
def test_PythonProcessPane_backspace():
"""
Make sure that removing a character to the left of the current cursor
position works as expected.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_of_current_line = 123
mock_cursor = mock.MagicMock()
mock_cursor.position.return_value = 124
mock_cursor.deletePreviousChar = mock.MagicMock()
ppp.setTextCursor = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.backspace()
mock_cursor.deletePreviousChar.assert_called_once_with()
ppp.setTextCursor.assert_called_once_with(mock_cursor)
def test_PythonProcessPane_backspace_at_start_of_input_line():
"""
Make sure that removing a character will not work if the cursor is at the
left-hand boundary of the input line.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_of_current_line = 123
mock_cursor = mock.MagicMock()
mock_cursor.position.return_value = 123
mock_cursor.deletePreviousChar = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.backspace()
assert mock_cursor.deletePreviousChar.call_count == 0
def test_PythonProcessPane_delete():
"""
Make sure that removing a character to the right of the current cursor
position works as expected.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_of_current_line = 123
mock_cursor = mock.MagicMock()
mock_cursor.position.return_value = 124
mock_cursor.deletePreviousChar = mock.MagicMock()
ppp.setTextCursor = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.delete()
mock_cursor.deleteChar.assert_called_once_with()
ppp.setTextCursor.assert_called_once_with(mock_cursor)
def test_PythonProcessPane_delete_at_start_of_input_line():
"""
Make sure that removing a character will not work if the cursor is before
the left-hand boundary of the input line.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_of_current_line = 123
mock_cursor = mock.MagicMock()
mock_cursor.position.return_value = 122
mock_cursor.deletePreviousChar = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.delete()
assert mock_cursor.deleteChar.call_count == 0
def test_PythonProcessPane_clear_input_line():
"""
Ensure the input line is cleared back to the start of the input line.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.start_of_current_line = 0
ppp.toPlainText = mock.MagicMock(return_value='hello')
mock_cursor = mock.MagicMock()
ppp.setTextCursor = mock.MagicMock()
ppp.textCursor = mock.MagicMock(return_value=mock_cursor)
ppp.clear_input_line()
assert mock_cursor.deletePreviousChar.call_count == 5
mock_cursor.movePosition.assert_called_once_with(QTextCursor.End)
ppp.setTextCursor.assert_called_once_with(mock_cursor)
def test_PythonProcessPane_replace_input_line():
"""
Ensure that the input line is cleared and then the replacement text is
appended to the text area.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.clear_input_line = mock.MagicMock()
ppp.append = mock.MagicMock()
ppp.replace_input_line('hello')
ppp.clear_input_line.assert_called_once_with()
ppp.append.assert_called_once_with('hello')
def test_PythonProcessPane_set_font_size():
"""
Ensure the font size is set to the expected point size.
"""
ppp = mu.interface.panes.PythonProcessPane()
mock_font = mock.MagicMock()
ppp.font = mock.MagicMock(return_value=mock_font)
ppp.setFont = mock.MagicMock()
ppp.set_font_size(123)
mock_font.setPointSize.assert_called_once_with(123)
ppp.setFont.assert_called_once_with(mock_font)
def test_PythonProcessPane_set_zoom():
"""
Ensure the expected point size is set from the given "t-shirt" size.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.set_font_size = mock.MagicMock()
ppp.set_zoom('xl')
expected = mu.interface.panes.PANE_ZOOM_SIZES['xl']
ppp.set_font_size.assert_called_once_with(expected)
def test_PythonProcessPane_set_theme():
"""
Setting the theme shouldn't do anything.
"""
ppp = mu.interface.panes.PythonProcessPane()
ppp.set_theme('test')
def test_DebugInspectorItem():
item = mu.interface.panes.DebugInspectorItem('test')
assert item.text() == 'test'
assert not item.isEditable()
def test_DebugInspector_set_font_size():
"""
Check the correct stylesheet values are being set.
"""
di = mu.interface.panes.DebugInspector()
di.setStyleSheet = mock.MagicMock()
di.set_font_size(16)
style = di.setStyleSheet.call_args[0][0]
assert 'font-size: 16pt;' in style
assert 'font-family: Monospace;' in style
def test_DebugInspector_set_zoom():
"""
Ensure the expected point size is set from the given "t-shirt" size.
"""
di = mu.interface.panes.DebugInspector()
di.set_font_size = mock.MagicMock()
di.set_zoom('xl')
expected = mu.interface.panes.PANE_ZOOM_SIZES['xl']
di.set_font_size.assert_called_once_with(expected)
def test_DebugInspector_set_theme():
"""
Setting the theme shouldn't do anything.
"""
di = mu.interface.panes.DebugInspector()
di.set_theme('test')
def test_PlotterPane_init():
"""
Ensure the plotter pane is created in the expected manner.
"""
pp = mu.interface.panes.PlotterPane()
assert pp.input_buffer == []
assert pp.raw_data == []
assert pp.max_x == 100
assert pp.max_y == 1000
assert len(pp.data) == 1
assert isinstance(pp.data[0], deque)
assert len(pp.series) == 1
assert isinstance(pp.series[0], QLineSeries)
assert isinstance(pp.chart, QChart)
assert isinstance(pp.axis_x, QValueAxis)
assert isinstance(pp.axis_y, QValueAxis)
def test_PlotterPane_process_bytes():
"""
If bytes representing a Python tuple of numeric values, starting at the
beginning of a line and terminated by a newline, are received, then the
add_data method is called with the resulting Python tuple.
"""
pp = mu.interface.panes.PlotterPane()
pp.add_data = mock.MagicMock()
pp.process_bytes(b'(1, 2.3, 4)\r\n')
pp.add_data.assert_called_once_with((1, 2.3, 4))
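# (Sketch, not part of the original tests: a child program produces this
# protocol simply by printing tuples, e.g. print((1, 2.3, 4)), which arrives
# over the serial/stdout stream as b'(1, 2.3, 4)\r\n'.)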
def test_PlotterPane_process_bytes_guards_against_data_flood():
"""
If the process_bytes method gets more than 1024 bytes of data, ensure it
triggers a data_flood signal and that the plotter no longer processes
incoming bytes.
(The assumption is that Mu will clean up once the data_flood signal is
emitted.)
"""
pp = mu.interface.panes.PlotterPane()
pp.data_flood = mock.MagicMock()
pp.add_data = mock.MagicMock()
data_flood = b'X' * 1025
pp.process_bytes(data_flood)
assert pp.flooded is True
pp.data_flood.emit.assert_called_once_with()
assert pp.add_data.call_count == 0
pp.process_bytes(data_flood)
assert pp.add_data.call_count == 0
def test_PlotterPane_process_bytes_tuple_not_numeric():
"""
If a byte representation of a tuple is received but it doesn't contain
numeric values, then the add_data method MUST NOT be called.
"""
pp = mu.interface.panes.PlotterPane()
pp.add_data = mock.MagicMock()
pp.process_bytes(b'("a", "b", "c")\r\n')
assert pp.add_data.call_count == 0
def test_PlotterPane_process_bytes_overrun_input_buffer():
"""
If the incoming bytes are not complete, ensure the input_buffer caches them
until the newline is detected.
"""
pp = mu.interface.panes.PlotterPane()
pp.add_data = mock.MagicMock()
pp.process_bytes(b'(1, 2.3, 4)\r\n')
pp.add_data.assert_called_once_with((1, 2.3, 4))
pp.add_data.reset_mock()
pp.process_bytes(b'(1, 2.')
assert pp.add_data.call_count == 0
pp.process_bytes(b'3, 4)\r\n')
pp.add_data.assert_called_once_with((1, 2.3, 4))
pp.add_data.reset_mock()
pp.process_bytes(b'(1, 2.3, 4)\r\n')
pp.add_data.assert_called_once_with((1, 2.3, 4))
def test_PlotterPane_add_data():
"""
Given a tuple with a single value, ensure it is logged and correctly added
to the chart.
"""
pp = mu.interface.panes.PlotterPane()
mock_line_series = mock.MagicMock()
pp.series = [mock_line_series, ]
pp.add_data((1, ))
assert (1, ) in pp.raw_data
mock_line_series.clear.assert_called_once_with()
for i in range(99):
    assert mock_line_series.append.call_args_list[i][0] == (i, 0)
assert mock_line_series.append.call_args_list[99][0] == (99, 1)
def test_PlotterPane_add_data_adjust_values_up():
"""
If the incoming data contains more values than previously encountered,
increase the number of QLineSeries instances.
"""
pp = mu.interface.panes.PlotterPane()
pp.series = [mock.MagicMock(), ]
pp.chart = mock.MagicMock()
with mock.patch('mu.interface.panes.QLineSeries'):
pp.add_data((1, 2, 3, 4))
assert len(pp.series) == 4
assert pp.chart.addSeries.call_count == 3
assert pp.chart.setAxisX.call_count == 3
assert pp.chart.setAxisY.call_count == 3
assert len(pp.data) == 4
def test_PlotterPane_add_data_adjust_values_down():
"""
If the incoming data contains fewer values than previously encountered,
decrease the number of QLineSeries instances.
"""
pp = mu.interface.panes.PlotterPane()
pp.series = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
pp.data.append(mock.MagicMock())
pp.data.append(mock.MagicMock())
pp.chart = mock.MagicMock()
with mock.patch('mu.interface.panes.QLineSeries'):
pp.add_data((1, ))
assert len(pp.series) == 1
assert len(pp.data) == 1
assert pp.chart.removeSeries.call_count == 2
def test_PlotterPane_add_data_re_scale_up():
"""
If the y axis contains data greater than the current range, then ensure
the range is doubled.
"""
pp = mu.interface.panes.PlotterPane()
pp.axis_y = mock.MagicMock()
mock_line_series = mock.MagicMock()
pp.series = [mock_line_series, ]
pp.add_data((1001, ))
assert pp.max_y == 2000
pp.axis_y.setRange.assert_called_once_with(-2000, 2000)
def test_PlotterPane_add_data_re_scale_down():
"""
If the y axis contains data less than half of the current range, then
ensure the range is halved.
"""
pp = mu.interface.panes.PlotterPane()
pp.max_y = 4000
pp.axis_y = mock.MagicMock()
mock_line_series = mock.MagicMock()
pp.series = [mock_line_series, ]
pp.add_data((1999, ))
assert pp.max_y == 2000
pp.axis_y.setRange.assert_called_once_with(-2000, 2000)
def test_PlotterPane_set_label_format_to_float_when_range_small():
"""
If max_y is 5 or less, make sure the label format is set to a float with
two decimal places.
"""
pp = mu.interface.panes.PlotterPane()
pp.max_y = 10
pp.axis_y = mock.MagicMock()
mock_line_series = mock.MagicMock()
pp.series = [mock_line_series, ]
pp.add_data((1, ))
assert pp.max_y == 1
pp.axis_y.setRange.assert_called_once_with(-1, 1)
pp.axis_y.setLabelFormat.assert_called_once_with("%2.2f")
def test_PlotterPane_set_label_format_to_int_when_range_large():
"""
If max_y is more than 5, make sure the label format is set to an integer.
"""
pp = mu.interface.panes.PlotterPane()
pp.max_y = 5
pp.axis_y = mock.MagicMock()
mock_line_series = mock.MagicMock()
pp.series = [mock_line_series, ]
pp.add_data((10, ))
assert pp.max_y == 10
pp.axis_y.setRange.assert_called_once_with(-10, 10)
pp.axis_y.setLabelFormat.assert_called_once_with("%d")
def test_PlotterPane_set_theme():
"""
Ensure the themes for the chart relate correctly to the theme names used
by Mu.
"""
pp = mu.interface.panes.PlotterPane()
pp.chart = mock.MagicMock()
pp.set_theme('day')
pp.chart.setTheme.assert_called_once_with(QChart.ChartThemeLight)
pp.chart.setTheme.reset_mock()
pp.set_theme('night')
pp.chart.setTheme.assert_called_once_with(QChart.ChartThemeDark)
pp.chart.setTheme.reset_mock()
pp.set_theme('contrast')
pp.chart.setTheme.assert_called_once_with(QChart.ChartThemeHighContrast)
|
stestagg/mu
|
tests/interface/test_panes.py
|
Python
|
gpl-3.0
| 89,147
|
# test basic capability to start a new thread
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
try:
import utime as time
except ImportError:
import time
import _thread
def foo():
pass
def thread_entry(n):
for i in range(n):
foo()
_thread.start_new_thread(thread_entry, (10,))
_thread.start_new_thread(thread_entry, (20,))
# wait for threads to finish
time.sleep(1)
print('done')
|
AriZuu/micropython
|
tests/thread/thread_start1.py
|
Python
|
mit
| 435
|
from os.path import join
from pythonforandroid.recipe import PythonRecipe
class ZBarRecipe(PythonRecipe):
version = '0.10'
# For some reason the version 0.10 on PyPI is not the same as the ones
# in sourceforge and GitHub. The one in PyPI has a setup.py.
# url = 'https://github.com/ZBar/ZBar/archive/{version}.zip'
url = 'https://pypi.python.org/packages/e0/5c/' + \
'bd2a96a9f2adacffceb4482cdd56831735ab5a67ea6a60c0a8757c17b62e' + \
'/zbar-{version}.tar.gz'
call_hostpython_via_targetpython = False
depends = ['setuptools', 'libzbar']
patches = ["zbar-0.10-python-crash.patch"]
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super().get_recipe_env(arch, with_flags_in_cc)
libzbar = self.get_recipe('libzbar', self.ctx)
libzbar_dir = libzbar.get_build_dir(arch.arch)
env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
env['CFLAGS'] += ' -I' + join(libzbar_dir, 'include')
env['LDFLAGS'] += ' -L' + join(libzbar_dir, 'zbar', '.libs')
env['LIBS'] = env.get('LIBS', '') + ' -landroid -lzbar'
return env
recipe = ZBarRecipe()
|
germn/python-for-android
|
pythonforandroid/recipes/zbar/__init__.py
|
Python
|
mit
| 1,172
|
#!/usr/bin/env python3
#import pkg_resources
#pkg_resources.require("requests>=2.10.0")
import sys
sys.path.insert(0, "/usr/local/lib/python3.4/dist-packages/")
import os, sys, json, pymongo, requests, ephem, datetime, math
from housepy import config, log, util, geo
from mongo import db
SOURCE = "server"
try:
entry = {'source': SOURCE}
result = list(db.entries.find({'source': "gps"}).sort([("t_utc", pymongo.DESCENDING)]).limit(1))[0]
lat, lon = result['latitude'], result['longitude']
entry.update({'latitude': result['latitude'], 'longitude': result['longitude'], 'altitude_m': result['altitude_m']})
except Exception as e:
log.info(log.exc(e))
exit()
# Tarrytown, Hudson River, New York (Tarrytown)
# Madison Ave. Bridge, New York Current (Bronx)
# Brooklyn Bridge (Brooklyn)
# lat, lon = 41.255873,-73.9676297 # king marine
# # lat, lon = 40.706172,-73.930953 # bushwick
# lat, lon = 40.6901015,-74.0111785 # governor's island
# # lat, lon = 40.8255327,-73.893846 # concrete plant park
# lat, lon = 41.1810336,-73.9069995 # croton
# lat, lon = 40.7055395,-74.0219601
# url = "http://api.wunderground.com/api/%s/geolookup/q/%s,%s.json" % (config['weather'], lat, lon)
# state = requests.get(url).json()['location']['state']
# city = requests.get(url).json()['location']['city'].strip("The ")
def get_tide(entry):
try:
stations = { (40.7033,-73.9883): "Brooklyn",
(40.8133,-73.935): "Bronx",
(41.0783,-73.87): "Tarrytown"
}
closest_miles = 10000
closest_city = None
for location, city in stations.items():
miles = geo.distance((entry['longitude'], entry['latitude']), (location[1], location[0]))
if miles < closest_miles:
closest_miles = miles
closest_city = city
response = requests.get("http://api.wunderground.com/api/%s/rawtide/q/NY/%s.json" % (config['weather'], closest_city))
data = response.json()
t_utc, height = data['rawtide']['rawTideObs'][0]['epoch'], data['rawtide']['rawTideObs'][0]['height']
entry.update({'tide_station': closest_city, 'tide_height_ft': height})  # use the station chosen above, not the loop variable
except Exception as e:
log.error(log.exc(e))
return entry
def get_sun(entry):
try:
observer = ephem.Observer()
observer.lon = entry['longitude']
observer.lat = entry['latitude']
observer.elevation = entry['altitude_m']
dt = datetime.datetime.utcnow() # always UTC
observer.date = dt.strftime("%Y/%m/%d %H:%M:%S")
sun = ephem.Sun(observer)
radians = float(sun.alt)
degrees = math.degrees(radians)
entry.update({'sun_deg': degrees})
except Exception as e:
log.error(e)
return entry
entry = get_tide(entry)
entry = get_sun(entry)
try:
response = requests.post("http://54.235.200.47", json=entry, timeout=5)
log.info(response.status_code)
except Exception as e:
log.error(log.exc(e))
|
biomearts/swale_api
|
server_sensors.py
|
Python
|
gpl-3.0
| 3,056
|
#!/usr/bin/python
import os
import subprocess
from exceptions import YBError
READ = object() # read from
WRITE = object() # write to
#-----------------------------------------------------------------------------
def check_error(cmd, code):
if code < 0:
raise YBError('"%s" got signal %d', cmd, -code, exit = 1)
if code > 0:
raise YBError('"%s" exited with code %d', cmd, code, exit = 1)
#-----------------------------------------------------------------------------
# wrapper that dies with YBError on I/O error or on non-zero exit
class OutPipe:
def __init__(self, cmd, proc):
self._cmd = cmd
self._proc = proc
def __del__(self):
if self._proc is not None:
self.close()
def write(self, data):
try:
return self._proc.stdin.write(data)
except IOError:
self.close()
# close() probably already raised an error, but if the command did
# exit(0), let's die
raise YBError('"%s" exited unexpectedly', self._cmd, exit = 1)
def sync(self):
try:
return self._proc.stdin.flush()  # file objects provide flush(), not sync()
except IOError:
self.close()
# close() probably already raised an error, but if the command did
# exit(0), let's die
raise YBError('"%s" exited unexpectedly', self._cmd, exit = 1)
def close(self):
proc = self._proc
self._proc = None
try:
proc.communicate()
check_error(self._cmd, proc.returncode)
except IOError:
# it would be weird if I/O error happened on close(), but it could be
# flushing buffers or something
raise YBError('"%s" exited unexpectedly', self._cmd, exit = 1)
#-----------------------------------------------------------------------------
def run(command, chroot = None, pipe = None, env = None):
if not isinstance(command, (tuple, list)):
command = command.split(' ')
if chroot is not None:
def chroot_fun(*args):
os.chdir(chroot)
os.chroot('.')
else:
chroot_fun = None
if pipe is None:
proc = subprocess.Popen(
command,
env = env,
stdin = open('/dev/null'),
preexec_fn = chroot_fun,
)
proc.wait()
check_error(command[0], proc.returncode)
elif pipe is READ:
proc = subprocess.Popen(
command,
env = env,
stdin = open('/dev/null'),
stdout = subprocess.PIPE,
preexec_fn = chroot_fun,
)
(result,_) = proc.communicate()
check_error(command[0], proc.returncode)
return result
elif pipe is WRITE:
proc = subprocess.Popen(
command,
env = env,
stdin = subprocess.PIPE,
preexec_fn = chroot_fun,
)
return OutPipe(command[0], proc)
#-----------------------------------------------------------------------------
# vim:ft=python
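# Hedged usage sketch (not part of the original module) showing the three pipe
# modes accepted by run(); the commands are placeholders:
#
#   run('true')                             # no pipe: wait, raise YBError on failure
#   data = run(['uname', '-r'], pipe=READ)  # capture the child's stdout
#   out = run(['gzip', '-c'], pipe=WRITE)   # returns an OutPipe wrapper
#   out.write('payload')
#   out.close()                             # checks the exit code via check_error()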
|
dozzie/yumbootstrap
|
lib/yumbootstrap/sh.py
|
Python
|
gpl-3.0
| 2,741
|
#vim: set encoding=utf-8
import os
import shutil
import tempfile
from unittest import TestCase
from lxml import etree
from regparser.notice import build, changes
from regparser.notice.diff import DesignateAmendment, Amendment
from regparser.tree.struct import Node
import settings
class NoticeBuildTest(TestCase):
def setUp(self):
self.original_local_xml_paths = settings.LOCAL_XML_PATHS
settings.LOCAL_XML_PATHS = []
self.dir1 = tempfile.mkdtemp()
self.dir2 = tempfile.mkdtemp()
def tearDown(self):
settings.LOCAL_XML_PATHS = self.original_local_xml_paths
shutil.rmtree(self.dir1)
shutil.rmtree(self.dir2)
def test_build_notice(self):
fr = {
'abstract': 'sum sum sum',
'action': 'actact',
'agency_names': ['Agency 1', 'Agency 2'],
'cfr_references': [{'title': 12, 'part': 9191},
{'title': 12, 'part': 9292}],
'citation': 'citation citation',
'comments_close_on': None,
'dates': 'date info',
'document_number': '7878-111',
'effective_on': '1956-09-09',
'end_page': 9999,
'full_text_xml_url': None,
'html_url': 'some url',
'publication_date': '1955-12-10',
'regulation_id_numbers': ['a231a-232q'],
'start_page': 8888,
'type': 'Rule',
'volume': 66,
}
self.assertEqual(build.build_notice('5', '9292', fr), [{
'abstract': 'sum sum sum',
'action': 'actact',
'agency_names': ['Agency 1', 'Agency 2'],
'cfr_parts': ['9191', '9292'],
'cfr_title': '5',
'document_number': '7878-111',
'effective_on': '1956-09-09',
'fr_citation': 'citation citation',
'fr_url': 'some url',
'fr_volume': 66,
'initial_effective_on': '1956-09-09',
'meta': {
'dates': 'date info',
'end_page': 9999,
'start_page': 8888,
'type': 'Rule'
},
'publication_date': '1955-12-10',
'regulation_id_numbers': ['a231a-232q'],
}])
def test_process_xml(self):
"""Integration test for xml processing"""
xml = """
<ROOT>
<SUPLINF>
<FURINF>
<HD>CONTACT INFO:</HD>
<P>Extra contact info here</P>
</FURINF>
<ADD>
<P>Email: example@example.com</P>
<P>Extra instructions</P>
</ADD>
<HD SOURCE="HED">Supplementary Info</HD>
<HD SOURCE="HD1">V. Section-by-Section Analysis</HD>
<HD SOURCE="HD2">8(q) Words</HD>
<P>Content</P>
<HD SOURCE="HD1">Section that follows</HD>
<P>Following Content</P>
</SUPLINF>
</ROOT>"""
notice = {'cfr_parts': ['9292'], 'meta': {'start_page': 100}, 'document_number': '1999-12345'}
self.assertEqual(build.process_xml(notice, etree.fromstring(xml)), {
'cfr_parts': ['9292'],
'footnotes': {},
'meta': {'start_page': 100},
'document_number': '1999-12345',
'addresses': {
'methods': [('Email', 'example@example.com')],
'instructions': ['Extra instructions']
},
'contact': 'Extra contact info here',
'section_by_section': [{
'title': '8(q) Words',
'paragraphs': ['Content'],
'children': [],
'footnote_refs': [],
'page': 100,
'labels': ['9292-8-q']
}],
})
def test_process_xml_missing_fields(self):
xml = """
<ROOT>
<SUPLINF>
<HD SOURCE="HED">Supplementary Info</HD>
<HD SOURCE="HD1">V. Section-by-Section Analysis</HD>
<HD SOURCE="HD2">8(q) Words</HD>
<P>Content</P>
<HD SOURCE="HD1">Section that follows</HD>
<P>Following Content</P>
</SUPLINF>
</ROOT>"""
notice = {'cfr_parts': ['9292'], 'meta': {'start_page': 210},
'document_number': '1999-12345',}
self.assertEqual(build.process_xml(notice, etree.fromstring(xml)), {
'cfr_parts': ['9292'],
'footnotes': {},
'meta': {'start_page': 210},
'document_number': '1999-12345',
'section_by_section': [{
'title': '8(q) Words',
'paragraphs': ['Content'],
'children': [],
'footnote_refs': [],
'page': 210,
'labels': ['9292-8-q']
}],
})
def test_process_xml_fill_effective_date(self):
xml = """
<ROOT>
<DATES>
<P>Effective January 1, 2002</P>
</DATES>
</ROOT>"""
xml = etree.fromstring(xml)
notice = {'cfr_parts': ['902'], 'meta': {'start_page': 10},
'document_number': '1999-12345', 'effective_on': '2002-02-02'}
notice = build.process_xml(notice, xml)
self.assertEqual('2002-02-02', notice['effective_on'])
notice = {'cfr_parts': ['902'], 'meta': {'start_page': 10},
'document_number': '1999-12345',}
notice = build.process_xml(notice, xml)
# Uses the date found in the XML
self.assertEqual('2002-01-01', notice['effective_on'])
notice = {'cfr_parts': ['902'], 'meta': {'start_page': 10},
'document_number': '1999-12345', 'effective_on': None}
notice = build.process_xml(notice, xml)
# Uses the date found in the XML
self.assertEqual('2002-01-01', notice['effective_on'])
def test_add_footnotes(self):
xml = """
<ROOT>
<P>Some text</P>
<FTNT>
<P><SU>21</SU>Footnote text</P>
</FTNT>
<FTNT>
<P><SU>43</SU>This has a<PRTPAGE P="2222" />break</P>
</FTNT>
<FTNT>
<P><SU>98</SU>This one has<E T="03">emph</E>tags</P>
</FTNT>
</ROOT>"""
notice = {}
build.add_footnotes(notice, etree.fromstring(xml))
self.assertEqual(notice, {'footnotes': {
'21': 'Footnote text',
'43': 'This has a break',
'98': 'This one has <em data-original="E-03">emph</em> tags'
}})
def test_process_designate_subpart(self):
p_list = ['200-?-1-a', '200-?-1-b']
destination = '205-Subpart:A'
amended_label = DesignateAmendment('DESIGNATE', p_list, destination)
subpart_changes = build.process_designate_subpart(amended_label)
self.assertEqual(['200-1-a', '200-1-b'], subpart_changes.keys())
for p, change in subpart_changes.items():
self.assertEqual(change['destination'], ['205', 'Subpart', 'A'])
self.assertEqual(change['action'], 'DESIGNATE')
def test_process_amendments(self):
xml = u"""
<REGTEXT PART="105" TITLE="12">
<SUBPART>
<HD SOURCE="HED">Subpart A—General</HD>
</SUBPART>
<AMDPAR>
2. Designate §§ 105.1 through 105.3 as subpart A under the heading.
</AMDPAR>
</REGTEXT>"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, notice_xml)
section_list = ['105-2', '105-3', '105-1']
self.assertEqual(notice['changes'].keys(), section_list)
for l, c in notice['changes'].items():
change = c[0]
self.assertEqual(change['destination'], ['105', 'Subpart', 'A'])
self.assertEqual(change['action'], 'DESIGNATE')
def test_process_amendments_section(self):
xml = u"""
<REGTEXT PART="105" TITLE="12">
<AMDPAR>
3. In § 105.1, revise paragraph (b) to read as follows:
</AMDPAR>
<SECTION>
<SECTNO>§ 105.1</SECTNO>
<SUBJECT>Purpose.</SUBJECT>
<STARS/>
<P>(b) This part carries out.</P>
</SECTION>
</REGTEXT>
"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, notice_xml)
self.assertEqual(notice['changes'].keys(), ['105-1-b'])
changes = notice['changes']['105-1-b'][0]
self.assertEqual(changes['action'], 'PUT')
self.assertTrue(changes['node']['text'].startswith(
u'(b) This part carries out.'))
def test_process_amendments_multiple_in_same_parent(self):
xml = u"""
<REGTEXT PART="105" TITLE="12">
<AMDPAR>
1. In § 105.1, revise paragraph (b) to read as follows:
</AMDPAR>
<AMDPAR>2. Also, revise paragraph (c):</AMDPAR>
<SECTION>
<SECTNO>§ 105.1</SECTNO>
<SUBJECT>Purpose.</SUBJECT>
<STARS/>
<P>(b) This part carries out.</P>
<P>(c) More stuff</P>
</SECTION>
</REGTEXT>"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, notice_xml)
self.assertEqual(notice['changes'].keys(), ['105-1-b', '105-1-c'])
changes = notice['changes']['105-1-b'][0]
self.assertEqual(changes['action'], 'PUT')
self.assertEqual(changes['node']['text'].strip(),
u'(b) This part carries out.')
changes = notice['changes']['105-1-c'][0]
self.assertEqual(changes['action'], 'PUT')
self.assertEqual(changes['node']['text'].strip(),
u'(c) More stuff')
def test_process_amendments_restart_new_section(self):
xml = u"""
<ROOT>
<REGTEXT PART="104" TITLE="12">
<AMDPAR>
1. In Supplement I to Part 104, comment 22(a) is added
</AMDPAR>
<P>Content</P>
</REGTEXT>
<REGTEXT PART="105" TITLE="12">
<AMDPAR>
3. In § 105.1, revise paragraph (b) to read as follows:
</AMDPAR>
<SECTION>
<SECTNO>§ 105.1</SECTNO>
<SUBJECT>Purpose.</SUBJECT>
<STARS/>
<P>(b) This part carries out.</P>
</SECTION>
</REGTEXT>
</ROOT>"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, notice_xml)
self.assertEqual(2, len(notice['amendments']))
c22a, b = notice['amendments']
self.assertEqual(c22a.action, 'POST')
self.assertEqual(b.action, 'PUT')
self.assertEqual(c22a.label, ['104', '22', 'a', 'Interp'])
self.assertEqual(b.label, ['105', '1', 'b'])
def test_process_amendments_no_nodes(self):
xml = u"""
<ROOT>
<REGTEXT PART="104" TITLE="12">
<AMDPAR>
1. In § 104.13, paragraph (b) is removed
</AMDPAR>
</REGTEXT>
</ROOT>"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['104']}
build.process_amendments(notice, notice_xml)
self.assertEqual(1, len(notice['amendments']))
delete = notice['amendments'][0]
self.assertEqual(delete.action, 'DELETE')
self.assertEqual(delete.label, ['104', '13', 'b'])
def new_subpart_xml(self):
xml = u"""
<RULE>
<REGTEXT PART="105" TITLE="12">
<AMDPAR>
3. In § 105.1, revise paragraph (b) to read as follows:
</AMDPAR>
<SECTION>
<SECTNO>§ 105.1</SECTNO>
<SUBJECT>Purpose.</SUBJECT>
<STARS/>
<P>(b) This part carries out.</P>
</SECTION>
</REGTEXT>
<REGTEXT PART="105" TITLE="12">
<AMDPAR>
6. Add subpart B to read as follows:
</AMDPAR>
<CONTENTS>
<SUBPART>
<SECHD>Sec.</SECHD>
<SECTNO>105.30</SECTNO>
<SUBJECT>First In New Subpart.</SUBJECT>
</SUBPART>
</CONTENTS>
<SUBPART>
<HD SOURCE="HED">Subpart B—Requirements</HD>
<SECTION>
<SECTNO>105.30</SECTNO>
<SUBJECT>First In New Subpart</SUBJECT>
<P>For purposes of this subpart, the follow apply:</P>
<P>(a) "Agent" means agent.</P>
</SECTION>
</SUBPART>
</REGTEXT>
</RULE>"""
return xml
def test_process_new_subpart(self):
xml = self.new_subpart_xml()
notice_xml = etree.fromstring(xml)
par = notice_xml.xpath('//AMDPAR')[1]
amended_label = Amendment('POST', '105-Subpart:B')
notice = {'cfr_parts': ['105']}
subpart_changes = build.process_new_subpart(notice, amended_label, par)
new_nodes_added = ['105-Subpart-B', '105-30', '105-30-def0', '105-30-a']
self.assertEqual(new_nodes_added, subpart_changes.keys())
for l, n in subpart_changes.items():
self.assertEqual(n['action'], 'POST')
self.assertEqual(
subpart_changes['105-Subpart-B']['node']['node_type'], 'subpart')
def test_process_amendments_subpart(self):
xml = self.new_subpart_xml()
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, notice_xml)
self.assertTrue('105-Subpart-B' in notice['changes'].keys())
self.assertTrue('105-30-a' in notice['changes'].keys())
self.assertTrue('105-30' in notice['changes'].keys())
def test_process_amendments_mix_regs(self):
"""Some notices apply to multiple regs. For now, just ignore the
sections not associated with the reg we're focused on"""
xml = u"""
<ROOT>
<REGTEXT PART="105" TITLE="12">
<AMDPAR>
3. In § 105.1, revise paragraph (a) to read as follows:
</AMDPAR>
<SECTION>
<SECTNO>§ 105.1</SECTNO>
<SUBJECT>105Purpose.</SUBJECT>
<P>(a) 105Content</P>
</SECTION>
</REGTEXT>
<REGTEXT PART="106" TITLE="12">
<AMDPAR>
3. In § 106.3, revise paragraph (b) to read as follows:
</AMDPAR>
<SECTION>
<SECTNO>§ 106.3</SECTNO>
<SUBJECT>106Purpose.</SUBJECT>
<P>(b) Content</P>
</SECTION>
</REGTEXT>
</ROOT>
"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['105', '106']}
build.process_amendments(notice, notice_xml)
self.assertEqual(2, len(notice['changes']))
self.assertTrue('105-1-a' in notice['changes'])
self.assertTrue('106-3-b' in notice['changes'])
def test_process_amendments_context(self):
"""Context should carry over between REGTEXTs"""
xml = u"""
<ROOT>
<REGTEXT TITLE="12">
<AMDPAR>
3. In § 106.1, revise paragraph (a) to read as follows:
</AMDPAR>
</REGTEXT>
<REGTEXT TITLE="12">
<AMDPAR>
3. Add appendix C
</AMDPAR>
</REGTEXT>
</ROOT>
"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['105', '106']}
build.process_amendments(notice, notice_xml)
self.assertEqual(2, len(notice['amendments']))
amd1, amd2 = notice['amendments']
self.assertEqual(['106', '1', 'a'], amd1.label)
self.assertEqual(['106', 'C'], amd2.label)
def test_introductory_text(self):
""" Sometimes notices change just the introductory text of a paragraph
(instead of changing the entire paragraph tree). """
xml = u"""
<REGTEXT PART="106" TITLE="12">
<AMDPAR>
3. In § 106.2, revise the introductory text to read as follows:
</AMDPAR>
<SECTION>
<SECTNO>§ 106.2</SECTNO>
<SUBJECT> Definitions </SUBJECT>
<P> Except as otherwise provided, the following apply. </P>
</SECTION>
</REGTEXT>
"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['106']}
build.process_amendments(notice, notice_xml)
self.assertEqual('[text]', notice['changes']['106-2'][0]['field'])
def test_multiple_changes(self):
""" A notice can have two modifications to a paragraph. """
xml = u"""
<ROOT>
<REGTEXT PART="106" TITLE="12">
<AMDPAR>
2. Designate §§ 106.1 through 106.3 as subpart A under the heading.
</AMDPAR>
</REGTEXT>
<REGTEXT PART="106" TITLE="12">
<AMDPAR>
3. In § 106.2, revise the introductory text to read as follows:
</AMDPAR>
<SECTION>
<SECTNO>§ 106.2</SECTNO>
<SUBJECT> Definitions </SUBJECT>
<P> Except as otherwise provided, the following apply. </P>
</SECTION>
</REGTEXT>
</ROOT>
"""
notice_xml = etree.fromstring(xml)
notice = {'cfr_parts': ['106']}
build.process_amendments(notice, notice_xml)
self.assertEqual(2, len(notice['changes']['106-2']))
def test_create_xmlless_changes(self):
labels_amended = [Amendment('DELETE', '200-2-a'),
Amendment('MOVE', '200-2-b', '200-2-c')]
notice_changes = changes.NoticeChanges()
build.create_xmlless_changes(labels_amended, notice_changes)
delete = notice_changes.changes['200-2-a'][0]
move = notice_changes.changes['200-2-b'][0]
self.assertEqual({'action': 'DELETE'}, delete)
self.assertEqual({'action': 'MOVE', 'destination': ['200', '2', 'c']},
move)
def test_create_xml_changes_reserve(self):
labels_amended = [Amendment('RESERVE', '200-2-a')]
n2a = Node('[Reserved]', label=['200', '2', 'a'])
n2 = Node('n2', label=['200', '2'], children=[n2a])
root = Node('root', label=['200'], children=[n2])
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
reserve = notice_changes.changes['200-2-a'][0]
self.assertEqual(reserve['action'], 'RESERVE')
self.assertEqual(reserve['node']['text'], u'[Reserved]')
def test_create_xml_changes_stars(self):
labels_amended = [Amendment('PUT', '200-2-a')]
n2a1 = Node('(1) Content', label=['200', '2', 'a', '1'])
n2a2 = Node('(2) Content', label=['200', '2', 'a', '2'])
n2a = Node('(a) * * *', label=['200', '2', 'a'], children=[n2a1, n2a2])
n2 = Node('n2', label=['200', '2'], children=[n2a])
root = Node('root', label=['200'], children=[n2])
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
for label in ('200-2-a-1', '200-2-a-2'):
self.assertTrue(label in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes[label]))
change = notice_changes.changes[label][0]
self.assertEqual('PUT', change['action'])
self.assertFalse('field' in change)
self.assertTrue('200-2-a' in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes['200-2-a']))
change = notice_changes.changes['200-2-a'][0]
self.assertEqual('KEEP', change['action'])
self.assertFalse('field' in change)
def test_create_xml_changes_stars_hole(self):
labels_amended = [Amendment('PUT', '200-2-a')]
n2a1 = Node('(1) * * *', label=['200', '2', 'a', '1'])
n2a2 = Node('(2) a2a2a2', label=['200', '2', 'a', '2'])
n2a = Node('(a) aaa', label=['200', '2', 'a'], children=[n2a1, n2a2])
n2 = Node('n2', label=['200', '2'], children=[n2a])
root = Node('root', label=['200'], children=[n2])
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
for label in ('200-2-a', '200-2-a-2'):
self.assertTrue(label in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes[label]))
change = notice_changes.changes[label][0]
self.assertEqual('PUT', change['action'])
self.assertFalse('field' in change)
self.assertTrue('200-2-a-1' in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes['200-2-a-1']))
change = notice_changes.changes['200-2-a-1'][0]
self.assertEqual('KEEP', change['action'])
self.assertFalse('field' in change)
def test_create_xml_changes_child_stars(self):
labels_amended = [Amendment('PUT', '200-2-a')]
xml = etree.fromstring("<ROOT><P>(a) Content</P><STARS /></ROOT>")
n2a = Node('(a) Content', label=['200', '2', 'a'],
source_xml=xml.xpath('//P')[0])
n2b = Node('(b) Content', label=['200', '2', 'b'])
n2 = Node('n2', label=['200', '2'], children=[n2a, n2b])
root = Node('root', label=['200'], children=[n2])
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
self.assertTrue('200-2-a' in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes['200-2-a']))
change = notice_changes.changes['200-2-a'][0]
self.assertEqual('PUT', change['action'])
self.assertFalse('field' in change)
n2a.text = n2a.text + ":"
n2a.source_xml.text = n2a.source_xml.text + ":"
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
self.assertTrue('200-2-a' in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes['200-2-a']))
change = notice_changes.changes['200-2-a'][0]
self.assertEqual('PUT', change['action'])
self.assertEqual('[text]', change.get('field'))
def test_local_version_list(self):
url = 'http://example.com/some/url'
settings.LOCAL_XML_PATHS = [self.dir1, self.dir2]
os.mkdir(self.dir2 + '/some')
f = open(self.dir2 + '/some/url', 'w')
f.write('aaaaa')
f.close()
local_file = self.dir2 + '/some/url'
self.assertEqual([local_file], build._check_local_version_list(url))
os.mkdir(self.dir1 + '/some')
f = open(self.dir1 + '/some/url', 'w')
f.write('bbbbb')
f.close()
local_file_2 = self.dir1 + '/some/url'
self.assertEqual([local_file_2], build._check_local_version_list(url))
def test_local_version_list_split(self):
settings.LOCAL_XML_PATHS = [self.dir1, self.dir2]
os.mkdir(self.dir2 + '/xml/')
f = open(self.dir2 + '/xml/503-1.xml', 'w')
f.write('first_file')
f.close()
f = open(self.dir2 + '/xml/503-2.xml', 'w')
f.write('second_file')
url = 'http://example.com/xml/503.xml'
first = self.dir2 + '/xml/503-1.xml'
second = self.dir2 + '/xml/503-2.xml'
local_versions = build._check_local_version_list(url)
local_versions.sort()
self.assertEqual([first, second], local_versions)
def test_split_doc_num(self):
doc_num = '2013-2222'
effective_date = '2014-10-11'
self.assertEqual(
'2013-2222_20141011',
build.split_doc_num(doc_num, effective_date))
def test_set_document_numbers(self):
notice = {'document_number': '111', 'effective_on': '2013-10-08'}
notices = build.set_document_numbers([notice])
self.assertEqual(notices[0]['document_number'], '111')
second_notice = {'document_number': '222',
'effective_on': '2013-10-10'}
notices = build.set_document_numbers([notice, second_notice])
self.assertEqual(notices[0]['document_number'], '111_20131008')
self.assertEqual(notices[1]['document_number'], '222_20131010')
def test_preprocess_notice_xml_improper_location(self):
notice_xml = etree.fromstring(u"""
<PART>
<REGTEXT>
<AMDPAR>1. In § 105.1, revise paragraph (b):</AMDPAR>
<SECTION>
<STARS />
<P>(b) Content</P>
</SECTION>
<AMDPAR>
3. In § 105.2, revise paragraph (a) to read as follows:
</AMDPAR>
</REGTEXT>
<REGTEXT>
<SECTION>
<P>(a) Content</P>
</SECTION>
</REGTEXT>
</PART>""")
notice_xml = build.preprocess_notice_xml(notice_xml)
amd1b, amd2a = notice_xml.xpath("//AMDPAR")
self.assertEqual(amd1b.getparent().xpath(".//P")[0].text.strip(),
"(b) Content")
self.assertEqual(amd2a.getparent().xpath(".//P")[0].text.strip(),
"(a) Content")
notice_xml = etree.fromstring(u"""
<PART>
<REGTEXT PART="105">
<AMDPAR>1. In § 105.1, revise paragraph (b):</AMDPAR>
<SECTION>
<STARS />
<P>(b) Content</P>
</SECTION>
<AMDPAR>
3. In § 105.2, revise paragraph (a) to read as follows:
</AMDPAR>
</REGTEXT>
<REGTEXT PART="107">
<SECTION>
<P>(a) Content</P>
</SECTION>
</REGTEXT>
</PART>""")
notice_xml = build.preprocess_notice_xml(notice_xml)
amd1b, amd2a = notice_xml.xpath("//AMDPAR")
self.assertEqual(amd1b.getparent().xpath(".//P")[0].text.strip(),
"(b) Content")
self.assertEqual(amd2a.getparent().xpath(".//P")[0].text.strip(),
"(b) Content")
def test_preprocess_notice_xml_interp_amds_are_ps(self):
notice_xml = etree.fromstring(u"""
<PART>
<REGTEXT>
<AMDPAR>1. In § 105.1, revise paragraph (b):</AMDPAR>
<SECTION>
<STARS />
<P>(b) Content</P>
</SECTION>
<P>2. In Supplement I to Part 105,</P>
<P>A. Under Section 105.1, 1(b), paragraph 2 is revised</P>
<P>The revisions are as follows</P>
<HD SOURCE="HD1">Supplement I to Part 105</HD>
<STARS />
<P><E T="03">1(b) Heading</E></P>
<STARS />
<P>2. New Content</P>
</REGTEXT>
</PART>""")
notice_xml = build.preprocess_notice_xml(notice_xml)
amd1, amd2, amd2A, amd_other = notice_xml.xpath("//AMDPAR")
self.assertEqual(amd2A.text.strip(), "A. Under Section 105.1, 1(b), "
+ "paragraph 2 is revised")
def test_preprocess_notice_xml_interp_amds_are_ps2(self):
notice_xml = etree.fromstring(u"""
<PART>
<REGTEXT>
<AMDPAR>1. In Supplement I to Part 105,</AMDPAR>
<P>A. Under Section 105.1, 1(b), paragraph 2 is revised</P>
<P>The revisions are as follows</P>
<HD SOURCE="HD1">Supplement I to Part 105</HD>
<STARS />
<P><E T="03">1(b) Heading</E></P>
<STARS />
<P>2. New Content</P>
</REGTEXT>
</PART>""")
notice_xml = build.preprocess_notice_xml(notice_xml)
amd1, amd1A, amd_other = notice_xml.xpath("//AMDPAR")
self.assertEqual(amd1A.text.strip(), "A. Under Section 105.1, 1(b), "
+ "paragraph 2 is revised")
def test_preprocess_emph_tags(self):
notice_xml = etree.fromstring(u"""
<PART>
<P>(<E T="03">a</E>) Content</P>
<P>(<E T="03">a)</E> Content</P>
<P><E T="03">(a</E>) Content</P>
<P><E T="03">(a)</E> Content</P>
</PART>""")
notice_xml = build.preprocess_notice_xml(notice_xml)
pars = notice_xml.xpath("//P")
self.assertEqual(4, len(pars))
for par in pars:
self.assertEqual(par.text, "(")
self.assertEqual(1, len(par.getchildren()))
em = par.getchildren()[0]
self.assertEqual("E", em.tag)
self.assertEqual("a", em.text)
self.assertEqual(em.tail, ") Content")
self.assertEqual(0, len(em.getchildren()))
notice_xml = etree.fromstring(u"""
<PART>
<P><E T="03">Paragraph 22(a)(5)</E> Content</P>
</PART>""")
notice_xml = build.preprocess_notice_xml(notice_xml)
pars = notice_xml.xpath("//P")
self.assertEqual(1, len(pars))
em = pars[0].getchildren()[0]
self.assertEqual(em.text, "Paragraph 22(a)(5)")
self.assertEqual(em.tail, " Content")
def test_fetch_cfr_parts(self):
notice_xml = etree.fromstring(u"""
<RULE>
<PREAMB>
<CFR>12 CFR Parts 1002, 1024, and 1026</CFR>
</PREAMB>
</RULE>
""")
result = build.fetch_cfr_parts(notice_xml)
self.assertEqual(result, ['1002', '1024', '1026'])
|
adderall/regulations-parser
|
tests/notice_build_tests.py
|
Python
|
cc0-1.0
| 30,845
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer serialization utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras.layers import normalization as batchnorm_v1
from tensorflow.python.keras.layers import normalization_v2 as batchnorm_v2
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.platform import test
@tf_test_util.run_all_in_graph_and_eager_modes
class LayerSerializationTest(parameterized.TestCase, test.TestCase):
def test_serialize_deserialize(self):
layer = keras.layers.Dense(
3, activation='relu', kernel_initializer='ones', bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertEqual(new_layer.bias_regularizer.__class__,
keras.regularizers.L1L2)
if tf2.enabled():
self.assertEqual(new_layer.kernel_initializer.__class__,
keras.initializers.OnesV2)
else:
self.assertEqual(new_layer.kernel_initializer.__class__,
keras.initializers.Ones)
self.assertEqual(new_layer.units, 3)
@parameterized.parameters(
[batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization])
def test_serialize_deserialize_batchnorm(self, batchnorm_layer):
layer = batchnorm_layer(
momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2')
config = keras.layers.serialize(layer)
self.assertEqual(config['class_name'], 'BatchNormalization')
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.momentum, 0.9)
if tf2.enabled():
self.assertIsInstance(new_layer, batchnorm_v2.BatchNormalization)
self.assertEqual(new_layer.beta_initializer.__class__,
keras.initializers.ZerosV2)
else:
self.assertIsInstance(new_layer, batchnorm_v1.BatchNormalization)
self.assertEqual(new_layer.beta_initializer.__class__,
keras.initializers.Zeros)
self.assertEqual(new_layer.gamma_regularizer.__class__,
keras.regularizers.L1L2)
@parameterized.parameters(
[batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization])
def test_deserialize_batchnorm_backwards_compatibility(self, batchnorm_layer):
layer = batchnorm_layer(
momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2')
config = keras.layers.serialize(layer)
# To simulate if BatchNormalizationV1 or BatchNormalizationV2 appears in the
# saved model.
if batchnorm_layer is batchnorm_v1.BatchNormalization:
config['class_name'] = 'BatchNormalizationV1'
else:
config['class_name'] = 'BatchNormalizationV2'
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.momentum, 0.9)
if tf2.enabled():
self.assertIsInstance(new_layer, batchnorm_v2.BatchNormalization)
self.assertEqual(new_layer.beta_initializer.__class__,
keras.initializers.ZerosV2)
else:
self.assertIsInstance(new_layer, batchnorm_v1.BatchNormalization)
self.assertEqual(new_layer.beta_initializer.__class__,
keras.initializers.Zeros)
self.assertEqual(new_layer.gamma_regularizer.__class__,
keras.regularizers.L1L2)
@parameterized.parameters([rnn_v1.LSTM, rnn_v2.LSTM])
def test_serialize_deserialize_lstm(self, layer):
lstm = layer(5, return_sequences=True)
config = keras.layers.serialize(lstm)
self.assertEqual(config['class_name'], 'LSTM')
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.units, 5)
self.assertEqual(new_layer.return_sequences, True)
if tf2.enabled():
self.assertIsInstance(new_layer, rnn_v2.LSTM)
else:
self.assertIsInstance(new_layer, rnn_v1.LSTM)
self.assertNotIsInstance(new_layer, rnn_v2.LSTM)
@parameterized.parameters([rnn_v1.GRU, rnn_v2.GRU])
def test_serialize_deserialize_gru(self, layer):
gru = layer(5, return_sequences=True)
config = keras.layers.serialize(gru)
self.assertEqual(config['class_name'], 'GRU')
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.units, 5)
self.assertEqual(new_layer.return_sequences, True)
if tf2.enabled():
self.assertIsInstance(new_layer, rnn_v2.GRU)
else:
self.assertIsInstance(new_layer, rnn_v1.GRU)
self.assertNotIsInstance(new_layer, rnn_v2.GRU)
if __name__ == '__main__':
test.main()
|
ghchinoy/tensorflow
|
tensorflow/python/keras/layers/serialization_test.py
|
Python
|
apache-2.0
| 5,547
|
from django.contrib import admin
from aspc.mentalhealth.models import (Insurance, Qualification, Specialty, Tag, Gender, Identity, SexualOrientation,
Ethnicity, Therapist, MentalHealthReview)
admin.site.register(Insurance)
admin.site.register(Qualification)
admin.site.register(Specialty)
admin.site.register(Tag)
admin.site.register(Gender)
admin.site.register(Identity)
admin.site.register(SexualOrientation)
admin.site.register(Ethnicity)
admin.site.register(Therapist)
admin.site.register(MentalHealthReview)
|
aspc/mainsite
|
aspc/mentalhealth/admin.py
|
Python
|
mit
| 552
|
# -*- coding: utf-8 -*-
"""
forms.render_wtform_without_syntaxerr
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Render WTForm fields with html attributes that cause TemplateSyntaxErrors
http://flask.pocoo.org/snippets/107/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from app import app
"""
Suppose you had a WTForm TextField that you wanted to use a javascript library like bootstrap-typeahead.js on. You might want the field to render as:
<input type="text" data-provide='typeahead' data-items='3' data-source='["x","y","z"]'>
After creating a WTForm in the view and passing it to your template, your first attempt would be to pass the additional keyword arguments:
{{ form.myfield(name='test', data-provide='typeahead', data-items='3', data-source='["x","y","z"]') }}
But this will lead to a TemplateSyntaxError because a dash is the subtraction operator in Python, and we can't escape the character in a keyword argument's key.
Instead, pass the HTML attributes that contain invalid syntax as an ad-hoc dictionary:
{{ form.myfield(name='test', **{'data-provide':'typeahead','data-items':'3','data-source': '["x","y","z"]'}) }}
"""
|
fengsp/flask-snippets
|
forms/render_wtform_without_syntaxerr.py
|
Python
|
bsd-3-clause
| 1,222
|
# WORK IN PROGRESS
#run_batch_command.topo_flag_options = dict(c = "print '**Running 1-simple.py example**'",
# p = ('non_param1=None', "non_param2='Non-existant'"))
#if isinstance(tasklauncher, QLauncher):
# tasklauncher.qsub_flag_options.update(dict(pe=['OpenMP', '4']))
|
ioam/svn-history
|
dispatch/examples/topographica/2-intermediate.py
|
Python
|
bsd-3-clause
| 325
|
from .appointment import Appointment
__all__ = ('Appointment',)
|
TwilioDevEd/appointment-reminders-flask
|
models/__init__.py
|
Python
|
mit
| 65
|
# 50. Product of Array Exclude Itself
# Description
# Notes
# Testcase
# Judge
# Given an integers array A.
#
# Define B[i] = A[0] * ... * A[i-1] * A[i+1] * ... * A[n-1], calculate B WITHOUT divide operation.
#
# Have you met this question in a real interview? Yes
# Example
# For A = [1, 2, 3], return [6, 3, 2].
#
#
class Solution:
"""
@param: nums: Given an integers array A
@return: A long long array B and B[i]= A[0] * ... * A[i-1] * A[i+1] * ... * A[n-1]
"""
def productExcludeItself(self, nums):
# write your code here
if not nums: return []  # covers both None and the empty list
if len(nums) == 1: return [1]
res = [1] * len(nums)
i = 1
while i < len(nums):
res[i] *= res[i-1] * nums[i-1]
i += 1
i, tmp = len(nums) - 2, 1
while i >= 0:
tmp *= nums[i+1]
res[i] *= tmp
i -= 1
return res
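# Hedged usage sketch (not part of the original file): a quick self-check of
# the prefix/suffix-product approach against the example from the problem
# statement above.
if __name__ == '__main__':
    assert Solution().productExcludeItself([1, 2, 3]) == [6, 3, 2]
    assert Solution().productExcludeItself([4]) == [1]
    assert Solution().productExcludeItself([]) == []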
|
shawncaojob/LC
|
LINTCODE/50_product_of_array_exclude_itself.py
|
Python
|
gpl-3.0
| 944
|
from math import log
def sort(a_list, base):
"""Sort the input list with the specified base, using Radix sort.
This implementation assumes that the input list does not contain negative
numbers. This algorithm is inspired by the Wikipedia implementation of
Radix sort.
"""
passes = int(log(max(a_list), base) + 1)
items = a_list[:]
for digit_index in xrange(passes):
buckets = [[] for _ in xrange(base)] # Buckets for sorted sublists.
for item in items:
digit = _get_digit(item, base, digit_index)
buckets[digit].append(item)
items = []
for sublists in buckets:
items.extend(sublists)
return items
def _get_digit(number, base, digit_index):
return (number // base ** digit_index) % base
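# Hedged usage sketch (not part of the original module): sorting a small list
# of non-negative integers in base 10. Note the module targets Python 2
# (xrange).
if __name__ == '__main__':
    print(sort([170, 45, 75, 90, 802, 24, 2, 66], 10))
    # -> [2, 24, 45, 66, 75, 90, 170, 802]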
|
isubuz/zahlen
|
algorithms/sorting/radix_sort.py
|
Python
|
mit
| 805
|
import lcm
import forseti2
import settings
import LCMNode
import time
# def handle_all(channel, data):
# print "received on %s:" % channel
# print " %s" % str(forseti2.Time.decode(data))
# class TestNode(LCMNode.LCMNode):
# def __init__(self, lc):
# self.lc = lc
# self.start_thread()
lc = lcm.LCM(settings.LCM_URI)
def make_packet((num, r, y, g)):
status_light = forseti2.StatusLight()
status_light.red = r == 1
status_light.yellow = y == 1
status_light.green = g == 1
status_light.buzzer = False
return "StatusLight%d/StatusLight" % num, status_light
def grid():
lights = [make_packet((num, num%2, 1-num%2, num%2)) for num in range(8)]
for channel, msg in lights:
lc.publish(channel, msg.encode())
time.sleep(.1)
lights = [make_packet((num, 1-num%2, num%2, 1-num%2)) for num in range(8)]
for channel, msg in lights:
lc.publish(channel, msg.encode())
time.sleep(.1)
#lc.publish("Match/Init", match_init.encode())
while True:
#grid()
#continue
channel, status_light = make_packet(eval(raw_input("enter (light_number, r, y, g): ")))
lc.publish(channel, status_light.encode())
#lc.subscribe(".*", handle_all)
#TestNode(lc)
# while True:
# pass
|
pioneers/forseti2
|
src/status_lights_tester.py
|
Python
|
apache-2.0
| 1,278
|
import os
import os.path
import sys
import logging
import platform
import time
import signals
import jsonHelper
import globalSignals
from EditorModule import EditorModule, EditorModuleManager
from Project import Project
# from package import PackageManager
from MainModulePath import getMainModulePath
from Command import EditorCommandRegistry
# from InstanceHelper import checkSingleInstance, setRemoteArgumentCallback, sendRemoteMsg
# _GII_BUILTIN_PACKAGES_PATH = 'packages'
# _GII_APP_CONFIG_FILE = 'config.json'
_JUMA_PROJECT_DEFAULT_SETTINGS = 'projectDefaults.json'
_JUMA_EDITOR_PATH = '/editor'
##----------------------------------------------------------------##
class EditorApp(object):
_singleton = None
@staticmethod
def get():
        return EditorApp._singleton
def __init__(self):
assert(not EditorApp._singleton)
EditorApp._singleton = self
EditorModuleManager.get()._app = self
self.defaultMainloopBudget = 0.005
self.initialized = False
self.projectLoaded = False
# self.flagModified = False
# self.debugging = False
self.running = False
self.appPath = getMainModulePath()
self.basePath = self.appPath + _JUMA_EDITOR_PATH
self.dataPaths = []
self.config = {}
self.settings = {}
# self.packageManager = PackageManager()
self.commandRegistry = EditorCommandRegistry.get()
self.registerDataPath( self.getPath('data') )
signals.connect( 'module.register', self.onModuleRegister )
def onModuleRegister(self, m):
if self.running:
            logging.info('registered in runtime: ' + m.getName())
EditorModuleManager.get().loadModule(m)
def init( self, **options ):
# if options.get( 'stop_other_instance', False ):
# if not checkSingleInstance():
# retryCount = 5
# logging.warning( 'running instance detected, trying to shut it down' )
# sendRemoteMsg( 'shut_down' )
# ready = False
# for i in range( retryCount ):
# time.sleep( 1 )
# if checkSingleInstance():
# ready = True
# break
# if not ready:
        # logging.warning( 'timeout for shutting down other instance' )
# return False
# else:
# if not checkSingleInstance():
# logging.warning( 'running instance detected' )
# return False
self.loadConfig()
self.loadSettings()
if self.initialized: return True
self.openProject()
# #scan packages
# excludePackages = self.getProject().getConfig( 'excluded_packages' )
# self.packageManager.addExcludedPackage( excludePackages )
# if not self.packageManager.scanPackages( self.getPath( _GII_BUILTIN_PACKAGES_PATH ) ):
# return False
# if self.getProject().isLoaded():
# self.packageManager.scanPackages( self.getProject().envPackagePath )
# #modules
EditorModuleManager.get().loadAllModules()
signals.emitNow( 'module.loaded' ) #some pre app-ready activities
signals.dispatchAll()
# self.getProject().loadAssetLibrary()
self.initialized = True
self.running = True
# signals.connect( 'app.remote', self.onRemote )
return True
def run( self, **kwargs ):
if not self.initialized:
if not self.init( **kwargs ):
return False
hasError = False
self.resetMainLoopBudget()
try:
signals.emitNow('app.pre_start')
EditorModuleManager.get().startAllModules()
# self.getProject().getAssetLibrary().scanProject()
signals.emitNow('app.start')
signals.dispatchAll()
self.saveConfig()
EditorModuleManager.get().tellAllModulesAppReady()
signals.emit('app.ready')
#main loop
while self.running:
self.doMainLoop()
except Exception, e:
            # TODO: pop up an alert window?
logging.exception( e )
hasError = True
signals.emitNow('app.close')
signals.dispatchAll()
EditorModuleManager.get().stopAllModules()
if not hasError:
self.getProject().save()
self.getProject().saveConfig()
signals.dispatchAll()
EditorModuleManager.get().unloadAllModules()
return True
def setMainLoopBudget( self, budget = 0.001 ):
self.mainLoopBudget = budget
def resetMainLoopBudget( self ):
return self.setMainLoopBudget( self.defaultMainloopBudget )
def setMinimalMainLoopBudget( self ):
return self.setMainLoopBudget( 0.001 )
def doMainLoop( self ):
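        # One editor frame: update all modules, then sleep away whatever is
        # left of the per-frame time budget. When dispatchAll() reports that
        # signals were processed, skip the sleep so follow-up work runs promptly.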
budget = self.mainLoopBudget
t0 = time.time()
EditorModuleManager.get().updateAllModules()
tx = time.time()
if signals.dispatchAll():
rest = 0
else:
t1 = time.time()
elapsed = t1 - t0
rest = budget - elapsed
if rest > 0:
time.sleep( rest )
# def tryStop( self, timeout = 0 ):
# #TODO: alert if any asset is not saved
# self.stop()
# return True
def stop( self ):
self.running = False
self.saveConfig()
def loadSettings( self ):
loaded = jsonHelper.tryLoadJSON( self.getPath( 'data/' + _JUMA_PROJECT_DEFAULT_SETTINGS ) )
if loaded:
self.settings = loaded
def getSetting( self, **options ):
project = self.getProject()
settingName = options.get("name", "None")
hasItem = options.get("exists", None)
projSetting = project.info.get(settingName, None)
if projSetting:
if hasItem:
for st in projSetting:
if st.get("name") == hasItem:
return st
else:
return projSetting
        projSetting = self.settings.get(settingName, None)
        if projSetting and hasItem:  # guard against a missing settings entry
            for st in projSetting:
                if st.get("name") == hasItem:
                    return st
        else:
            return projSetting
def saveConfig( self ):
pass
# jsonHelper.trySaveJSON( self.config, self.getPath( _GII_APP_CONFIG_FILE ), 'project config' )
def loadConfig( self ):
pass
# loaded = jsonHelper.tryLoadJSON( self.getPath( _GII_APP_CONFIG_FILE ) )
# if loaded:
# config = self.config
# for k, v in loaded.items():
# config[ k ] = v
# else:
# self.saveConfig()
# def setConfig( self, name, value, saveNow = True ):
# self.config[name] = value
# if saveNow:
# self.saveConfig()
# def getConfig( self, name, default = None ):
# return self.config.get( name, default )
# def affirmConfig( self, name, default = None ):
# value = self.config.get( name, None )
# if value == None:
# self.config[ name ] = default
# return default
def getModule(self, name):
return EditorModuleManager.get().getModule( name )
def affirmModule(self, name):
return EditorModuleManager.get().affirmModule( name )
def createCommandStack( self, stackName ):
return self.commandRegistry.createCommandStack( stackName )
def getCommandStack( self, stackName ):
return self.commandRegistry.getCommandStack( stackName )
def clearCommandStack( self, stackName ):
stack = self.commandRegistry.getCommandStack( stackName )
if stack:
stack.clear()
def doCommand( self, fullname, *args, **kwargs ):
return self.commandRegistry.doCommand( fullname, *args, **kwargs )
def undoCommand( self, popOnly = False ):
return self.commandRegistry.undoCommand( popOnly )
def getPath( self, path = None ):
if path:
return self.basePath + '/' + path
else:
return self.basePath
def getPythonPath( self ):
return sys.executable
def findDataFile( self, fileName ):
for path in self.dataPaths:
f = path + '/' + fileName
if os.path.exists( f ):
return f
return None
def registerDataPath( self, dataPath ):
self.dataPaths.append( dataPath )
def getProject( self ):
return Project.get()
def openProject( self, basePath = None ):
proj = Project.get()
if proj.load( basePath ):
self.projectLoaded = True
# if self.projectLoaded: return Project.get()
# info = Project.findProject( basePath )
# if not info:
# raise Exception( 'no valid gii project found' )
# proj = Project.get()
# proj.load( info['path'] )
# self.projectLoaded = True
# self.registerDataPath( proj.getEnvPath('data') )
# return proj
def getAssetLibrary( self ):
return self.getProject().getAssetLibrary()
def getPlatformName( self ):
name = platform.system()
if name == 'Linux':
return 'linux'
elif name == 'Darwin':
return 'osx'
elif name == 'Windows':
return 'win'
else:
            raise Exception( 'unsupported platform: ' + name )
def getRelPath( self, path ):
proj = self.getProject()
if proj:
path = path.replace(proj.getPath(), "{project}")
return path
def getAbsPath( self, path ):
proj = self.getProject()
if proj:
path = path.replace('{project}', proj.getPath())
return path
##----------------------------------------------------------------##
app = EditorApp()
|
cloudteampro/juma-editor
|
editor/lib/juma/core/EditorApp.py
|
Python
|
mit
| 8,429
|
# Generated from tnsnames.g4 by ANTLR 4.5.1
# encoding: utf-8
from io import StringIO
from antlr4 import *
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3P")
buf.write("\u02dc\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t")
buf.write(";\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\t")
buf.write("D\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\tL\4M\t")
buf.write("M\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\tU\4V\t")
buf.write("V\4W\tW\4X\tX\4Y\tY\4Z\tZ\3\2\3\2\3\2\7\2\u00b8\n\2\f")
buf.write("\2\16\2\u00bb\13\2\3\3\3\3\3\3\3\3\5\3\u00c1\n\3\3\4\3")
buf.write("\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\6\5\u00cc\n\5\r\5\16\5")
buf.write("\u00cd\5\5\u00d0\n\5\3\6\3\6\3\6\3\6\3\6\6\6\u00d7\n\6")
buf.write("\r\6\16\6\u00d8\5\6\u00db\n\6\3\6\3\6\3\7\3\7\3\7\7\7")
buf.write("\u00e2\n\7\f\7\16\7\u00e5\13\7\3\b\3\b\3\t\3\t\3\t\3\t")
buf.write("\5\t\u00ed\n\t\3\t\6\t\u00f0\n\t\r\t\16\t\u00f1\3\t\5")
buf.write("\t\u00f5\n\t\3\t\3\t\3\n\6\n\u00fa\n\n\r\n\16\n\u00fb")
buf.write("\3\13\3\13\3\13\5\13\u0101\n\13\3\f\3\f\3\f\3\f\5\f\u0107")
buf.write("\n\f\3\f\3\f\6\f\u010b\n\f\r\f\16\f\u010c\5\f\u010f\n")
buf.write("\f\3\f\5\f\u0112\n\f\3\f\3\f\5\f\u0116\n\f\3\f\3\f\3\r")
buf.write("\6\r\u011b\n\r\r\r\16\r\u011c\3\16\3\16\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\3\16\3\16\3\16\5\16\u012b\n\16\3")
buf.write("\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26")
buf.write("\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\5\31\u016d")
buf.write("\n\31\3\31\6\31\u0170\n\31\r\31\16\31\u0171\3\31\5\31")
buf.write("\u0175\n\31\3\31\3\31\3\32\6\32\u017a\n\32\r\32\16\32")
buf.write("\u017b\3\33\3\33\3\33\5\33\u0181\n\33\3\34\3\34\3\34\3")
buf.write("\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36")
buf.write("\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\5\37\u019a")
buf.write("\n\37\3\37\3\37\3 \6 \u019f\n \r \16 \u01a0\3!\3!\5!\u01a5")
buf.write("\n!\3\"\3\"\3\"\3\"\3\"\5\"\u01ac\n\"\3#\3#\3$\6$\u01b1")
buf.write("\n$\r$\16$\u01b2\3%\3%\3%\5%\u01b8\n%\3&\3&\3&\3&\3&\3")
buf.write("&\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3)\3)\3*\3")
buf.write("*\3+\3+\3,\6,\u01d3\n,\r,\16,\u01d4\3-\3-\5-\u01d9\n-")
buf.write("\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3\60\3\60\3\61\6")
buf.write("\61\u01ea\n\61\r\61\16\61\u01eb\3\62\3\62\5\62\u01f0\n")
buf.write("\62\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\65\3\65\3\66\6\66\u0201\n\66\r\66\16\66\u0202")
buf.write("\3\67\3\67\3\67\5\67\u0208\n\67\38\38\38\38\38\38\39\3")
buf.write("9\39\39\39\39\3:\3:\3:\3:\3:\3:\3;\3;\3<\6<\u021f\n<\r")
buf.write("<\16<\u0220\3=\3=\3=\3=\5=\u0227\n=\3>\3>\3>\3>\3>\3>")
buf.write("\3?\3?\3?\3?\3?\3?\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3")
buf.write("A\3B\3B\3B\3B\3C\3C\3C\3C\3C\3C\3D\6D\u024c\nD\rD\16D")
buf.write("\u024d\3E\3E\5E\u0252\nE\3F\3F\3F\3F\3F\3F\3G\3G\3G\3")
buf.write("G\3G\3G\3H\3H\3H\3H\3H\3H\3I\6I\u0267\nI\rI\16I\u0268")
buf.write("\3J\3J\3J\3J\3J\3J\3J\3J\3J\5J\u0274\nJ\3K\3K\3K\3K\3")
buf.write("K\3K\3L\3L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3N\3N\3N\3N\3")
buf.write("N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3")
buf.write("Q\3Q\3Q\5Q\u02a1\nQ\3Q\3Q\3Q\3R\3R\3R\3R\3R\3R\3S\3S\3")
buf.write("S\3S\3S\3S\3T\6T\u02b3\nT\rT\16T\u02b4\3U\3U\3U\3U\3U")
buf.write("\5U\u02bc\nU\3V\3V\3V\3V\3V\3V\3W\3W\3W\3W\3W\3W\3X\3")
buf.write("X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Y\3Z\3Z\3Z\3Z\3Z\3Z\3Z\2")
buf.write("\2[\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60")
buf.write("\62\64\668:<>@BDFHJLNPRTVXZ\\^`bdfhjlnprtvxz|~\u0080\u0082")
buf.write("\u0084\u0086\u0088\u008a\u008c\u008e\u0090\u0092\u0094")
buf.write("\u0096\u0098\u009a\u009c\u009e\u00a0\u00a2\u00a4\u00a6")
buf.write("\u00a8\u00aa\u00ac\u00ae\u00b0\u00b2\2\b\3\2\27\31\3\2")
buf.write("\27\30\4\2\26\26OO\3\2\35\37\3\2FH\3\2JK\u02cf\2\u00b9")
buf.write("\3\2\2\2\4\u00bc\3\2\2\2\6\u00c2\3\2\2\2\b\u00c6\3\2\2")
buf.write("\2\n\u00d1\3\2\2\2\f\u00de\3\2\2\2\16\u00e6\3\2\2\2\20")
buf.write("\u00e8\3\2\2\2\22\u00f9\3\2\2\2\24\u0100\3\2\2\2\26\u0102")
buf.write("\3\2\2\2\30\u011a\3\2\2\2\32\u012a\3\2\2\2\34\u012c\3")
buf.write("\2\2\2\36\u0132\3\2\2\2 \u0138\3\2\2\2\"\u013e\3\2\2\2")
buf.write("$\u0144\3\2\2\2&\u014a\3\2\2\2(\u0150\3\2\2\2*\u0156\3")
buf.write("\2\2\2,\u015c\3\2\2\2.\u0162\3\2\2\2\60\u0168\3\2\2\2")
buf.write("\62\u0179\3\2\2\2\64\u0180\3\2\2\2\66\u0182\3\2\2\28\u0188")
buf.write("\3\2\2\2:\u018e\3\2\2\2<\u0194\3\2\2\2>\u019e\3\2\2\2")
buf.write("@\u01a4\3\2\2\2B\u01ab\3\2\2\2D\u01ad\3\2\2\2F\u01b0\3")
buf.write("\2\2\2H\u01b7\3\2\2\2J\u01b9\3\2\2\2L\u01bf\3\2\2\2N\u01c5")
buf.write("\3\2\2\2P\u01cb\3\2\2\2R\u01cd\3\2\2\2T\u01cf\3\2\2\2")
buf.write("V\u01d2\3\2\2\2X\u01d8\3\2\2\2Z\u01da\3\2\2\2\\\u01e0")
buf.write("\3\2\2\2^\u01e6\3\2\2\2`\u01e9\3\2\2\2b\u01ef\3\2\2\2")
buf.write("d\u01f1\3\2\2\2f\u01f7\3\2\2\2h\u01fd\3\2\2\2j\u0200\3")
buf.write("\2\2\2l\u0207\3\2\2\2n\u0209\3\2\2\2p\u020f\3\2\2\2r\u0215")
buf.write("\3\2\2\2t\u021b\3\2\2\2v\u021e\3\2\2\2x\u0226\3\2\2\2")
buf.write("z\u0228\3\2\2\2|\u022e\3\2\2\2~\u0234\3\2\2\2\u0080\u023a")
buf.write("\3\2\2\2\u0082\u0240\3\2\2\2\u0084\u0244\3\2\2\2\u0086")
buf.write("\u024b\3\2\2\2\u0088\u0251\3\2\2\2\u008a\u0253\3\2\2\2")
buf.write("\u008c\u0259\3\2\2\2\u008e\u025f\3\2\2\2\u0090\u0266\3")
buf.write("\2\2\2\u0092\u0273\3\2\2\2\u0094\u0275\3\2\2\2\u0096\u027b")
buf.write("\3\2\2\2\u0098\u0281\3\2\2\2\u009a\u0287\3\2\2\2\u009c")
buf.write("\u028d\3\2\2\2\u009e\u0293\3\2\2\2\u00a0\u0299\3\2\2\2")
buf.write("\u00a2\u02a5\3\2\2\2\u00a4\u02ab\3\2\2\2\u00a6\u02b2\3")
buf.write("\2\2\2\u00a8\u02bb\3\2\2\2\u00aa\u02bd\3\2\2\2\u00ac\u02c3")
buf.write("\3\2\2\2\u00ae\u02c9\3\2\2\2\u00b0\u02cf\3\2\2\2\u00b2")
buf.write("\u02d5\3\2\2\2\u00b4\u00b8\5\4\3\2\u00b5\u00b8\5\6\4\2")
buf.write("\u00b6\u00b8\5\b\5\2\u00b7\u00b4\3\2\2\2\u00b7\u00b5\3")
buf.write("\2\2\2\u00b7\u00b6\3\2\2\2\u00b8\u00bb\3\2\2\2\u00b9\u00b7")
buf.write("\3\2\2\2\u00b9\u00ba\3\2\2\2\u00ba\3\3\2\2\2\u00bb\u00b9")
buf.write("\3\2\2\2\u00bc\u00bd\5\f\7\2\u00bd\u00c0\7\7\2\2\u00be")
buf.write("\u00c1\5\20\t\2\u00bf\u00c1\5\26\f\2\u00c0\u00be\3\2\2")
buf.write("\2\u00c0\u00bf\3\2\2\2\u00c1\5\3\2\2\2\u00c2\u00c3\7:")
buf.write("\2\2\u00c3\u00c4\7\7\2\2\u00c4\u00c5\7;\2\2\u00c5\7\3")
buf.write("\2\2\2\u00c6\u00c7\5\16\b\2\u00c7\u00cf\7\7\2\2\u00c8")
buf.write("\u00d0\5\n\6\2\u00c9\u00d0\5\60\31\2\u00ca\u00cc\5<\37")
buf.write("\2\u00cb\u00ca\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00cb")
buf.write("\3\2\2\2\u00cd\u00ce\3\2\2\2\u00ce\u00d0\3\2\2\2\u00cf")
buf.write("\u00c8\3\2\2\2\u00cf\u00c9\3\2\2\2\u00cf\u00cb\3\2\2\2")
buf.write("\u00d0\t\3\2\2\2\u00d1\u00d2\7\3\2\2\u00d2\u00d3\7\16")
buf.write("\2\2\u00d3\u00da\7\7\2\2\u00d4\u00db\5\60\31\2\u00d5\u00d7")
buf.write("\5<\37\2\u00d6\u00d5\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d8")
buf.write("\u00d6\3\2\2\2\u00d8\u00d9\3\2\2\2\u00d9\u00db\3\2\2\2")
buf.write("\u00da\u00d4\3\2\2\2\u00da\u00d6\3\2\2\2\u00db\u00dc\3")
buf.write("\2\2\2\u00dc\u00dd\7\4\2\2\u00dd\13\3\2\2\2\u00de\u00e3")
buf.write("\5\16\b\2\u00df\u00e0\7\t\2\2\u00e0\u00e2\5\16\b\2\u00e1")
buf.write("\u00df\3\2\2\2\u00e2\u00e5\3\2\2\2\u00e3\u00e1\3\2\2\2")
buf.write("\u00e3\u00e4\3\2\2\2\u00e4\r\3\2\2\2\u00e5\u00e3\3\2\2")
buf.write("\2\u00e6\u00e7\7O\2\2\u00e7\17\3\2\2\2\u00e8\u00e9\7\3")
buf.write("\2\2\u00e9\u00ea\7\r\2\2\u00ea\u00ec\7\7\2\2\u00eb\u00ed")
buf.write("\5\22\n\2\u00ec\u00eb\3\2\2\2\u00ec\u00ed\3\2\2\2\u00ed")
buf.write("\u00ef\3\2\2\2\u00ee\u00f0\5\26\f\2\u00ef\u00ee\3\2\2")
buf.write("\2\u00f0\u00f1\3\2\2\2\u00f1\u00ef\3\2\2\2\u00f1\u00f2")
buf.write("\3\2\2\2\u00f2\u00f4\3\2\2\2\u00f3\u00f5\5\22\n\2\u00f4")
buf.write("\u00f3\3\2\2\2\u00f4\u00f5\3\2\2\2\u00f5\u00f6\3\2\2\2")
buf.write("\u00f6\u00f7\7\4\2\2\u00f7\21\3\2\2\2\u00f8\u00fa\5\24")
buf.write("\13\2\u00f9\u00f8\3\2\2\2\u00fa\u00fb\3\2\2\2\u00fb\u00f9")
buf.write("\3\2\2\2\u00fb\u00fc\3\2\2\2\u00fc\23\3\2\2\2\u00fd\u0101")
buf.write("\5\66\34\2\u00fe\u0101\58\35\2\u00ff\u0101\5:\36\2\u0100")
buf.write("\u00fd\3\2\2\2\u0100\u00fe\3\2\2\2\u0100\u00ff\3\2\2\2")
buf.write("\u0101\25\3\2\2\2\u0102\u0103\7\3\2\2\u0103\u0104\7\16")
buf.write("\2\2\u0104\u0106\7\7\2\2\u0105\u0107\5\30\r\2\u0106\u0105")
buf.write("\3\2\2\2\u0106\u0107\3\2\2\2\u0107\u010e\3\2\2\2\u0108")
buf.write("\u010f\5\60\31\2\u0109\u010b\5<\37\2\u010a\u0109\3\2\2")
buf.write("\2\u010b\u010c\3\2\2\2\u010c\u010a\3\2\2\2\u010c\u010d")
buf.write("\3\2\2\2\u010d\u010f\3\2\2\2\u010e\u0108\3\2\2\2\u010e")
buf.write("\u010a\3\2\2\2\u010f\u0111\3\2\2\2\u0110\u0112\5\30\r")
buf.write("\2\u0111\u0110\3\2\2\2\u0111\u0112\3\2\2\2\u0112\u0113")
buf.write("\3\2\2\2\u0113\u0115\5\u008eH\2\u0114\u0116\5\30\r\2\u0115")
buf.write("\u0114\3\2\2\2\u0115\u0116\3\2\2\2\u0116\u0117\3\2\2\2")
buf.write("\u0117\u0118\7\4\2\2\u0118\27\3\2\2\2\u0119\u011b\5\32")
buf.write("\16\2\u011a\u0119\3\2\2\2\u011b\u011c\3\2\2\2\u011c\u011a")
buf.write("\3\2\2\2\u011c\u011d\3\2\2\2\u011d\31\3\2\2\2\u011e\u012b")
buf.write("\5\34\17\2\u011f\u012b\5\66\34\2\u0120\u012b\58\35\2\u0121")
buf.write("\u012b\5\36\20\2\u0122\u012b\5 \21\2\u0123\u012b\5\"\22")
buf.write("\2\u0124\u012b\5:\36\2\u0125\u012b\5$\23\2\u0126\u012b")
buf.write("\5&\24\2\u0127\u012b\5(\25\2\u0128\u012b\5*\26\2\u0129")
buf.write("\u012b\5,\27\2\u012a\u011e\3\2\2\2\u012a\u011f\3\2\2\2")
buf.write("\u012a\u0120\3\2\2\2\u012a\u0121\3\2\2\2\u012a\u0122\3")
buf.write("\2\2\2\u012a\u0123\3\2\2\2\u012a\u0124\3\2\2\2\u012a\u0125")
buf.write("\3\2\2\2\u012a\u0126\3\2\2\2\u012a\u0127\3\2\2\2\u012a")
buf.write("\u0128\3\2\2\2\u012a\u0129\3\2\2\2\u012b\33\3\2\2\2\u012c")
buf.write("\u012d\7\3\2\2\u012d\u012e\7$\2\2\u012e\u012f\7\7\2\2")
buf.write("\u012f\u0130\7%\2\2\u0130\u0131\7\4\2\2\u0131\35\3\2\2")
buf.write("\2\u0132\u0133\7\3\2\2\u0133\u0134\7&\2\2\u0134\u0135")
buf.write("\7\7\2\2\u0135\u0136\7\33\2\2\u0136\u0137\7\4\2\2\u0137")
buf.write("\37\3\2\2\2\u0138\u0139\7\3\2\2\u0139\u013a\7\'\2\2\u013a")
buf.write("\u013b\7\7\2\2\u013b\u013c\7\33\2\2\u013c\u013d\7\4\2")
buf.write("\2\u013d!\3\2\2\2\u013e\u013f\7\3\2\2\u013f\u0140\7(\2")
buf.write("\2\u0140\u0141\7\7\2\2\u0141\u0142\7\33\2\2\u0142\u0143")
buf.write("\7\4\2\2\u0143#\3\2\2\2\u0144\u0145\7\3\2\2\u0145\u0146")
buf.write("\7+\2\2\u0146\u0147\7\7\2\2\u0147\u0148\7O\2\2\u0148\u0149")
buf.write("\7\4\2\2\u0149%\3\2\2\2\u014a\u014b\7\3\2\2\u014b\u014c")
buf.write("\7\65\2\2\u014c\u014d\7\7\2\2\u014d\u014e\5.\30\2\u014e")
buf.write("\u014f\7\4\2\2\u014f\'\3\2\2\2\u0150\u0151\7\3\2\2\u0151")
buf.write("\u0152\7\67\2\2\u0152\u0153\7\7\2\2\u0153\u0154\7\33\2")
buf.write("\2\u0154\u0155\7\4\2\2\u0155)\3\2\2\2\u0156\u0157\7\3")
buf.write("\2\2\u0157\u0158\78\2\2\u0158\u0159\7\7\2\2\u0159\u015a")
buf.write("\7\33\2\2\u015a\u015b\7\4\2\2\u015b+\3\2\2\2\u015c\u015d")
buf.write("\7\3\2\2\u015d\u015e\79\2\2\u015e\u015f\7\7\2\2\u015f")
buf.write("\u0160\7\33\2\2\u0160\u0161\7\4\2\2\u0161-\3\2\2\2\u0162")
buf.write("\u0163\7\3\2\2\u0163\u0164\7\66\2\2\u0164\u0165\7\7\2")
buf.write("\2\u0165\u0166\7;\2\2\u0166\u0167\7\4\2\2\u0167/\3\2\2")
buf.write("\2\u0168\u0169\7\3\2\2\u0169\u016a\7\17\2\2\u016a\u016c")
buf.write("\7\7\2\2\u016b\u016d\5\62\32\2\u016c\u016b\3\2\2\2\u016c")
buf.write("\u016d\3\2\2\2\u016d\u016f\3\2\2\2\u016e\u0170\5<\37\2")
buf.write("\u016f\u016e\3\2\2\2\u0170\u0171\3\2\2\2\u0171\u016f\3")
buf.write("\2\2\2\u0171\u0172\3\2\2\2\u0172\u0174\3\2\2\2\u0173\u0175")
buf.write("\5\62\32\2\u0174\u0173\3\2\2\2\u0174\u0175\3\2\2\2\u0175")
buf.write("\u0176\3\2\2\2\u0176\u0177\7\4\2\2\u0177\61\3\2\2\2\u0178")
buf.write("\u017a\5\64\33\2\u0179\u0178\3\2\2\2\u017a\u017b\3\2\2")
buf.write("\2\u017b\u0179\3\2\2\2\u017b\u017c\3\2\2\2\u017c\63\3")
buf.write("\2\2\2\u017d\u0181\5\66\34\2\u017e\u0181\58\35\2\u017f")
buf.write("\u0181\5:\36\2\u0180\u017d\3\2\2\2\u0180\u017e\3\2\2\2")
buf.write("\u0180\u017f\3\2\2\2\u0181\65\3\2\2\2\u0182\u0183\7\3")
buf.write("\2\2\u0183\u0184\7!\2\2\u0184\u0185\7\7\2\2\u0185\u0186")
buf.write("\t\2\2\2\u0186\u0187\7\4\2\2\u0187\67\3\2\2\2\u0188\u0189")
buf.write("\7\3\2\2\u0189\u018a\7 \2\2\u018a\u018b\7\7\2\2\u018b")
buf.write("\u018c\t\2\2\2\u018c\u018d\7\4\2\2\u018d9\3\2\2\2\u018e")
buf.write("\u018f\7\3\2\2\u018f\u0190\7)\2\2\u0190\u0191\7\7\2\2")
buf.write("\u0191\u0192\t\3\2\2\u0192\u0193\7\4\2\2\u0193;\3\2\2")
buf.write("\2\u0194\u0195\7\3\2\2\u0195\u0196\7\20\2\2\u0196\u0197")
buf.write("\7\7\2\2\u0197\u0199\5B\"\2\u0198\u019a\5> \2\u0199\u0198")
buf.write("\3\2\2\2\u0199\u019a\3\2\2\2\u019a\u019b\3\2\2\2\u019b")
buf.write("\u019c\7\4\2\2\u019c=\3\2\2\2\u019d\u019f\5@!\2\u019e")
buf.write("\u019d\3\2\2\2\u019f\u01a0\3\2\2\2\u01a0\u019e\3\2\2\2")
buf.write("\u01a0\u01a1\3\2\2\2\u01a1?\3\2\2\2\u01a2\u01a5\5\"\22")
buf.write("\2\u01a3\u01a5\5 \21\2\u01a4\u01a2\3\2\2\2\u01a4\u01a3")
buf.write("\3\2\2\2\u01a5A\3\2\2\2\u01a6\u01ac\5D#\2\u01a7\u01ac")
buf.write("\5T+\2\u01a8\u01ac\5^\60\2\u01a9\u01ac\5h\65\2\u01aa\u01ac")
buf.write("\5t;\2\u01ab\u01a6\3\2\2\2\u01ab\u01a7\3\2\2\2\u01ab\u01a8")
buf.write("\3\2\2\2\u01ab\u01a9\3\2\2\2\u01ab\u01aa\3\2\2\2\u01ac")
buf.write("C\3\2\2\2\u01ad\u01ae\5F$\2\u01aeE\3\2\2\2\u01af\u01b1")
buf.write("\5H%\2\u01b0\u01af\3\2\2\2\u01b1\u01b2\3\2\2\2\u01b2\u01b0")
buf.write("\3\2\2\2\u01b2\u01b3\3\2\2\2\u01b3G\3\2\2\2\u01b4\u01b8")
buf.write("\5J&\2\u01b5\u01b8\5L\'\2\u01b6\u01b8\5N(\2\u01b7\u01b4")
buf.write("\3\2\2\2\u01b7\u01b5\3\2\2\2\u01b7\u01b6\3\2\2\2\u01b8")
buf.write("I\3\2\2\2\u01b9\u01ba\7\3\2\2\u01ba\u01bb\7\23\2\2\u01bb")
buf.write("\u01bc\7\7\2\2\u01bc\u01bd\5P)\2\u01bd\u01be\7\4\2\2\u01be")
buf.write("K\3\2\2\2\u01bf\u01c0\7\3\2\2\u01c0\u01c1\7\24\2\2\u01c1")
buf.write("\u01c2\7\7\2\2\u01c2\u01c3\5R*\2\u01c3\u01c4\7\4\2\2\u01c4")
buf.write("M\3\2\2\2\u01c5\u01c6\7\3\2\2\u01c6\u01c7\7\21\2\2\u01c7")
buf.write("\u01c8\7\7\2\2\u01c8\u01c9\7\22\2\2\u01c9\u01ca\7\4\2")
buf.write("\2\u01caO\3\2\2\2\u01cb\u01cc\t\4\2\2\u01ccQ\3\2\2\2\u01cd")
buf.write("\u01ce\7\33\2\2\u01ceS\3\2\2\2\u01cf\u01d0\5V,\2\u01d0")
buf.write("U\3\2\2\2\u01d1\u01d3\5X-\2\u01d2\u01d1\3\2\2\2\u01d3")
buf.write("\u01d4\3\2\2\2\u01d4\u01d2\3\2\2\2\u01d4\u01d5\3\2\2\2")
buf.write("\u01d5W\3\2\2\2\u01d6\u01d9\5Z.\2\u01d7\u01d9\5\\/\2\u01d8")
buf.write("\u01d6\3\2\2\2\u01d8\u01d7\3\2\2\2\u01d9Y\3\2\2\2\u01da")
buf.write("\u01db\7\3\2\2\u01db\u01dc\7\21\2\2\u01dc\u01dd\7\7\2")
buf.write("\2\u01dd\u01de\7-\2\2\u01de\u01df\7\4\2\2\u01df[\3\2\2")
buf.write("\2\u01e0\u01e1\7\3\2\2\u01e1\u01e2\7,\2\2\u01e2\u01e3")
buf.write("\7\7\2\2\u01e3\u01e4\7O\2\2\u01e4\u01e5\7\4\2\2\u01e5")
buf.write("]\3\2\2\2\u01e6\u01e7\5`\61\2\u01e7_\3\2\2\2\u01e8\u01ea")
buf.write("\5b\62\2\u01e9\u01e8\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb")
buf.write("\u01e9\3\2\2\2\u01eb\u01ec\3\2\2\2\u01eca\3\2\2\2\u01ed")
buf.write("\u01f0\5d\63\2\u01ee\u01f0\5f\64\2\u01ef\u01ed\3\2\2\2")
buf.write("\u01ef\u01ee\3\2\2\2\u01f0c\3\2\2\2\u01f1\u01f2\7\3\2")
buf.write("\2\u01f2\u01f3\7\21\2\2\u01f3\u01f4\7\7\2\2\u01f4\u01f5")
buf.write("\7.\2\2\u01f5\u01f6\7\4\2\2\u01f6e\3\2\2\2\u01f7\u01f8")
buf.write("\7\3\2\2\u01f8\u01f9\7*\2\2\u01f9\u01fa\7\7\2\2\u01fa")
buf.write("\u01fb\7O\2\2\u01fb\u01fc\7\4\2\2\u01fcg\3\2\2\2\u01fd")
buf.write("\u01fe\5j\66\2\u01fei\3\2\2\2\u01ff\u0201\5l\67\2\u0200")
buf.write("\u01ff\3\2\2\2\u0201\u0202\3\2\2\2\u0202\u0200\3\2\2\2")
buf.write("\u0202\u0203\3\2\2\2\u0203k\3\2\2\2\u0204\u0208\5n8\2")
buf.write("\u0205\u0208\5p9\2\u0206\u0208\5r:\2\u0207\u0204\3\2\2")
buf.write("\2\u0207\u0205\3\2\2\2\u0207\u0206\3\2\2\2\u0208m\3\2")
buf.write("\2\2\u0209\u020a\7\3\2\2\u020a\u020b\7\21\2\2\u020b\u020c")
buf.write("\7\7\2\2\u020c\u020d\7/\2\2\u020d\u020e\7\4\2\2\u020e")
buf.write("o\3\2\2\2\u020f\u0210\7\3\2\2\u0210\u0211\7C\2\2\u0211")
buf.write("\u0212\7\7\2\2\u0212\u0213\7O\2\2\u0213\u0214\7\4\2\2")
buf.write("\u0214q\3\2\2\2\u0215\u0216\7\3\2\2\u0216\u0217\7\61\2")
buf.write("\2\u0217\u0218\7\7\2\2\u0218\u0219\7O\2\2\u0219\u021a")
buf.write("\7\4\2\2\u021as\3\2\2\2\u021b\u021c\5v<\2\u021cu\3\2\2")
buf.write("\2\u021d\u021f\5x=\2\u021e\u021d\3\2\2\2\u021f\u0220\3")
buf.write("\2\2\2\u0220\u021e\3\2\2\2\u0220\u0221\3\2\2\2\u0221w")
buf.write("\3\2\2\2\u0222\u0227\5z>\2\u0223\u0227\5|?\2\u0224\u0227")
buf.write("\5~@\2\u0225\u0227\5\u0080A\2\u0226\u0222\3\2\2\2\u0226")
buf.write("\u0223\3\2\2\2\u0226\u0224\3\2\2\2\u0226\u0225\3\2\2\2")
buf.write("\u0227y\3\2\2\2\u0228\u0229\7\3\2\2\u0229\u022a\7\21\2")
buf.write("\2\u022a\u022b\7\7\2\2\u022b\u022c\7\60\2\2\u022c\u022d")
buf.write("\7\4\2\2\u022d{\3\2\2\2\u022e\u022f\7\3\2\2\u022f\u0230")
buf.write("\7\62\2\2\u0230\u0231\7\7\2\2\u0231\u0232\7O\2\2\u0232")
buf.write("\u0233\7\4\2\2\u0233}\3\2\2\2\u0234\u0235\7\3\2\2\u0235")
buf.write("\u0236\7\63\2\2\u0236\u0237\7\7\2\2\u0237\u0238\7O\2\2")
buf.write("\u0238\u0239\7\4\2\2\u0239\177\3\2\2\2\u023a\u023b\7\3")
buf.write("\2\2\u023b\u023c\7\64\2\2\u023c\u023d\7\7\2\2\u023d\u023e")
buf.write("\5\u0082B\2\u023e\u023f\7\4\2\2\u023f\u0081\3\2\2\2\u0240")
buf.write("\u0241\7\13\2\2\u0241\u0242\5\u0084C\2\u0242\u0243\7\13")
buf.write("\2\2\u0243\u0083\3\2\2\2\u0244\u0245\7\3\2\2\u0245\u0246")
buf.write("\7\16\2\2\u0246\u0247\7\7\2\2\u0247\u0248\5\u0086D\2\u0248")
buf.write("\u0249\7\4\2\2\u0249\u0085\3\2\2\2\u024a\u024c\5\u0088")
buf.write("E\2\u024b\u024a\3\2\2\2\u024c\u024d\3\2\2\2\u024d\u024b")
buf.write("\3\2\2\2\u024d\u024e\3\2\2\2\u024e\u0087\3\2\2\2\u024f")
buf.write("\u0252\5\u008aF\2\u0250\u0252\5\u008cG\2\u0251\u024f\3")
buf.write("\2\2\2\u0251\u0250\3\2\2\2\u0252\u0089\3\2\2\2\u0253\u0254")
buf.write("\7\3\2\2\u0254\u0255\7\25\2\2\u0255\u0256\7\7\2\2\u0256")
buf.write("\u0257\7\27\2\2\u0257\u0258\7\4\2\2\u0258\u008b\3\2\2")
buf.write("\2\u0259\u025a\7\3\2\2\u025a\u025b\7\20\2\2\u025b\u025c")
buf.write("\7\7\2\2\u025c\u025d\5z>\2\u025d\u025e\7\4\2\2\u025e\u008d")
buf.write("\3\2\2\2\u025f\u0260\7\3\2\2\u0260\u0261\7\f\2\2\u0261")
buf.write("\u0262\7\7\2\2\u0262\u0263\5\u0090I\2\u0263\u0264\7\4")
buf.write("\2\2\u0264\u008f\3\2\2\2\u0265\u0267\5\u0092J\2\u0266")
buf.write("\u0265\3\2\2\2\u0267\u0268\3\2\2\2\u0268\u0266\3\2\2\2")
buf.write("\u0268\u0269\3\2\2\2\u0269\u0091\3\2\2\2\u026a\u0274\5")
buf.write("\u0094K\2\u026b\u0274\5\u0096L\2\u026c\u0274\5\u0098M")
buf.write("\2\u026d\u0274\5\u009aN\2\u026e\u0274\5\u009cO\2\u026f")
buf.write("\u0274\5\u009eP\2\u0270\u0274\5\u00a0Q\2\u0271\u0274\5")
buf.write("\u00a2R\2\u0272\u0274\5\u00a4S\2\u0273\u026a\3\2\2\2\u0273")
buf.write("\u026b\3\2\2\2\u0273\u026c\3\2\2\2\u0273\u026d\3\2\2\2")
buf.write("\u0273\u026e\3\2\2\2\u0273\u026f\3\2\2\2\u0273\u0270\3")
buf.write("\2\2\2\u0273\u0271\3\2\2\2\u0273\u0272\3\2\2\2\u0274\u0093")
buf.write("\3\2\2\2\u0275\u0276\7\3\2\2\u0276\u0277\7<\2\2\u0277")
buf.write("\u0278\7\7\2\2\u0278\u0279\7O\2\2\u0279\u027a\7\4\2\2")
buf.write("\u027a\u0095\3\2\2\2\u027b\u027c\7\3\2\2\u027c\u027d\7")
buf.write("=\2\2\u027d\u027e\7\7\2\2\u027e\u027f\7O\2\2\u027f\u0280")
buf.write("\7\4\2\2\u0280\u0097\3\2\2\2\u0281\u0282\7\3\2\2\u0282")
buf.write("\u0283\7>\2\2\u0283\u0284\7\7\2\2\u0284\u0285\7O\2\2\u0285")
buf.write("\u0286\7\4\2\2\u0286\u0099\3\2\2\2\u0287\u0288\7\3\2\2")
buf.write("\u0288\u0289\7?\2\2\u0289\u028a\7\7\2\2\u028a\u028b\5")
buf.write("\u00a6T\2\u028b\u028c\7\4\2\2\u028c\u009b\3\2\2\2\u028d")
buf.write("\u028e\7\3\2\2\u028e\u028f\7@\2\2\u028f\u0290\7\7\2\2")
buf.write("\u0290\u0291\7O\2\2\u0291\u0292\7\4\2\2\u0292\u009d\3")
buf.write("\2\2\2\u0293\u0294\7\3\2\2\u0294\u0295\7A\2\2\u0295\u0296")
buf.write("\7\7\2\2\u0296\u0297\7\34\2\2\u0297\u0298\7\4\2\2\u0298")
buf.write("\u009f\3\2\2\2\u0299\u029a\7\3\2\2\u029a\u029b\7B\2\2")
buf.write("\u029b\u02a0\7\7\2\2\u029c\u029d\7\5\2\2\u029d\u029e\7")
buf.write("\b\2\2\u029e\u029f\7O\2\2\u029f\u02a1\7\6\2\2\u02a0\u029c")
buf.write("\3\2\2\2\u02a0\u02a1\3\2\2\2\u02a1\u02a2\3\2\2\2\u02a2")
buf.write("\u02a3\7O\2\2\u02a3\u02a4\7\4\2\2\u02a4\u00a1\3\2\2\2")
buf.write("\u02a5\u02a6\7\3\2\2\u02a6\u02a7\7C\2\2\u02a7\u02a8\7")
buf.write("\7\2\2\u02a8\u02a9\t\5\2\2\u02a9\u02aa\7\4\2\2\u02aa\u00a3")
buf.write("\3\2\2\2\u02ab\u02ac\7\3\2\2\u02ac\u02ad\7\"\2\2\u02ad")
buf.write("\u02ae\7\7\2\2\u02ae\u02af\7#\2\2\u02af\u02b0\7\4\2\2")
buf.write("\u02b0\u00a5\3\2\2\2\u02b1\u02b3\5\u00a8U\2\u02b2\u02b1")
buf.write("\3\2\2\2\u02b3\u02b4\3\2\2\2\u02b4\u02b2\3\2\2\2\u02b4")
buf.write("\u02b5\3\2\2\2\u02b5\u00a7\3\2\2\2\u02b6\u02bc\5\u00aa")
buf.write("V\2\u02b7\u02bc\5\u00acW\2\u02b8\u02bc\5\u00aeX\2\u02b9")
buf.write("\u02bc\5\u00b0Y\2\u02ba\u02bc\5\u00b2Z\2\u02bb\u02b6\3")
buf.write("\2\2\2\u02bb\u02b7\3\2\2\2\u02bb\u02b8\3\2\2\2\u02bb\u02b9")
buf.write("\3\2\2\2\u02bb\u02ba\3\2\2\2\u02bc\u00a9\3\2\2\2\u02bd")
buf.write("\u02be\7\3\2\2\u02be\u02bf\7E\2\2\u02bf\u02c0\7\7\2\2")
buf.write("\u02c0\u02c1\t\6\2\2\u02c1\u02c2\7\4\2\2\u02c2\u00ab\3")
buf.write("\2\2\2\u02c3\u02c4\7\3\2\2\u02c4\u02c5\7D\2\2\u02c5\u02c6")
buf.write("\7\7\2\2\u02c6\u02c7\7O\2\2\u02c7\u02c8\7\4\2\2\u02c8")
buf.write("\u00ad\3\2\2\2\u02c9\u02ca\7\3\2\2\u02ca\u02cb\7I\2\2")
buf.write("\u02cb\u02cc\7\7\2\2\u02cc\u02cd\t\7\2\2\u02cd\u02ce\7")
buf.write("\4\2\2\u02ce\u00af\3\2\2\2\u02cf\u02d0\7\3\2\2\u02d0\u02d1")
buf.write("\7L\2\2\u02d1\u02d2\7\7\2\2\u02d2\u02d3\7\33\2\2\u02d3")
buf.write("\u02d4\7\4\2\2\u02d4\u00b1\3\2\2\2\u02d5\u02d6\7\3\2\2")
buf.write("\u02d6\u02d7\7M\2\2\u02d7\u02d8\7\7\2\2\u02d8\u02d9\7")
buf.write("\33\2\2\u02d9\u02da\7\4\2\2\u02da\u00b3\3\2\2\2\60\u00b7")
buf.write("\u00b9\u00c0\u00cd\u00cf\u00d8\u00da\u00e3\u00ec\u00f1")
buf.write("\u00f4\u00fb\u0100\u0106\u010c\u010e\u0111\u0115\u011c")
buf.write("\u012a\u016c\u0171\u0174\u017b\u0180\u0199\u01a0\u01a4")
buf.write("\u01ab\u01b2\u01b7\u01d4\u01d8\u01eb\u01ef\u0202\u0207")
buf.write("\u0220\u0226\u024d\u0251\u0268\u0273\u02a0\u02b4\u02bb")
return buf.getvalue()
class tnsnamesParser(Parser):
grammarFileName = "tnsnames.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
sharedContextCache = PredictionContextCache()
literalNames = ["<INVALID>", "'('", "')'", "'['", "']'", "'='", "'.'",
"','", "'\"'", "'''"]
symbolicNames = ["<INVALID>", "L_PAREN", "R_PAREN", "L_SQUARE", "R_SQUARE",
"EQUAL", "DOT", "COMMA", "D_QUOTE", "S_QUOTE", "CONNECT_DATA",
"DESCRIPTION_LIST", "DESCRIPTION", "ADDRESS_LIST",
"ADDRESS", "PROTOCOL", "TCP", "HOST", "PORT", "LOCAL",
"IP", "YES_NO", "ON_OFF", "TRUE_FALSE", "COMMENT",
"INT", "OK", "DEDICATED", "SHARED", "POOLED", "LOAD_BALANCE",
"FAILOVER", "UR", "UR_A", "ENABLE", "BROKEN", "SDU",
"RECV_BUF", "SEND_BUF", "SOURCE_ROUTE", "SERVICE",
"SERVICE_TYPE", "KEY", "IPC", "SPX", "NMP", "BEQ",
"PIPE", "PROGRAM", "ARGV0", "ARGS", "SECURITY", "SSL_CERT",
"CONN_TIMEOUT", "RETRY_COUNT", "TCT", "IFILE", "DQ_STRING",
"SERVICE_NAME", "SID", "INSTANCE_NAME", "FAILOVER_MODE",
"GLOBAL_NAME", "HS", "RDB_DATABASE", "SERVER", "BACKUP",
"TYPE", "SESSION", "SELECT", "NONE", "METHOD", "BASIC",
"PRECONNECT", "RETRIES", "DELAY", "QUAD", "ID", "WS"]
RULE_tnsnames = 0
RULE_tns_entry = 1
RULE_ifile = 2
RULE_lsnr_entry = 3
RULE_lsnr_description = 4
RULE_alias_list = 5
RULE_alias = 6
RULE_description_list = 7
RULE_dl_params = 8
RULE_dl_parameter = 9
RULE_description = 10
RULE_d_params = 11
RULE_d_parameter = 12
RULE_d_enable = 13
RULE_d_sdu = 14
RULE_d_recv_buf = 15
RULE_d_send_buf = 16
RULE_d_service_type = 17
RULE_d_security = 18
RULE_d_conn_timeout = 19
RULE_d_retry_count = 20
RULE_d_tct = 21
RULE_ds_parameter = 22
RULE_address_list = 23
RULE_al_params = 24
RULE_al_parameter = 25
RULE_al_failover = 26
RULE_al_load_balance = 27
RULE_al_source_route = 28
RULE_address = 29
RULE_a_params = 30
RULE_a_parameter = 31
RULE_protocol_info = 32
RULE_tcp_protocol = 33
RULE_tcp_params = 34
RULE_tcp_parameter = 35
RULE_tcp_host = 36
RULE_tcp_port = 37
RULE_tcp_tcp = 38
RULE_host = 39
RULE_port = 40
RULE_ipc_protocol = 41
RULE_ipc_params = 42
RULE_ipc_parameter = 43
RULE_ipc_ipc = 44
RULE_ipc_key = 45
RULE_spx_protocol = 46
RULE_spx_params = 47
RULE_spx_parameter = 48
RULE_spx_spx = 49
RULE_spx_service = 50
RULE_nmp_protocol = 51
RULE_nmp_params = 52
RULE_nmp_parameter = 53
RULE_nmp_nmp = 54
RULE_nmp_server = 55
RULE_nmp_pipe = 56
RULE_beq_protocol = 57
RULE_beq_params = 58
RULE_beq_parameter = 59
RULE_beq_beq = 60
RULE_beq_program = 61
RULE_beq_argv0 = 62
RULE_beq_args = 63
RULE_ba_parameter = 64
RULE_ba_description = 65
RULE_bad_params = 66
RULE_bad_parameter = 67
RULE_bad_local = 68
RULE_bad_address = 69
RULE_connect_data = 70
RULE_cd_params = 71
RULE_cd_parameter = 72
RULE_cd_service_name = 73
RULE_cd_sid = 74
RULE_cd_instance_name = 75
RULE_cd_failover_mode = 76
RULE_cd_global_name = 77
RULE_cd_hs = 78
RULE_cd_rdb_database = 79
RULE_cd_server = 80
RULE_cd_ur = 81
RULE_fo_params = 82
RULE_fo_parameter = 83
RULE_fo_type = 84
RULE_fo_backup = 85
RULE_fo_method = 86
RULE_fo_retries = 87
RULE_fo_delay = 88
ruleNames = ["tnsnames", "tns_entry", "ifile", "lsnr_entry", "lsnr_description",
"alias_list", "alias", "description_list", "dl_params",
"dl_parameter", "description", "d_params", "d_parameter",
"d_enable", "d_sdu", "d_recv_buf", "d_send_buf", "d_service_type",
"d_security", "d_conn_timeout", "d_retry_count", "d_tct",
"ds_parameter", "address_list", "al_params", "al_parameter",
"al_failover", "al_load_balance", "al_source_route",
"address", "a_params", "a_parameter", "protocol_info",
"tcp_protocol", "tcp_params", "tcp_parameter", "tcp_host",
"tcp_port", "tcp_tcp", "host", "port", "ipc_protocol",
"ipc_params", "ipc_parameter", "ipc_ipc", "ipc_key",
"spx_protocol", "spx_params", "spx_parameter", "spx_spx",
"spx_service", "nmp_protocol", "nmp_params", "nmp_parameter",
"nmp_nmp", "nmp_server", "nmp_pipe", "beq_protocol",
"beq_params", "beq_parameter", "beq_beq", "beq_program",
"beq_argv0", "beq_args", "ba_parameter", "ba_description",
"bad_params", "bad_parameter", "bad_local", "bad_address",
"connect_data", "cd_params", "cd_parameter", "cd_service_name",
"cd_sid", "cd_instance_name", "cd_failover_mode", "cd_global_name",
"cd_hs", "cd_rdb_database", "cd_server", "cd_ur", "fo_params",
"fo_parameter", "fo_type", "fo_backup", "fo_method",
"fo_retries", "fo_delay"]
EOF = Token.EOF
L_PAREN = 1
R_PAREN = 2
L_SQUARE = 3
R_SQUARE = 4
EQUAL = 5
DOT = 6
COMMA = 7
D_QUOTE = 8
S_QUOTE = 9
CONNECT_DATA = 10
DESCRIPTION_LIST = 11
DESCRIPTION = 12
ADDRESS_LIST = 13
ADDRESS = 14
PROTOCOL = 15
TCP = 16
HOST = 17
PORT = 18
LOCAL = 19
IP = 20
YES_NO = 21
ON_OFF = 22
TRUE_FALSE = 23
COMMENT = 24
INT = 25
OK = 26
DEDICATED = 27
SHARED = 28
POOLED = 29
LOAD_BALANCE = 30
FAILOVER = 31
UR = 32
UR_A = 33
ENABLE = 34
BROKEN = 35
SDU = 36
RECV_BUF = 37
SEND_BUF = 38
SOURCE_ROUTE = 39
SERVICE = 40
SERVICE_TYPE = 41
KEY = 42
IPC = 43
SPX = 44
NMP = 45
BEQ = 46
PIPE = 47
PROGRAM = 48
ARGV0 = 49
ARGS = 50
SECURITY = 51
SSL_CERT = 52
CONN_TIMEOUT = 53
RETRY_COUNT = 54
TCT = 55
IFILE = 56
DQ_STRING = 57
SERVICE_NAME = 58
SID = 59
INSTANCE_NAME = 60
FAILOVER_MODE = 61
GLOBAL_NAME = 62
HS = 63
RDB_DATABASE = 64
SERVER = 65
BACKUP = 66
TYPE = 67
SESSION = 68
SELECT = 69
NONE = 70
METHOD = 71
BASIC = 72
PRECONNECT = 73
RETRIES = 74
DELAY = 75
QUAD = 76
ID = 77
WS = 78
def __init__(self, input: TokenStream):
super().__init__(input)
self.checkVersion("4.5.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class TnsnamesContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def tns_entry(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Tns_entryContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Tns_entryContext, i)
def ifile(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.IfileContext)
else:
return self.getTypedRuleContext(tnsnamesParser.IfileContext, i)
def lsnr_entry(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Lsnr_entryContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Lsnr_entryContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_tnsnames
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterTnsnames"):
listener.enterTnsnames(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitTnsnames"):
listener.exitTnsnames(self)
def tnsnames(self):
localctx = tnsnamesParser.TnsnamesContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_tnsnames)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 183
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == tnsnamesParser.IFILE or _la == tnsnamesParser.ID:
self.state = 181
la_ = self._interp.adaptivePredict(self._input, 0, self._ctx)
if la_ == 1:
self.state = 178
self.tns_entry()
pass
elif la_ == 2:
self.state = 179
self.ifile()
pass
elif la_ == 3:
self.state = 180
self.lsnr_entry()
pass
self.state = 185
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Tns_entryContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def alias_list(self):
return self.getTypedRuleContext(tnsnamesParser.Alias_listContext, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def description_list(self):
return self.getTypedRuleContext(tnsnamesParser.Description_listContext, 0)
def description(self):
return self.getTypedRuleContext(tnsnamesParser.DescriptionContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_tns_entry
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterTns_entry"):
listener.enterTns_entry(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitTns_entry"):
listener.exitTns_entry(self)
def tns_entry(self):
localctx = tnsnamesParser.Tns_entryContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_tns_entry)
try:
self.enterOuterAlt(localctx, 1)
self.state = 186
self.alias_list()
self.state = 187
self.match(tnsnamesParser.EQUAL)
self.state = 190
la_ = self._interp.adaptivePredict(self._input, 2, self._ctx)
if la_ == 1:
self.state = 188
self.description_list()
pass
elif la_ == 2:
self.state = 189
self.description()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IfileContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def IFILE(self):
return self.getToken(tnsnamesParser.IFILE, 0)
def DQ_STRING(self):
return self.getToken(tnsnamesParser.DQ_STRING, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_ifile
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterIfile"):
listener.enterIfile(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitIfile"):
listener.exitIfile(self)
def ifile(self):
localctx = tnsnamesParser.IfileContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_ifile)
try:
self.enterOuterAlt(localctx, 1)
self.state = 192
self.match(tnsnamesParser.IFILE)
self.state = 193
self.match(tnsnamesParser.EQUAL)
self.state = 194
self.match(tnsnamesParser.DQ_STRING)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Lsnr_entryContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def alias(self):
return self.getTypedRuleContext(tnsnamesParser.AliasContext, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def lsnr_description(self):
return self.getTypedRuleContext(tnsnamesParser.Lsnr_descriptionContext, 0)
def address_list(self):
return self.getTypedRuleContext(tnsnamesParser.Address_listContext, 0)
def address(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.AddressContext)
else:
return self.getTypedRuleContext(tnsnamesParser.AddressContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_lsnr_entry
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterLsnr_entry"):
listener.enterLsnr_entry(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitLsnr_entry"):
listener.exitLsnr_entry(self)
def lsnr_entry(self):
localctx = tnsnamesParser.Lsnr_entryContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_lsnr_entry)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 196
self.alias()
self.state = 197
self.match(tnsnamesParser.EQUAL)
self.state = 205
la_ = self._interp.adaptivePredict(self._input, 4, self._ctx)
if la_ == 1:
self.state = 198
self.lsnr_description()
pass
elif la_ == 2:
self.state = 199
self.address_list()
pass
elif la_ == 3:
self.state = 201
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 200
self.address()
self.state = 203
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la == tnsnamesParser.L_PAREN):
break
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Lsnr_descriptionContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def DESCRIPTION(self):
return self.getToken(tnsnamesParser.DESCRIPTION, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def address_list(self):
return self.getTypedRuleContext(tnsnamesParser.Address_listContext, 0)
def address(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.AddressContext)
else:
return self.getTypedRuleContext(tnsnamesParser.AddressContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_lsnr_description
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterLsnr_description"):
listener.enterLsnr_description(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitLsnr_description"):
listener.exitLsnr_description(self)
def lsnr_description(self):
localctx = tnsnamesParser.Lsnr_descriptionContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_lsnr_description)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 207
self.match(tnsnamesParser.L_PAREN)
self.state = 208
self.match(tnsnamesParser.DESCRIPTION)
self.state = 209
self.match(tnsnamesParser.EQUAL)
self.state = 216
la_ = self._interp.adaptivePredict(self._input, 6, self._ctx)
if la_ == 1:
self.state = 210
self.address_list()
pass
elif la_ == 2:
self.state = 212
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 211
self.address()
self.state = 214
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la == tnsnamesParser.L_PAREN):
break
pass
self.state = 218
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Alias_listContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def alias(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.AliasContext)
else:
return self.getTypedRuleContext(tnsnamesParser.AliasContext, i)
def COMMA(self, i: int = None):
if i is None:
return self.getTokens(tnsnamesParser.COMMA)
else:
return self.getToken(tnsnamesParser.COMMA, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_alias_list
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAlias_list"):
listener.enterAlias_list(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAlias_list"):
listener.exitAlias_list(self)
def alias_list(self):
localctx = tnsnamesParser.Alias_listContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_alias_list)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 220
self.alias()
self.state = 225
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == tnsnamesParser.COMMA:
self.state = 221
self.match(tnsnamesParser.COMMA)
self.state = 222
self.alias()
self.state = 227
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AliasContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_alias
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAlias"):
listener.enterAlias(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAlias"):
listener.exitAlias(self)
def alias(self):
localctx = tnsnamesParser.AliasContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_alias)
try:
self.enterOuterAlt(localctx, 1)
self.state = 228
self.match(tnsnamesParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Description_listContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def DESCRIPTION_LIST(self):
return self.getToken(tnsnamesParser.DESCRIPTION_LIST, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def dl_params(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Dl_paramsContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Dl_paramsContext, i)
def description(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.DescriptionContext)
else:
return self.getTypedRuleContext(tnsnamesParser.DescriptionContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_description_list
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterDescription_list"):
listener.enterDescription_list(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitDescription_list"):
listener.exitDescription_list(self)
def description_list(self):
localctx = tnsnamesParser.Description_listContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_description_list)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 230
self.match(tnsnamesParser.L_PAREN)
self.state = 231
self.match(tnsnamesParser.DESCRIPTION_LIST)
self.state = 232
self.match(tnsnamesParser.EQUAL)
self.state = 234
la_ = self._interp.adaptivePredict(self._input, 8, self._ctx)
if la_ == 1:
self.state = 233
self.dl_params()
self.state = 237
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 236
self.description()
else:
raise NoViableAltException(self)
self.state = 239
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 9, self._ctx)
self.state = 242
_la = self._input.LA(1)
if _la == tnsnamesParser.L_PAREN:
self.state = 241
self.dl_params()
self.state = 244
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Dl_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def dl_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Dl_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Dl_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_dl_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterDl_params"):
listener.enterDl_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitDl_params"):
listener.exitDl_params(self)
def dl_params(self):
localctx = tnsnamesParser.Dl_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_dl_params)
try:
self.enterOuterAlt(localctx, 1)
self.state = 247
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 246
self.dl_parameter()
else:
raise NoViableAltException(self)
self.state = 249
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 11, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Dl_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def al_failover(self):
return self.getTypedRuleContext(tnsnamesParser.Al_failoverContext, 0)
def al_load_balance(self):
return self.getTypedRuleContext(tnsnamesParser.Al_load_balanceContext, 0)
def al_source_route(self):
return self.getTypedRuleContext(tnsnamesParser.Al_source_routeContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_dl_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterDl_parameter"):
listener.enterDl_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitDl_parameter"):
listener.exitDl_parameter(self)
def dl_parameter(self):
localctx = tnsnamesParser.Dl_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_dl_parameter)
try:
self.state = 254
la_ = self._interp.adaptivePredict(self._input, 12, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 251
self.al_failover()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 252
self.al_load_balance()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 253
self.al_source_route()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DescriptionContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def DESCRIPTION(self):
return self.getToken(tnsnamesParser.DESCRIPTION, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def connect_data(self):
return self.getTypedRuleContext(tnsnamesParser.Connect_dataContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def address_list(self):
return self.getTypedRuleContext(tnsnamesParser.Address_listContext, 0)
def d_params(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.D_paramsContext)
else:
return self.getTypedRuleContext(tnsnamesParser.D_paramsContext, i)
def address(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.AddressContext)
else:
return self.getTypedRuleContext(tnsnamesParser.AddressContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_description
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterDescription"):
listener.enterDescription(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitDescription"):
listener.exitDescription(self)
def description(self):
localctx = tnsnamesParser.DescriptionContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_description)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 256
self.match(tnsnamesParser.L_PAREN)
self.state = 257
self.match(tnsnamesParser.DESCRIPTION)
self.state = 258
self.match(tnsnamesParser.EQUAL)
self.state = 260
la_ = self._interp.adaptivePredict(self._input, 13, self._ctx)
if la_ == 1:
self.state = 259
self.d_params()
self.state = 268
la_ = self._interp.adaptivePredict(self._input, 15, self._ctx)
if la_ == 1:
self.state = 262
self.address_list()
pass
elif la_ == 2:
self.state = 264
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 263
self.address()
else:
raise NoViableAltException(self)
self.state = 266
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 14, self._ctx)
pass
self.state = 271
la_ = self._interp.adaptivePredict(self._input, 16, self._ctx)
if la_ == 1:
self.state = 270
self.d_params()
self.state = 273
self.connect_data()
self.state = 275
_la = self._input.LA(1)
if _la == tnsnamesParser.L_PAREN:
self.state = 274
self.d_params()
self.state = 277
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def d_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.D_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.D_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_params"):
listener.enterD_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_params"):
listener.exitD_params(self)
def d_params(self):
localctx = tnsnamesParser.D_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_d_params)
try:
self.enterOuterAlt(localctx, 1)
self.state = 280
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 279
self.d_parameter()
else:
raise NoViableAltException(self)
self.state = 282
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 18, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def d_enable(self):
return self.getTypedRuleContext(tnsnamesParser.D_enableContext, 0)
def al_failover(self):
return self.getTypedRuleContext(tnsnamesParser.Al_failoverContext, 0)
def al_load_balance(self):
return self.getTypedRuleContext(tnsnamesParser.Al_load_balanceContext, 0)
def d_sdu(self):
return self.getTypedRuleContext(tnsnamesParser.D_sduContext, 0)
def d_recv_buf(self):
return self.getTypedRuleContext(tnsnamesParser.D_recv_bufContext, 0)
def d_send_buf(self):
return self.getTypedRuleContext(tnsnamesParser.D_send_bufContext, 0)
def al_source_route(self):
return self.getTypedRuleContext(tnsnamesParser.Al_source_routeContext, 0)
def d_service_type(self):
return self.getTypedRuleContext(tnsnamesParser.D_service_typeContext, 0)
def d_security(self):
return self.getTypedRuleContext(tnsnamesParser.D_securityContext, 0)
def d_conn_timeout(self):
return self.getTypedRuleContext(tnsnamesParser.D_conn_timeoutContext, 0)
def d_retry_count(self):
return self.getTypedRuleContext(tnsnamesParser.D_retry_countContext, 0)
def d_tct(self):
return self.getTypedRuleContext(tnsnamesParser.D_tctContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_parameter"):
listener.enterD_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_parameter"):
listener.exitD_parameter(self)
def d_parameter(self):
localctx = tnsnamesParser.D_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_d_parameter)
try:
self.state = 296
la_ = self._interp.adaptivePredict(self._input, 19, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 284
self.d_enable()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 285
self.al_failover()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 286
self.al_load_balance()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 287
self.d_sdu()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 288
self.d_recv_buf()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 289
self.d_send_buf()
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 290
self.al_source_route()
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 291
self.d_service_type()
pass
elif la_ == 9:
self.enterOuterAlt(localctx, 9)
self.state = 292
self.d_security()
pass
elif la_ == 10:
self.enterOuterAlt(localctx, 10)
self.state = 293
self.d_conn_timeout()
pass
elif la_ == 11:
self.enterOuterAlt(localctx, 11)
self.state = 294
self.d_retry_count()
pass
elif la_ == 12:
self.enterOuterAlt(localctx, 12)
self.state = 295
self.d_tct()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
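# Rule d_enable -- matches "(ENABLE = BROKEN)".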
class D_enableContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def ENABLE(self):
return self.getToken(tnsnamesParser.ENABLE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def BROKEN(self):
return self.getToken(tnsnamesParser.BROKEN, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_enable
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_enable"):
listener.enterD_enable(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_enable"):
listener.exitD_enable(self)
def d_enable(self):
localctx = tnsnamesParser.D_enableContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_d_enable)
try:
self.enterOuterAlt(localctx, 1)
self.state = 298
self.match(tnsnamesParser.L_PAREN)
self.state = 299
self.match(tnsnamesParser.ENABLE)
self.state = 300
self.match(tnsnamesParser.EQUAL)
self.state = 301
self.match(tnsnamesParser.BROKEN)
self.state = 302
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_sduContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SDU(self):
return self.getToken(tnsnamesParser.SDU, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_sdu
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_sdu"):
listener.enterD_sdu(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_sdu"):
listener.exitD_sdu(self)
def d_sdu(self):
localctx = tnsnamesParser.D_sduContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_d_sdu)
try:
self.enterOuterAlt(localctx, 1)
self.state = 304
self.match(tnsnamesParser.L_PAREN)
self.state = 305
self.match(tnsnamesParser.SDU)
self.state = 306
self.match(tnsnamesParser.EQUAL)
self.state = 307
self.match(tnsnamesParser.INT)
self.state = 308
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_recv_bufContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def RECV_BUF(self):
return self.getToken(tnsnamesParser.RECV_BUF, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_recv_buf
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_recv_buf"):
listener.enterD_recv_buf(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_recv_buf"):
listener.exitD_recv_buf(self)
def d_recv_buf(self):
localctx = tnsnamesParser.D_recv_bufContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_d_recv_buf)
try:
self.enterOuterAlt(localctx, 1)
self.state = 310
self.match(tnsnamesParser.L_PAREN)
self.state = 311
self.match(tnsnamesParser.RECV_BUF)
self.state = 312
self.match(tnsnamesParser.EQUAL)
self.state = 313
self.match(tnsnamesParser.INT)
self.state = 314
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_send_bufContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SEND_BUF(self):
return self.getToken(tnsnamesParser.SEND_BUF, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_send_buf
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_send_buf"):
listener.enterD_send_buf(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_send_buf"):
listener.exitD_send_buf(self)
def d_send_buf(self):
localctx = tnsnamesParser.D_send_bufContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_d_send_buf)
try:
self.enterOuterAlt(localctx, 1)
self.state = 316
self.match(tnsnamesParser.L_PAREN)
self.state = 317
self.match(tnsnamesParser.SEND_BUF)
self.state = 318
self.match(tnsnamesParser.EQUAL)
self.state = 319
self.match(tnsnamesParser.INT)
self.state = 320
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_service_typeContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SERVICE_TYPE(self):
return self.getToken(tnsnamesParser.SERVICE_TYPE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_service_type
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_service_type"):
listener.enterD_service_type(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_service_type"):
listener.exitD_service_type(self)
def d_service_type(self):
localctx = tnsnamesParser.D_service_typeContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_d_service_type)
try:
self.enterOuterAlt(localctx, 1)
self.state = 322
self.match(tnsnamesParser.L_PAREN)
self.state = 323
self.match(tnsnamesParser.SERVICE_TYPE)
self.state = 324
self.match(tnsnamesParser.EQUAL)
self.state = 325
self.match(tnsnamesParser.ID)
self.state = 326
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
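# Rule d_security -- matches "(SECURITY = ...)" with a nested ds_parameter subtree as the value.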
class D_securityContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SECURITY(self):
return self.getToken(tnsnamesParser.SECURITY, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ds_parameter(self):
return self.getTypedRuleContext(tnsnamesParser.Ds_parameterContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_security
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_security"):
listener.enterD_security(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_security"):
listener.exitD_security(self)
def d_security(self):
localctx = tnsnamesParser.D_securityContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_d_security)
try:
self.enterOuterAlt(localctx, 1)
self.state = 328
self.match(tnsnamesParser.L_PAREN)
self.state = 329
self.match(tnsnamesParser.SECURITY)
self.state = 330
self.match(tnsnamesParser.EQUAL)
self.state = 331
self.ds_parameter()
self.state = 332
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_conn_timeoutContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def CONN_TIMEOUT(self):
return self.getToken(tnsnamesParser.CONN_TIMEOUT, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_conn_timeout
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_conn_timeout"):
listener.enterD_conn_timeout(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_conn_timeout"):
listener.exitD_conn_timeout(self)
def d_conn_timeout(self):
localctx = tnsnamesParser.D_conn_timeoutContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_d_conn_timeout)
try:
self.enterOuterAlt(localctx, 1)
self.state = 334
self.match(tnsnamesParser.L_PAREN)
self.state = 335
self.match(tnsnamesParser.CONN_TIMEOUT)
self.state = 336
self.match(tnsnamesParser.EQUAL)
self.state = 337
self.match(tnsnamesParser.INT)
self.state = 338
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_retry_countContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def RETRY_COUNT(self):
return self.getToken(tnsnamesParser.RETRY_COUNT, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_retry_count
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_retry_count"):
listener.enterD_retry_count(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_retry_count"):
listener.exitD_retry_count(self)
def d_retry_count(self):
localctx = tnsnamesParser.D_retry_countContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_d_retry_count)
try:
self.enterOuterAlt(localctx, 1)
self.state = 340
self.match(tnsnamesParser.L_PAREN)
self.state = 341
self.match(tnsnamesParser.RETRY_COUNT)
self.state = 342
self.match(tnsnamesParser.EQUAL)
self.state = 343
self.match(tnsnamesParser.INT)
self.state = 344
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class D_tctContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def TCT(self):
return self.getToken(tnsnamesParser.TCT, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_d_tct
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterD_tct"):
listener.enterD_tct(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitD_tct"):
listener.exitD_tct(self)
def d_tct(self):
localctx = tnsnamesParser.D_tctContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_d_tct)
try:
self.enterOuterAlt(localctx, 1)
self.state = 346
self.match(tnsnamesParser.L_PAREN)
self.state = 347
self.match(tnsnamesParser.TCT)
self.state = 348
self.match(tnsnamesParser.EQUAL)
self.state = 349
self.match(tnsnamesParser.INT)
self.state = 350
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
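# Rule ds_parameter -- an SSL_CERT token with a double-quoted string value, i.e. "(SSL_CERT = "...")".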
class Ds_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SSL_CERT(self):
return self.getToken(tnsnamesParser.SSL_CERT, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def DQ_STRING(self):
return self.getToken(tnsnamesParser.DQ_STRING, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_ds_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterDs_parameter"):
listener.enterDs_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitDs_parameter"):
listener.exitDs_parameter(self)
def ds_parameter(self):
localctx = tnsnamesParser.Ds_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_ds_parameter)
try:
self.enterOuterAlt(localctx, 1)
self.state = 352
self.match(tnsnamesParser.L_PAREN)
self.state = 353
self.match(tnsnamesParser.SSL_CERT)
self.state = 354
self.match(tnsnamesParser.EQUAL)
self.state = 355
self.match(tnsnamesParser.DQ_STRING)
self.state = 356
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
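# Rule address_list -- "(ADDRESS_LIST = ...)": an optional al_params block, one or more
# address entries, then another optional al_params block before the closing parenthesis.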
class Address_listContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def ADDRESS_LIST(self):
return self.getToken(tnsnamesParser.ADDRESS_LIST, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def al_params(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Al_paramsContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Al_paramsContext, i)
def address(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.AddressContext)
else:
return self.getTypedRuleContext(tnsnamesParser.AddressContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_address_list
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAddress_list"):
listener.enterAddress_list(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAddress_list"):
listener.exitAddress_list(self)
def address_list(self):
localctx = tnsnamesParser.Address_listContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_address_list)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 358
self.match(tnsnamesParser.L_PAREN)
self.state = 359
self.match(tnsnamesParser.ADDRESS_LIST)
self.state = 360
self.match(tnsnamesParser.EQUAL)
self.state = 362
la_ = self._interp.adaptivePredict(self._input, 20, self._ctx)
if la_ == 1:
self.state = 361
self.al_params()
self.state = 365
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 364
self.address()
else:
raise NoViableAltException(self)
self.state = 367
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 21, self._ctx)
self.state = 370
_la = self._input.LA(1)
if _la == tnsnamesParser.L_PAREN:
self.state = 369
self.al_params()
self.state = 372
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Al_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def al_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Al_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Al_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_al_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAl_params"):
listener.enterAl_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAl_params"):
listener.exitAl_params(self)
def al_params(self):
localctx = tnsnamesParser.Al_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_al_params)
try:
self.enterOuterAlt(localctx, 1)
self.state = 375
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 374
self.al_parameter()
else:
raise NoViableAltException(self)
self.state = 377
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 23, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
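# Rule al_parameter -- one of FAILOVER, LOAD_BALANCE or SOURCE_ROUTE at ADDRESS_LIST level.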
class Al_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def al_failover(self):
return self.getTypedRuleContext(tnsnamesParser.Al_failoverContext, 0)
def al_load_balance(self):
return self.getTypedRuleContext(tnsnamesParser.Al_load_balanceContext, 0)
def al_source_route(self):
return self.getTypedRuleContext(tnsnamesParser.Al_source_routeContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_al_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAl_parameter"):
listener.enterAl_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAl_parameter"):
listener.exitAl_parameter(self)
def al_parameter(self):
localctx = tnsnamesParser.Al_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_al_parameter)
try:
self.state = 382
la_ = self._interp.adaptivePredict(self._input, 24, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 379
self.al_failover()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 380
self.al_load_balance()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 381
self.al_source_route()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
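# Rule al_failover -- "(FAILOVER = <value>)" where the value token is YES_NO, ON_OFF or TRUE_FALSE.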
class Al_failoverContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def FAILOVER(self):
return self.getToken(tnsnamesParser.FAILOVER, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def YES_NO(self):
return self.getToken(tnsnamesParser.YES_NO, 0)
def ON_OFF(self):
return self.getToken(tnsnamesParser.ON_OFF, 0)
def TRUE_FALSE(self):
return self.getToken(tnsnamesParser.TRUE_FALSE, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_al_failover
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAl_failover"):
listener.enterAl_failover(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAl_failover"):
listener.exitAl_failover(self)
def al_failover(self):
localctx = tnsnamesParser.Al_failoverContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_al_failover)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 384
self.match(tnsnamesParser.L_PAREN)
self.state = 385
self.match(tnsnamesParser.FAILOVER)
self.state = 386
self.match(tnsnamesParser.EQUAL)
self.state = 387
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & (
(1 << tnsnamesParser.YES_NO) | (1 << tnsnamesParser.ON_OFF) | (
1 << tnsnamesParser.TRUE_FALSE))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 388
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Al_load_balanceContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def LOAD_BALANCE(self):
return self.getToken(tnsnamesParser.LOAD_BALANCE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def YES_NO(self):
return self.getToken(tnsnamesParser.YES_NO, 0)
def ON_OFF(self):
return self.getToken(tnsnamesParser.ON_OFF, 0)
def TRUE_FALSE(self):
return self.getToken(tnsnamesParser.TRUE_FALSE, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_al_load_balance
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAl_load_balance"):
listener.enterAl_load_balance(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAl_load_balance"):
listener.exitAl_load_balance(self)
def al_load_balance(self):
localctx = tnsnamesParser.Al_load_balanceContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_al_load_balance)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 390
self.match(tnsnamesParser.L_PAREN)
self.state = 391
self.match(tnsnamesParser.LOAD_BALANCE)
self.state = 392
self.match(tnsnamesParser.EQUAL)
self.state = 393
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & (
(1 << tnsnamesParser.YES_NO) | (1 << tnsnamesParser.ON_OFF) | (
1 << tnsnamesParser.TRUE_FALSE))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 394
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
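# Rule al_source_route -- like al_failover/al_load_balance, but only YES_NO and ON_OFF
# values are accepted (no TRUE_FALSE).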
class Al_source_routeContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SOURCE_ROUTE(self):
return self.getToken(tnsnamesParser.SOURCE_ROUTE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def YES_NO(self):
return self.getToken(tnsnamesParser.YES_NO, 0)
def ON_OFF(self):
return self.getToken(tnsnamesParser.ON_OFF, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_al_source_route
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAl_source_route"):
listener.enterAl_source_route(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAl_source_route"):
listener.exitAl_source_route(self)
def al_source_route(self):
localctx = tnsnamesParser.Al_source_routeContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_al_source_route)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 396
self.match(tnsnamesParser.L_PAREN)
self.state = 397
self.match(tnsnamesParser.SOURCE_ROUTE)
self.state = 398
self.match(tnsnamesParser.EQUAL)
self.state = 399
_la = self._input.LA(1)
if not (_la == tnsnamesParser.YES_NO or _la == tnsnamesParser.ON_OFF):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 400
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
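# Rule address -- "(ADDRESS = <protocol_info> [a_params])"; the optional a_params block is
# present when the next token is L_PAREN.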
class AddressContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def ADDRESS(self):
return self.getToken(tnsnamesParser.ADDRESS, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def protocol_info(self):
return self.getTypedRuleContext(tnsnamesParser.Protocol_infoContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def a_params(self):
return self.getTypedRuleContext(tnsnamesParser.A_paramsContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_address
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterAddress"):
listener.enterAddress(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitAddress"):
listener.exitAddress(self)
def address(self):
localctx = tnsnamesParser.AddressContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_address)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 402
self.match(tnsnamesParser.L_PAREN)
self.state = 403
self.match(tnsnamesParser.ADDRESS)
self.state = 404
self.match(tnsnamesParser.EQUAL)
self.state = 405
self.protocol_info()
self.state = 407
_la = self._input.LA(1)
if _la == tnsnamesParser.L_PAREN:
self.state = 406
self.a_params()
self.state = 409
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class A_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def a_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.A_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.A_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_a_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterA_params"):
listener.enterA_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitA_params"):
listener.exitA_params(self)
def a_params(self):
localctx = tnsnamesParser.A_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_a_params)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 412
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 411
self.a_parameter()
self.state = 414
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la == tnsnamesParser.L_PAREN):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
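# Rule a_parameter -- reuses the d_send_buf/d_recv_buf rules for per-ADDRESS buffer sizes.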
class A_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def d_send_buf(self):
return self.getTypedRuleContext(tnsnamesParser.D_send_bufContext, 0)
def d_recv_buf(self):
return self.getTypedRuleContext(tnsnamesParser.D_recv_bufContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_a_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterA_parameter"):
listener.enterA_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitA_parameter"):
listener.exitA_parameter(self)
def a_parameter(self):
localctx = tnsnamesParser.A_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_a_parameter)
try:
self.state = 418
la_ = self._interp.adaptivePredict(self._input, 27, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 416
self.d_send_buf()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 417
self.d_recv_buf()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
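# Rule protocol_info -- dispatches to one of the five supported address protocols:
# TCP, IPC, SPX, NMP (named pipes) or BEQ.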
class Protocol_infoContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def tcp_protocol(self):
return self.getTypedRuleContext(tnsnamesParser.Tcp_protocolContext, 0)
def ipc_protocol(self):
return self.getTypedRuleContext(tnsnamesParser.Ipc_protocolContext, 0)
def spx_protocol(self):
return self.getTypedRuleContext(tnsnamesParser.Spx_protocolContext, 0)
def nmp_protocol(self):
return self.getTypedRuleContext(tnsnamesParser.Nmp_protocolContext, 0)
def beq_protocol(self):
return self.getTypedRuleContext(tnsnamesParser.Beq_protocolContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_protocol_info
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterProtocol_info"):
listener.enterProtocol_info(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitProtocol_info"):
listener.exitProtocol_info(self)
def protocol_info(self):
localctx = tnsnamesParser.Protocol_infoContext(self, self._ctx, self.state)
self.enterRule(localctx, 64, self.RULE_protocol_info)
try:
self.state = 425
la_ = self._interp.adaptivePredict(self._input, 28, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 420
self.tcp_protocol()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 421
self.ipc_protocol()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 422
self.spx_protocol()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 423
self.nmp_protocol()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 424
self.beq_protocol()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Tcp_protocolContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def tcp_params(self):
return self.getTypedRuleContext(tnsnamesParser.Tcp_paramsContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_tcp_protocol
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterTcp_protocol"):
listener.enterTcp_protocol(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitTcp_protocol"):
listener.exitTcp_protocol(self)
def tcp_protocol(self):
localctx = tnsnamesParser.Tcp_protocolContext(self, self._ctx, self.state)
self.enterRule(localctx, 66, self.RULE_tcp_protocol)
try:
self.enterOuterAlt(localctx, 1)
self.state = 427
self.tcp_params()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Tcp_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def tcp_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Tcp_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Tcp_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_tcp_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterTcp_params"):
listener.enterTcp_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitTcp_params"):
listener.exitTcp_params(self)
def tcp_params(self):
localctx = tnsnamesParser.Tcp_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 68, self.RULE_tcp_params)
try:
self.enterOuterAlt(localctx, 1)
self.state = 430
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 429
self.tcp_parameter()
else:
raise NoViableAltException(self)
self.state = 432
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 29, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
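# Rule tcp_parameter -- HOST, PORT and "(PROTOCOL = TCP)" clauses, accepted in any order
# by the enclosing tcp_params loop.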
class Tcp_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def tcp_host(self):
return self.getTypedRuleContext(tnsnamesParser.Tcp_hostContext, 0)
def tcp_port(self):
return self.getTypedRuleContext(tnsnamesParser.Tcp_portContext, 0)
def tcp_tcp(self):
return self.getTypedRuleContext(tnsnamesParser.Tcp_tcpContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_tcp_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterTcp_parameter"):
listener.enterTcp_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitTcp_parameter"):
listener.exitTcp_parameter(self)
def tcp_parameter(self):
localctx = tnsnamesParser.Tcp_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 70, self.RULE_tcp_parameter)
try:
self.state = 437
la_ = self._interp.adaptivePredict(self._input, 30, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 434
self.tcp_host()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 435
self.tcp_port()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 436
self.tcp_tcp()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Tcp_hostContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def HOST(self):
return self.getToken(tnsnamesParser.HOST, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def host(self):
return self.getTypedRuleContext(tnsnamesParser.HostContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_tcp_host
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterTcp_host"):
listener.enterTcp_host(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitTcp_host"):
listener.exitTcp_host(self)
def tcp_host(self):
localctx = tnsnamesParser.Tcp_hostContext(self, self._ctx, self.state)
self.enterRule(localctx, 72, self.RULE_tcp_host)
try:
self.enterOuterAlt(localctx, 1)
self.state = 439
self.match(tnsnamesParser.L_PAREN)
self.state = 440
self.match(tnsnamesParser.HOST)
self.state = 441
self.match(tnsnamesParser.EQUAL)
self.state = 442
self.host()
self.state = 443
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Tcp_portContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def PORT(self):
return self.getToken(tnsnamesParser.PORT, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def port(self):
return self.getTypedRuleContext(tnsnamesParser.PortContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_tcp_port
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterTcp_port"):
listener.enterTcp_port(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitTcp_port"):
listener.exitTcp_port(self)
def tcp_port(self):
localctx = tnsnamesParser.Tcp_portContext(self, self._ctx, self.state)
self.enterRule(localctx, 74, self.RULE_tcp_port)
try:
self.enterOuterAlt(localctx, 1)
self.state = 445
self.match(tnsnamesParser.L_PAREN)
self.state = 446
self.match(tnsnamesParser.PORT)
self.state = 447
self.match(tnsnamesParser.EQUAL)
self.state = 448
self.port()
self.state = 449
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Tcp_tcpContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def PROTOCOL(self):
return self.getToken(tnsnamesParser.PROTOCOL, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def TCP(self):
return self.getToken(tnsnamesParser.TCP, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_tcp_tcp
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterTcp_tcp"):
listener.enterTcp_tcp(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitTcp_tcp"):
listener.exitTcp_tcp(self)
def tcp_tcp(self):
localctx = tnsnamesParser.Tcp_tcpContext(self, self._ctx, self.state)
self.enterRule(localctx, 76, self.RULE_tcp_tcp)
try:
self.enterOuterAlt(localctx, 1)
self.state = 451
self.match(tnsnamesParser.L_PAREN)
self.state = 452
self.match(tnsnamesParser.PROTOCOL)
self.state = 453
self.match(tnsnamesParser.EQUAL)
self.state = 454
self.match(tnsnamesParser.TCP)
self.state = 455
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
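# Rule host -- a host is either an ID (hostname) or an IP literal.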
class HostContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def IP(self):
return self.getToken(tnsnamesParser.IP, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_host
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterHost"):
listener.enterHost(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitHost"):
listener.exitHost(self)
def host(self):
localctx = tnsnamesParser.HostContext(self, self._ctx, self.state)
self.enterRule(localctx, 78, self.RULE_host)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 457
_la = self._input.LA(1)
if not (_la == tnsnamesParser.IP or _la == tnsnamesParser.ID):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PortContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_port
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterPort"):
listener.enterPort(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitPort"):
listener.exitPort(self)
def port(self):
localctx = tnsnamesParser.PortContext(self, self._ctx, self.state)
self.enterRule(localctx, 80, self.RULE_port)
try:
self.enterOuterAlt(localctx, 1)
self.state = 459
self.match(tnsnamesParser.INT)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Ipc_protocolContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def ipc_params(self):
return self.getTypedRuleContext(tnsnamesParser.Ipc_paramsContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_ipc_protocol
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterIpc_protocol"):
listener.enterIpc_protocol(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitIpc_protocol"):
listener.exitIpc_protocol(self)
def ipc_protocol(self):
localctx = tnsnamesParser.Ipc_protocolContext(self, self._ctx, self.state)
self.enterRule(localctx, 82, self.RULE_ipc_protocol)
try:
self.enterOuterAlt(localctx, 1)
self.state = 461
self.ipc_params()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Ipc_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def ipc_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Ipc_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Ipc_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_ipc_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterIpc_params"):
listener.enterIpc_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitIpc_params"):
listener.exitIpc_params(self)
def ipc_params(self):
localctx = tnsnamesParser.Ipc_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 84, self.RULE_ipc_params)
try:
self.enterOuterAlt(localctx, 1)
self.state = 464
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 463
self.ipc_parameter()
else:
raise NoViableAltException(self)
self.state = 466
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 31, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
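# Rule ipc_parameter -- either "(PROTOCOL = IPC)" or a "(KEY = <id>)" clause.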
class Ipc_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def ipc_ipc(self):
return self.getTypedRuleContext(tnsnamesParser.Ipc_ipcContext, 0)
def ipc_key(self):
return self.getTypedRuleContext(tnsnamesParser.Ipc_keyContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_ipc_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterIpc_parameter"):
listener.enterIpc_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitIpc_parameter"):
listener.exitIpc_parameter(self)
def ipc_parameter(self):
localctx = tnsnamesParser.Ipc_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 86, self.RULE_ipc_parameter)
try:
self.state = 470
la_ = self._interp.adaptivePredict(self._input, 32, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 468
self.ipc_ipc()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 469
self.ipc_key()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Ipc_ipcContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def PROTOCOL(self):
return self.getToken(tnsnamesParser.PROTOCOL, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def IPC(self):
return self.getToken(tnsnamesParser.IPC, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_ipc_ipc
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterIpc_ipc"):
listener.enterIpc_ipc(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitIpc_ipc"):
listener.exitIpc_ipc(self)
def ipc_ipc(self):
localctx = tnsnamesParser.Ipc_ipcContext(self, self._ctx, self.state)
self.enterRule(localctx, 88, self.RULE_ipc_ipc)
try:
self.enterOuterAlt(localctx, 1)
self.state = 472
self.match(tnsnamesParser.L_PAREN)
self.state = 473
self.match(tnsnamesParser.PROTOCOL)
self.state = 474
self.match(tnsnamesParser.EQUAL)
self.state = 475
self.match(tnsnamesParser.IPC)
self.state = 476
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Ipc_keyContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def KEY(self):
return self.getToken(tnsnamesParser.KEY, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_ipc_key
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterIpc_key"):
listener.enterIpc_key(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitIpc_key"):
listener.exitIpc_key(self)
def ipc_key(self):
localctx = tnsnamesParser.Ipc_keyContext(self, self._ctx, self.state)
self.enterRule(localctx, 90, self.RULE_ipc_key)
try:
self.enterOuterAlt(localctx, 1)
self.state = 478
self.match(tnsnamesParser.L_PAREN)
self.state = 479
self.match(tnsnamesParser.KEY)
self.state = 480
self.match(tnsnamesParser.EQUAL)
self.state = 481
self.match(tnsnamesParser.ID)
self.state = 482
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Spx_protocolContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def spx_params(self):
return self.getTypedRuleContext(tnsnamesParser.Spx_paramsContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_spx_protocol
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterSpx_protocol"):
listener.enterSpx_protocol(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitSpx_protocol"):
listener.exitSpx_protocol(self)
def spx_protocol(self):
localctx = tnsnamesParser.Spx_protocolContext(self, self._ctx, self.state)
self.enterRule(localctx, 92, self.RULE_spx_protocol)
try:
self.enterOuterAlt(localctx, 1)
self.state = 484
self.spx_params()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Spx_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def spx_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Spx_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Spx_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_spx_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterSpx_params"):
listener.enterSpx_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitSpx_params"):
listener.exitSpx_params(self)
def spx_params(self):
localctx = tnsnamesParser.Spx_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 94, self.RULE_spx_params)
try:
self.enterOuterAlt(localctx, 1)
self.state = 487
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 486
self.spx_parameter()
else:
raise NoViableAltException(self)
self.state = 489
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 33, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
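# Rule spx_parameter -- either "(PROTOCOL = SPX)" or a "(SERVICE = <id>)" clause.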
class Spx_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def spx_spx(self):
return self.getTypedRuleContext(tnsnamesParser.Spx_spxContext, 0)
def spx_service(self):
return self.getTypedRuleContext(tnsnamesParser.Spx_serviceContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_spx_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterSpx_parameter"):
listener.enterSpx_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitSpx_parameter"):
listener.exitSpx_parameter(self)
def spx_parameter(self):
localctx = tnsnamesParser.Spx_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 96, self.RULE_spx_parameter)
try:
self.state = 493
la_ = self._interp.adaptivePredict(self._input, 34, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 491
self.spx_spx()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 492
self.spx_service()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Spx_spxContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def PROTOCOL(self):
return self.getToken(tnsnamesParser.PROTOCOL, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def SPX(self):
return self.getToken(tnsnamesParser.SPX, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_spx_spx
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterSpx_spx"):
listener.enterSpx_spx(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitSpx_spx"):
listener.exitSpx_spx(self)
def spx_spx(self):
localctx = tnsnamesParser.Spx_spxContext(self, self._ctx, self.state)
self.enterRule(localctx, 98, self.RULE_spx_spx)
try:
self.enterOuterAlt(localctx, 1)
self.state = 495
self.match(tnsnamesParser.L_PAREN)
self.state = 496
self.match(tnsnamesParser.PROTOCOL)
self.state = 497
self.match(tnsnamesParser.EQUAL)
self.state = 498
self.match(tnsnamesParser.SPX)
self.state = 499
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Spx_serviceContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SERVICE(self):
return self.getToken(tnsnamesParser.SERVICE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_spx_service
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterSpx_service"):
listener.enterSpx_service(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitSpx_service"):
listener.exitSpx_service(self)
def spx_service(self):
localctx = tnsnamesParser.Spx_serviceContext(self, self._ctx, self.state)
self.enterRule(localctx, 100, self.RULE_spx_service)
try:
self.enterOuterAlt(localctx, 1)
self.state = 501
self.match(tnsnamesParser.L_PAREN)
self.state = 502
self.match(tnsnamesParser.SERVICE)
self.state = 503
self.match(tnsnamesParser.EQUAL)
self.state = 504
self.match(tnsnamesParser.ID)
self.state = 505
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Nmp_protocolContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def nmp_params(self):
return self.getTypedRuleContext(tnsnamesParser.Nmp_paramsContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_nmp_protocol
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterNmp_protocol"):
listener.enterNmp_protocol(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitNmp_protocol"):
listener.exitNmp_protocol(self)
def nmp_protocol(self):
localctx = tnsnamesParser.Nmp_protocolContext(self, self._ctx, self.state)
self.enterRule(localctx, 102, self.RULE_nmp_protocol)
try:
self.enterOuterAlt(localctx, 1)
self.state = 507
self.nmp_params()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Nmp_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def nmp_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Nmp_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Nmp_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_nmp_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterNmp_params"):
listener.enterNmp_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitNmp_params"):
listener.exitNmp_params(self)
def nmp_params(self):
localctx = tnsnamesParser.Nmp_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 104, self.RULE_nmp_params)
try:
self.enterOuterAlt(localctx, 1)
self.state = 510
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 509
self.nmp_parameter()
else:
raise NoViableAltException(self)
self.state = 512
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 35, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Nmp_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def nmp_nmp(self):
return self.getTypedRuleContext(tnsnamesParser.Nmp_nmpContext, 0)
def nmp_server(self):
return self.getTypedRuleContext(tnsnamesParser.Nmp_serverContext, 0)
def nmp_pipe(self):
return self.getTypedRuleContext(tnsnamesParser.Nmp_pipeContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_nmp_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterNmp_parameter"):
listener.enterNmp_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitNmp_parameter"):
listener.exitNmp_parameter(self)
def nmp_parameter(self):
localctx = tnsnamesParser.Nmp_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 106, self.RULE_nmp_parameter)
try:
self.state = 517
la_ = self._interp.adaptivePredict(self._input, 36, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 514
self.nmp_nmp()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 515
self.nmp_server()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 516
self.nmp_pipe()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Nmp_nmpContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def PROTOCOL(self):
return self.getToken(tnsnamesParser.PROTOCOL, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def NMP(self):
return self.getToken(tnsnamesParser.NMP, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_nmp_nmp
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterNmp_nmp"):
listener.enterNmp_nmp(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitNmp_nmp"):
listener.exitNmp_nmp(self)
def nmp_nmp(self):
localctx = tnsnamesParser.Nmp_nmpContext(self, self._ctx, self.state)
self.enterRule(localctx, 108, self.RULE_nmp_nmp)
try:
self.enterOuterAlt(localctx, 1)
self.state = 519
self.match(tnsnamesParser.L_PAREN)
self.state = 520
self.match(tnsnamesParser.PROTOCOL)
self.state = 521
self.match(tnsnamesParser.EQUAL)
self.state = 522
self.match(tnsnamesParser.NMP)
self.state = 523
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Nmp_serverContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SERVER(self):
return self.getToken(tnsnamesParser.SERVER, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_nmp_server
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterNmp_server"):
listener.enterNmp_server(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitNmp_server"):
listener.exitNmp_server(self)
def nmp_server(self):
localctx = tnsnamesParser.Nmp_serverContext(self, self._ctx, self.state)
self.enterRule(localctx, 110, self.RULE_nmp_server)
try:
self.enterOuterAlt(localctx, 1)
self.state = 525
self.match(tnsnamesParser.L_PAREN)
self.state = 526
self.match(tnsnamesParser.SERVER)
self.state = 527
self.match(tnsnamesParser.EQUAL)
self.state = 528
self.match(tnsnamesParser.ID)
self.state = 529
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Nmp_pipeContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def PIPE(self):
return self.getToken(tnsnamesParser.PIPE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_nmp_pipe
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterNmp_pipe"):
listener.enterNmp_pipe(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitNmp_pipe"):
listener.exitNmp_pipe(self)
def nmp_pipe(self):
localctx = tnsnamesParser.Nmp_pipeContext(self, self._ctx, self.state)
self.enterRule(localctx, 112, self.RULE_nmp_pipe)
try:
self.enterOuterAlt(localctx, 1)
self.state = 531
self.match(tnsnamesParser.L_PAREN)
self.state = 532
self.match(tnsnamesParser.PIPE)
self.state = 533
self.match(tnsnamesParser.EQUAL)
self.state = 534
self.match(tnsnamesParser.ID)
self.state = 535
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Beq_protocolContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def beq_params(self):
return self.getTypedRuleContext(tnsnamesParser.Beq_paramsContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_beq_protocol
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBeq_protocol"):
listener.enterBeq_protocol(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBeq_protocol"):
listener.exitBeq_protocol(self)
def beq_protocol(self):
localctx = tnsnamesParser.Beq_protocolContext(self, self._ctx, self.state)
self.enterRule(localctx, 114, self.RULE_beq_protocol)
try:
self.enterOuterAlt(localctx, 1)
self.state = 537
self.beq_params()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Beq_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def beq_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Beq_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Beq_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_beq_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBeq_params"):
listener.enterBeq_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBeq_params"):
listener.exitBeq_params(self)
def beq_params(self):
localctx = tnsnamesParser.Beq_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 116, self.RULE_beq_params)
try:
self.enterOuterAlt(localctx, 1)
self.state = 540
self._errHandler.sync(self)
_alt = 1
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 539
self.beq_parameter()
else:
raise NoViableAltException(self)
self.state = 542
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 37, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Beq_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def beq_beq(self):
return self.getTypedRuleContext(tnsnamesParser.Beq_beqContext, 0)
def beq_program(self):
return self.getTypedRuleContext(tnsnamesParser.Beq_programContext, 0)
def beq_argv0(self):
return self.getTypedRuleContext(tnsnamesParser.Beq_argv0Context, 0)
def beq_args(self):
return self.getTypedRuleContext(tnsnamesParser.Beq_argsContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_beq_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBeq_parameter"):
listener.enterBeq_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBeq_parameter"):
listener.exitBeq_parameter(self)
def beq_parameter(self):
localctx = tnsnamesParser.Beq_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 118, self.RULE_beq_parameter)
try:
self.state = 548
la_ = self._interp.adaptivePredict(self._input, 38, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 544
self.beq_beq()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 545
self.beq_program()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 546
self.beq_argv0()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 547
self.beq_args()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Beq_beqContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def PROTOCOL(self):
return self.getToken(tnsnamesParser.PROTOCOL, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def BEQ(self):
return self.getToken(tnsnamesParser.BEQ, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_beq_beq
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBeq_beq"):
listener.enterBeq_beq(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBeq_beq"):
listener.exitBeq_beq(self)
def beq_beq(self):
localctx = tnsnamesParser.Beq_beqContext(self, self._ctx, self.state)
self.enterRule(localctx, 120, self.RULE_beq_beq)
try:
self.enterOuterAlt(localctx, 1)
self.state = 550
self.match(tnsnamesParser.L_PAREN)
self.state = 551
self.match(tnsnamesParser.PROTOCOL)
self.state = 552
self.match(tnsnamesParser.EQUAL)
self.state = 553
self.match(tnsnamesParser.BEQ)
self.state = 554
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Beq_programContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def PROGRAM(self):
return self.getToken(tnsnamesParser.PROGRAM, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_beq_program
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBeq_program"):
listener.enterBeq_program(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBeq_program"):
listener.exitBeq_program(self)
def beq_program(self):
localctx = tnsnamesParser.Beq_programContext(self, self._ctx, self.state)
self.enterRule(localctx, 122, self.RULE_beq_program)
try:
self.enterOuterAlt(localctx, 1)
self.state = 556
self.match(tnsnamesParser.L_PAREN)
self.state = 557
self.match(tnsnamesParser.PROGRAM)
self.state = 558
self.match(tnsnamesParser.EQUAL)
self.state = 559
self.match(tnsnamesParser.ID)
self.state = 560
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Beq_argv0Context(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def ARGV0(self):
return self.getToken(tnsnamesParser.ARGV0, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_beq_argv0
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBeq_argv0"):
listener.enterBeq_argv0(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBeq_argv0"):
listener.exitBeq_argv0(self)
def beq_argv0(self):
localctx = tnsnamesParser.Beq_argv0Context(self, self._ctx, self.state)
self.enterRule(localctx, 124, self.RULE_beq_argv0)
try:
self.enterOuterAlt(localctx, 1)
self.state = 562
self.match(tnsnamesParser.L_PAREN)
self.state = 563
self.match(tnsnamesParser.ARGV0)
self.state = 564
self.match(tnsnamesParser.EQUAL)
self.state = 565
self.match(tnsnamesParser.ID)
self.state = 566
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Beq_argsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def ARGS(self):
return self.getToken(tnsnamesParser.ARGS, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ba_parameter(self):
return self.getTypedRuleContext(tnsnamesParser.Ba_parameterContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_beq_args
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBeq_args"):
listener.enterBeq_args(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBeq_args"):
listener.exitBeq_args(self)
def beq_args(self):
localctx = tnsnamesParser.Beq_argsContext(self, self._ctx, self.state)
self.enterRule(localctx, 126, self.RULE_beq_args)
try:
self.enterOuterAlt(localctx, 1)
self.state = 568
self.match(tnsnamesParser.L_PAREN)
self.state = 569
self.match(tnsnamesParser.ARGS)
self.state = 570
self.match(tnsnamesParser.EQUAL)
self.state = 571
self.ba_parameter()
self.state = 572
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Ba_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def S_QUOTE(self, i: int = None):
if i is None:
return self.getTokens(tnsnamesParser.S_QUOTE)
else:
return self.getToken(tnsnamesParser.S_QUOTE, i)
def ba_description(self):
return self.getTypedRuleContext(tnsnamesParser.Ba_descriptionContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_ba_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBa_parameter"):
listener.enterBa_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBa_parameter"):
listener.exitBa_parameter(self)
def ba_parameter(self):
localctx = tnsnamesParser.Ba_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 128, self.RULE_ba_parameter)
try:
self.enterOuterAlt(localctx, 1)
self.state = 574
self.match(tnsnamesParser.S_QUOTE)
self.state = 575
self.ba_description()
self.state = 576
self.match(tnsnamesParser.S_QUOTE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Ba_descriptionContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def DESCRIPTION(self):
return self.getToken(tnsnamesParser.DESCRIPTION, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def bad_params(self):
return self.getTypedRuleContext(tnsnamesParser.Bad_paramsContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_ba_description
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBa_description"):
listener.enterBa_description(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBa_description"):
listener.exitBa_description(self)
def ba_description(self):
localctx = tnsnamesParser.Ba_descriptionContext(self, self._ctx, self.state)
self.enterRule(localctx, 130, self.RULE_ba_description)
try:
self.enterOuterAlt(localctx, 1)
self.state = 578
self.match(tnsnamesParser.L_PAREN)
self.state = 579
self.match(tnsnamesParser.DESCRIPTION)
self.state = 580
self.match(tnsnamesParser.EQUAL)
self.state = 581
self.bad_params()
self.state = 582
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Bad_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def bad_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Bad_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Bad_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_bad_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBad_params"):
listener.enterBad_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBad_params"):
listener.exitBad_params(self)
def bad_params(self):
localctx = tnsnamesParser.Bad_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 132, self.RULE_bad_params)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 585
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 584
self.bad_parameter()
self.state = 587
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la == tnsnamesParser.L_PAREN):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Bad_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def bad_local(self):
return self.getTypedRuleContext(tnsnamesParser.Bad_localContext, 0)
def bad_address(self):
return self.getTypedRuleContext(tnsnamesParser.Bad_addressContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_bad_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBad_parameter"):
listener.enterBad_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBad_parameter"):
listener.exitBad_parameter(self)
def bad_parameter(self):
localctx = tnsnamesParser.Bad_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 134, self.RULE_bad_parameter)
try:
self.state = 591
la_ = self._interp.adaptivePredict(self._input, 40, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 589
self.bad_local()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 590
self.bad_address()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Bad_localContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def LOCAL(self):
return self.getToken(tnsnamesParser.LOCAL, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def YES_NO(self):
return self.getToken(tnsnamesParser.YES_NO, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_bad_local
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBad_local"):
listener.enterBad_local(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBad_local"):
listener.exitBad_local(self)
def bad_local(self):
localctx = tnsnamesParser.Bad_localContext(self, self._ctx, self.state)
self.enterRule(localctx, 136, self.RULE_bad_local)
try:
self.enterOuterAlt(localctx, 1)
self.state = 593
self.match(tnsnamesParser.L_PAREN)
self.state = 594
self.match(tnsnamesParser.LOCAL)
self.state = 595
self.match(tnsnamesParser.EQUAL)
self.state = 596
self.match(tnsnamesParser.YES_NO)
self.state = 597
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Bad_addressContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def ADDRESS(self):
return self.getToken(tnsnamesParser.ADDRESS, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def beq_beq(self):
return self.getTypedRuleContext(tnsnamesParser.Beq_beqContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_bad_address
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBad_address"):
listener.enterBad_address(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBad_address"):
listener.exitBad_address(self)
def bad_address(self):
localctx = tnsnamesParser.Bad_addressContext(self, self._ctx, self.state)
self.enterRule(localctx, 138, self.RULE_bad_address)
try:
self.enterOuterAlt(localctx, 1)
self.state = 599
self.match(tnsnamesParser.L_PAREN)
self.state = 600
self.match(tnsnamesParser.ADDRESS)
self.state = 601
self.match(tnsnamesParser.EQUAL)
self.state = 602
self.beq_beq()
self.state = 603
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Connect_dataContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def CONNECT_DATA(self):
return self.getToken(tnsnamesParser.CONNECT_DATA, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def cd_params(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_paramsContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_connect_data
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterConnect_data"):
listener.enterConnect_data(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitConnect_data"):
listener.exitConnect_data(self)
def connect_data(self):
localctx = tnsnamesParser.Connect_dataContext(self, self._ctx, self.state)
self.enterRule(localctx, 140, self.RULE_connect_data)
try:
self.enterOuterAlt(localctx, 1)
self.state = 605
self.match(tnsnamesParser.L_PAREN)
self.state = 606
self.match(tnsnamesParser.CONNECT_DATA)
self.state = 607
self.match(tnsnamesParser.EQUAL)
self.state = 608
self.cd_params()
self.state = 609
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def cd_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Cd_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Cd_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_params"):
listener.enterCd_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_params"):
listener.exitCd_params(self)
def cd_params(self):
localctx = tnsnamesParser.Cd_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 142, self.RULE_cd_params)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 612
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 611
self.cd_parameter()
self.state = 614
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la == tnsnamesParser.L_PAREN):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def cd_service_name(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_service_nameContext, 0)
def cd_sid(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_sidContext, 0)
def cd_instance_name(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_instance_nameContext, 0)
def cd_failover_mode(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_failover_modeContext, 0)
def cd_global_name(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_global_nameContext, 0)
def cd_hs(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_hsContext, 0)
def cd_rdb_database(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_rdb_databaseContext, 0)
def cd_server(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_serverContext, 0)
def cd_ur(self):
return self.getTypedRuleContext(tnsnamesParser.Cd_urContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_parameter"):
listener.enterCd_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_parameter"):
listener.exitCd_parameter(self)
def cd_parameter(self):
localctx = tnsnamesParser.Cd_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 144, self.RULE_cd_parameter)
try:
self.state = 625
la_ = self._interp.adaptivePredict(self._input, 42, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 616
self.cd_service_name()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 617
self.cd_sid()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 618
self.cd_instance_name()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 619
self.cd_failover_mode()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 620
self.cd_global_name()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 621
self.cd_hs()
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 622
self.cd_rdb_database()
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 623
self.cd_server()
pass
elif la_ == 9:
self.enterOuterAlt(localctx, 9)
self.state = 624
self.cd_ur()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_service_nameContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SERVICE_NAME(self):
return self.getToken(tnsnamesParser.SERVICE_NAME, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_service_name
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_service_name"):
listener.enterCd_service_name(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_service_name"):
listener.exitCd_service_name(self)
def cd_service_name(self):
localctx = tnsnamesParser.Cd_service_nameContext(self, self._ctx, self.state)
self.enterRule(localctx, 146, self.RULE_cd_service_name)
try:
self.enterOuterAlt(localctx, 1)
self.state = 627
self.match(tnsnamesParser.L_PAREN)
self.state = 628
self.match(tnsnamesParser.SERVICE_NAME)
self.state = 629
self.match(tnsnamesParser.EQUAL)
self.state = 630
self.match(tnsnamesParser.ID)
self.state = 631
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_sidContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SID(self):
return self.getToken(tnsnamesParser.SID, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_sid
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_sid"):
listener.enterCd_sid(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_sid"):
listener.exitCd_sid(self)
def cd_sid(self):
localctx = tnsnamesParser.Cd_sidContext(self, self._ctx, self.state)
self.enterRule(localctx, 148, self.RULE_cd_sid)
try:
self.enterOuterAlt(localctx, 1)
self.state = 633
self.match(tnsnamesParser.L_PAREN)
self.state = 634
self.match(tnsnamesParser.SID)
self.state = 635
self.match(tnsnamesParser.EQUAL)
self.state = 636
self.match(tnsnamesParser.ID)
self.state = 637
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_instance_nameContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def INSTANCE_NAME(self):
return self.getToken(tnsnamesParser.INSTANCE_NAME, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_instance_name
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_instance_name"):
listener.enterCd_instance_name(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_instance_name"):
listener.exitCd_instance_name(self)
def cd_instance_name(self):
localctx = tnsnamesParser.Cd_instance_nameContext(self, self._ctx, self.state)
self.enterRule(localctx, 150, self.RULE_cd_instance_name)
try:
self.enterOuterAlt(localctx, 1)
self.state = 639
self.match(tnsnamesParser.L_PAREN)
self.state = 640
self.match(tnsnamesParser.INSTANCE_NAME)
self.state = 641
self.match(tnsnamesParser.EQUAL)
self.state = 642
self.match(tnsnamesParser.ID)
self.state = 643
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_failover_modeContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def FAILOVER_MODE(self):
return self.getToken(tnsnamesParser.FAILOVER_MODE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def fo_params(self):
return self.getTypedRuleContext(tnsnamesParser.Fo_paramsContext, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_failover_mode
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_failover_mode"):
listener.enterCd_failover_mode(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_failover_mode"):
listener.exitCd_failover_mode(self)
def cd_failover_mode(self):
localctx = tnsnamesParser.Cd_failover_modeContext(self, self._ctx, self.state)
self.enterRule(localctx, 152, self.RULE_cd_failover_mode)
try:
self.enterOuterAlt(localctx, 1)
self.state = 645
self.match(tnsnamesParser.L_PAREN)
self.state = 646
self.match(tnsnamesParser.FAILOVER_MODE)
self.state = 647
self.match(tnsnamesParser.EQUAL)
self.state = 648
self.fo_params()
self.state = 649
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_global_nameContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def GLOBAL_NAME(self):
return self.getToken(tnsnamesParser.GLOBAL_NAME, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_global_name
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_global_name"):
listener.enterCd_global_name(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_global_name"):
listener.exitCd_global_name(self)
def cd_global_name(self):
localctx = tnsnamesParser.Cd_global_nameContext(self, self._ctx, self.state)
self.enterRule(localctx, 154, self.RULE_cd_global_name)
try:
self.enterOuterAlt(localctx, 1)
self.state = 651
self.match(tnsnamesParser.L_PAREN)
self.state = 652
self.match(tnsnamesParser.GLOBAL_NAME)
self.state = 653
self.match(tnsnamesParser.EQUAL)
self.state = 654
self.match(tnsnamesParser.ID)
self.state = 655
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_hsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def HS(self):
return self.getToken(tnsnamesParser.HS, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def OK(self):
return self.getToken(tnsnamesParser.OK, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_hs
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_hs"):
listener.enterCd_hs(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_hs"):
listener.exitCd_hs(self)
def cd_hs(self):
localctx = tnsnamesParser.Cd_hsContext(self, self._ctx, self.state)
self.enterRule(localctx, 156, self.RULE_cd_hs)
try:
self.enterOuterAlt(localctx, 1)
self.state = 657
self.match(tnsnamesParser.L_PAREN)
self.state = 658
self.match(tnsnamesParser.HS)
self.state = 659
self.match(tnsnamesParser.EQUAL)
self.state = 660
self.match(tnsnamesParser.OK)
self.state = 661
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_rdb_databaseContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def RDB_DATABASE(self):
return self.getToken(tnsnamesParser.RDB_DATABASE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self, i: int = None):
if i is None:
return self.getTokens(tnsnamesParser.ID)
else:
return self.getToken(tnsnamesParser.ID, i)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def L_SQUARE(self):
return self.getToken(tnsnamesParser.L_SQUARE, 0)
def DOT(self):
return self.getToken(tnsnamesParser.DOT, 0)
def R_SQUARE(self):
return self.getToken(tnsnamesParser.R_SQUARE, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_rdb_database
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_rdb_database"):
listener.enterCd_rdb_database(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_rdb_database"):
listener.exitCd_rdb_database(self)
def cd_rdb_database(self):
localctx = tnsnamesParser.Cd_rdb_databaseContext(self, self._ctx, self.state)
self.enterRule(localctx, 158, self.RULE_cd_rdb_database)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 663
self.match(tnsnamesParser.L_PAREN)
self.state = 664
self.match(tnsnamesParser.RDB_DATABASE)
self.state = 665
self.match(tnsnamesParser.EQUAL)
self.state = 670
_la = self._input.LA(1)
if _la == tnsnamesParser.L_SQUARE:
self.state = 666
self.match(tnsnamesParser.L_SQUARE)
self.state = 667
self.match(tnsnamesParser.DOT)
self.state = 668
self.match(tnsnamesParser.ID)
self.state = 669
self.match(tnsnamesParser.R_SQUARE)
self.state = 672
self.match(tnsnamesParser.ID)
self.state = 673
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_serverContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def SERVER(self):
return self.getToken(tnsnamesParser.SERVER, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def DEDICATED(self):
return self.getToken(tnsnamesParser.DEDICATED, 0)
def SHARED(self):
return self.getToken(tnsnamesParser.SHARED, 0)
def POOLED(self):
return self.getToken(tnsnamesParser.POOLED, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_server
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_server"):
listener.enterCd_server(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_server"):
listener.exitCd_server(self)
def cd_server(self):
localctx = tnsnamesParser.Cd_serverContext(self, self._ctx, self.state)
self.enterRule(localctx, 160, self.RULE_cd_server)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 675
self.match(tnsnamesParser.L_PAREN)
self.state = 676
self.match(tnsnamesParser.SERVER)
self.state = 677
self.match(tnsnamesParser.EQUAL)
self.state = 678
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & (
(1 << tnsnamesParser.DEDICATED) | (1 << tnsnamesParser.SHARED) | (
1 << tnsnamesParser.POOLED))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 679
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cd_urContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def UR(self):
return self.getToken(tnsnamesParser.UR, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def UR_A(self):
return self.getToken(tnsnamesParser.UR_A, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_cd_ur
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterCd_ur"):
listener.enterCd_ur(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitCd_ur"):
listener.exitCd_ur(self)
def cd_ur(self):
localctx = tnsnamesParser.Cd_urContext(self, self._ctx, self.state)
self.enterRule(localctx, 162, self.RULE_cd_ur)
try:
self.enterOuterAlt(localctx, 1)
self.state = 681
self.match(tnsnamesParser.L_PAREN)
self.state = 682
self.match(tnsnamesParser.UR)
self.state = 683
self.match(tnsnamesParser.EQUAL)
self.state = 684
self.match(tnsnamesParser.UR_A)
self.state = 685
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Fo_paramsContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def fo_parameter(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(tnsnamesParser.Fo_parameterContext)
else:
return self.getTypedRuleContext(tnsnamesParser.Fo_parameterContext, i)
def getRuleIndex(self):
return tnsnamesParser.RULE_fo_params
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFo_params"):
listener.enterFo_params(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFo_params"):
listener.exitFo_params(self)
def fo_params(self):
localctx = tnsnamesParser.Fo_paramsContext(self, self._ctx, self.state)
self.enterRule(localctx, 164, self.RULE_fo_params)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 688
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 687
self.fo_parameter()
self.state = 690
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la == tnsnamesParser.L_PAREN):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Fo_parameterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def fo_type(self):
return self.getTypedRuleContext(tnsnamesParser.Fo_typeContext, 0)
def fo_backup(self):
return self.getTypedRuleContext(tnsnamesParser.Fo_backupContext, 0)
def fo_method(self):
return self.getTypedRuleContext(tnsnamesParser.Fo_methodContext, 0)
def fo_retries(self):
return self.getTypedRuleContext(tnsnamesParser.Fo_retriesContext, 0)
def fo_delay(self):
return self.getTypedRuleContext(tnsnamesParser.Fo_delayContext, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_fo_parameter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFo_parameter"):
listener.enterFo_parameter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFo_parameter"):
listener.exitFo_parameter(self)
def fo_parameter(self):
localctx = tnsnamesParser.Fo_parameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 166, self.RULE_fo_parameter)
try:
self.state = 697
la_ = self._interp.adaptivePredict(self._input, 45, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 692
self.fo_type()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 693
self.fo_backup()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 694
self.fo_method()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 695
self.fo_retries()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 696
self.fo_delay()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Fo_typeContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def TYPE(self):
return self.getToken(tnsnamesParser.TYPE, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def SESSION(self):
return self.getToken(tnsnamesParser.SESSION, 0)
def SELECT(self):
return self.getToken(tnsnamesParser.SELECT, 0)
def NONE(self):
return self.getToken(tnsnamesParser.NONE, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_fo_type
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFo_type"):
listener.enterFo_type(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFo_type"):
listener.exitFo_type(self)
def fo_type(self):
localctx = tnsnamesParser.Fo_typeContext(self, self._ctx, self.state)
self.enterRule(localctx, 168, self.RULE_fo_type)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 699
self.match(tnsnamesParser.L_PAREN)
self.state = 700
self.match(tnsnamesParser.TYPE)
self.state = 701
self.match(tnsnamesParser.EQUAL)
self.state = 702
_la = self._input.LA(1)
if not (((((_la - 68)) & ~0x3f) == 0 and ((1 << (_la - 68)) & (
(1 << (tnsnamesParser.SESSION - 68)) | (1 << (tnsnamesParser.SELECT - 68)) | (
1 << (tnsnamesParser.NONE - 68)))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 703
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Fo_backupContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def BACKUP(self):
return self.getToken(tnsnamesParser.BACKUP, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def ID(self):
return self.getToken(tnsnamesParser.ID, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_fo_backup
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFo_backup"):
listener.enterFo_backup(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFo_backup"):
listener.exitFo_backup(self)
def fo_backup(self):
localctx = tnsnamesParser.Fo_backupContext(self, self._ctx, self.state)
self.enterRule(localctx, 170, self.RULE_fo_backup)
try:
self.enterOuterAlt(localctx, 1)
self.state = 705
self.match(tnsnamesParser.L_PAREN)
self.state = 706
self.match(tnsnamesParser.BACKUP)
self.state = 707
self.match(tnsnamesParser.EQUAL)
self.state = 708
self.match(tnsnamesParser.ID)
self.state = 709
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Fo_methodContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def METHOD(self):
return self.getToken(tnsnamesParser.METHOD, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def BASIC(self):
return self.getToken(tnsnamesParser.BASIC, 0)
def PRECONNECT(self):
return self.getToken(tnsnamesParser.PRECONNECT, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_fo_method
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFo_method"):
listener.enterFo_method(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFo_method"):
listener.exitFo_method(self)
def fo_method(self):
localctx = tnsnamesParser.Fo_methodContext(self, self._ctx, self.state)
self.enterRule(localctx, 172, self.RULE_fo_method)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 711
self.match(tnsnamesParser.L_PAREN)
self.state = 712
self.match(tnsnamesParser.METHOD)
self.state = 713
self.match(tnsnamesParser.EQUAL)
self.state = 714
_la = self._input.LA(1)
if not (_la == tnsnamesParser.BASIC or _la == tnsnamesParser.PRECONNECT):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 715
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Fo_retriesContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def RETRIES(self):
return self.getToken(tnsnamesParser.RETRIES, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_fo_retries
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFo_retries"):
listener.enterFo_retries(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFo_retries"):
listener.exitFo_retries(self)
def fo_retries(self):
localctx = tnsnamesParser.Fo_retriesContext(self, self._ctx, self.state)
self.enterRule(localctx, 174, self.RULE_fo_retries)
try:
self.enterOuterAlt(localctx, 1)
self.state = 717
self.match(tnsnamesParser.L_PAREN)
self.state = 718
self.match(tnsnamesParser.RETRIES)
self.state = 719
self.match(tnsnamesParser.EQUAL)
self.state = 720
self.match(tnsnamesParser.INT)
self.state = 721
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Fo_delayContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def L_PAREN(self):
return self.getToken(tnsnamesParser.L_PAREN, 0)
def DELAY(self):
return self.getToken(tnsnamesParser.DELAY, 0)
def EQUAL(self):
return self.getToken(tnsnamesParser.EQUAL, 0)
def INT(self):
return self.getToken(tnsnamesParser.INT, 0)
def R_PAREN(self):
return self.getToken(tnsnamesParser.R_PAREN, 0)
def getRuleIndex(self):
return tnsnamesParser.RULE_fo_delay
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFo_delay"):
listener.enterFo_delay(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFo_delay"):
listener.exitFo_delay(self)
def fo_delay(self):
localctx = tnsnamesParser.Fo_delayContext(self, self._ctx, self.state)
self.enterRule(localctx, 176, self.RULE_fo_delay)
try:
self.enterOuterAlt(localctx, 1)
self.state = 723
self.match(tnsnamesParser.L_PAREN)
self.state = 724
self.match(tnsnamesParser.DELAY)
self.state = 725
self.match(tnsnamesParser.EQUAL)
self.state = 726
self.match(tnsnamesParser.INT)
self.state = 727
self.match(tnsnamesParser.R_PAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
| erget/tnsmaster | tnsnames/tnsnamesParser.py | Python | mit | 212,500 |
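A hedged sketch of how the generated parser above might be driven with the ANTLR4 Python runtime; the companion lexer module and the start-rule name are assumptions, since only the parser is shown here.

from antlr4 import CommonTokenStream, InputStream
from tnsnamesLexer import tnsnamesLexer    # assumed companion lexer module
from tnsnamesParser import tnsnamesParser

def parse_tnsnames(text):
    # Standard ANTLR4 pipeline: char stream -> lexer -> token stream -> parser.
    lexer = tnsnamesLexer(InputStream(text))
    parser = tnsnamesParser(CommonTokenStream(lexer))
    return parser.tnsnames()  # assumed start rule; use the grammar's real entry rule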
"""
Confirmation screen for peer calibration and grading.
"""
from bok_choy.page_object import PageObject
class PeerConfirmPage(PageObject):
"""
Confirmation for peer calibration and grading.
"""
url = None
def is_browser_on_page(self):
return self.is_css_present('section.calibration-interstitial-page')
def start(self, is_calibrating=False):
"""
Continue to the next section after the confirmation page.
If `is_calibrating` is false, try to continue to peer grading.
Otherwise, try to continue to calibration grading.
"""
self.css_click(
'input.calibration-interstitial-page-button'
if is_calibrating else 'input.interstitial-page-button'
)
| pku9104038/edx-platform | common/test/acceptance/pages/lms/peer_confirm.py | Python | agpl-3.0 | 761 |
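A hedged sketch of typical usage of the page object above inside a bok-choy acceptance test; the `browser` WebDriver instance is supplied by the test harness and is an assumption here.

page = PeerConfirmPage(browser)
page.wait_for_page()              # bok-choy helper; polls is_browser_on_page()
page.start(is_calibrating=True)   # continue to calibration grading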
from django.contrib import admin
from .models import Job
# Register your models here.
admin.site.register(Job)
| WilliamQLiu/job-waffle | employer/admin.py | Python | apache-2.0 | 113 |
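For comparison, a hedged sketch of a slightly richer registration using a ModelAdmin; the list_display fields are hypothetical, as the Job model's fields are not shown.

from django.contrib import admin
from .models import Job

@admin.register(Job)
class JobAdmin(admin.ModelAdmin):
    list_display = ('id',)  # hypothetical; replace with real Job fields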
import os
import numpy as np
import ctypes as C
from obspy.signal.headers import clibevresp
from obspy.core.util.base import NamedTemporaryFile
from obspy.signal.invsim import cornFreq2Paz, pazToFreqResp, c_sac_taper
from miic.core.miic_utils import nextpow2
def evalresp(t_samp, nfft, filename, date, station='*', channel='*',
             network='*', locid='*', units="VEL", start_stage=-1,
             stop_stage=0, freq=False, debug=False):
"""
Use the evalresp library to extract instrument response
information from a SEED RESP-file. To restrict the response to the
instrument the start and stop stages can be specified here.
:type t_samp: float
:param t_samp: Sampling interval in seconds
:type nfft: int
:param nfft: Number of FFT points of signal which needs correction
    :type filename: str (or open file-like object)
    :param filename: SEED RESP-filename or open file-like object with RESP
        information. Any object that provides a read() method will be
        considered to be a file-like object.
:type date: UTCDateTime
:param date: Date of interest
:type station: str
:param station: Station id
:type channel: str
:param channel: Channel id
:type network: str
:param network: Network id
:type locid: str
:param locid: Location id
:type units: str
:param units: Units to return response in. Can be either DIS, VEL or ACC
:type start_stage: int
:param start_stage: integer stage number of the start stage (<0 causes
default evalresp behaviour).
:type stop_stage: int
:param stop_stage: integer stage number of the stop stage
:type freq: bool
:param freq: if True, return the frequency axis together with the response
:type debug: bool
:param debug: Verbose output to stdout. Disabled by default.
:rtype: numpy.ndarray complex128
:return: Frequency response from SEED RESP-file of length nfft
"""
if isinstance(filename, basestring):
with open(filename, 'rb') as fh:
data = fh.read()
elif hasattr(filename, 'read'):
    data = filename.read()
else:
    raise TypeError('filename must be a RESP file path or a file-like object')
# evalresp needs files with correct line separators depending on OS
fh = NamedTemporaryFile()
# 'if 1:' stands in for the former 'with NamedTemporaryFile() as fh:' block
# so the indentation below is preserved; the file is closed explicitly
# before evalresp opens it by name.
if 1:
tempfile = fh.name
fh.write(os.linesep.join(data.splitlines()))
fh.close()
fy = 1 / (t_samp * 2.0)
# start at zero to get zero for offset/ DC of fft
freqs = np.linspace(0, fy, nfft // 2 + 1)
start_stage_c = C.c_int(start_stage)
stop_stage_c = C.c_int(stop_stage)
stdio_flag = C.c_int(0)
sta = C.create_string_buffer(station)
cha = C.create_string_buffer(channel)
net = C.create_string_buffer(network)
locid = C.create_string_buffer(locid)
unts = C.create_string_buffer(units)
if debug:
vbs = C.create_string_buffer("-v")
else:
vbs = C.create_string_buffer("")
rtyp = C.create_string_buffer("CS")
datime = C.create_string_buffer(date.formatSEED())
fn = C.create_string_buffer(tempfile)
nfreqs = C.c_int(freqs.shape[0])
res = clibevresp.evresp(sta, cha, net, locid, datime, unts, fn,
freqs, nfreqs, rtyp, vbs, start_stage_c,
stop_stage_c, stdio_flag, C.c_int(0))
# optimizing performance, see
# http://wiki.python.org/moin/PythonSpeed/PerformanceTips
nfreqs, rfreqs, rvec = res[0].nfreqs, res[0].freqs, res[0].rvec
h = np.empty(nfreqs, dtype='complex128')
f = np.empty(nfreqs, dtype='float64')
for i in xrange(nfreqs):
h[i] = rvec[i].real + rvec[i].imag * 1j
f[i] = rfreqs[i]
clibevresp.free_response(res)
del nfreqs, rfreqs, rvec, res
if freq:
return h, f
return h
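# Minimal usage sketch for evalresp() (illustrative; the RESP filename,
# date and SEED ids below are placeholders, not taken from this module):
#
#   from obspy.core import UTCDateTime
#   resp = evalresp(t_samp=0.01, nfft=16384, filename='RESP.GR.FUR..BHZ',
#                   date=UTCDateTime(2010, 1, 1), station='FUR',
#                   channel='BHZ', network='GR', units='VEL')
#   # resp holds the complex response at nfft // 2 + 1 frequencies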
def correct_response(st, removeResp=False, removePAZ=False, simPAZ=False, pre_filt=None, cornFreq=0.0083):
"""
Correct the seismometer response.
The response to be removed is given either as a dictionary ``removeResp``
or as a dictionary ``removePAZ``; ``removeResp`` takes precedence. The
dictionaries have the following structure:
removeResp: dictionary with response information to be removed;
has the following keys:
respfile: (str) filename of evalresp response file
units: (str) units to return response in; can be either DIS, VEL or ACC
start_stage: (int) integer stage number of the start stage (<0 causes
default evalresp behaviour)
stop_stage: (int) integer stage number of the stop stage
removePAZ: dictionary with poles and zeros to be removed; has the following
keys:
poles: (list of complex numbers) locations of poles
zeros: (list of complex numbers) locations of zeros
gain: (float) gain
sensitivity: (float) sensitivity
It can easily be retrieved with obspy.arclink.client.Client.getPAZ.
If ``removeResp`` is given, the response of each trace must be present in
the respfile. If ``removePAZ`` is used, the response is assumed to be the
same for all traces in the stream.
A filter specified in pre_filt can be applied to avoid amplification of
noise.
The instrument to be simulated is either described in the dictionary simPAZ
or, if simPAZ is False, by the corner frequency ``cornFreq``. Response
correction is done in place and the original data is overwritten.
The input stream ``st`` should be demeaned and tapered.
:type st: obspy.core.stream.Stream
:param st: data stream to be corrected
:type removeResp: dict
:param removeResp: Response information to be removed
:type removePAZ: dict
:param removePAZ: Response information to be removed
:type simPAZ: dict
:param simPAZ: Response information to be simulated
:type cornFreq: float
:param cornFreq: corner frequency of instrument to be simulated
:type pre_filt: list
:param pre_filt: 4 corners of the filter
"""
for tr in st:
starttime = tr.stats['starttime']
endtime = tr.stats['endtime']
network = tr.stats['network']
station = tr.stats['station']
channel = tr.stats['channel']
location = tr.stats['location']
length = tr.stats['npts']
sampling_rate = tr.stats['sampling_rate']
np2l = nextpow2(2.*length)
if not simPAZ:
simPAZ = cornFreq2Paz(cornFreq, damp=0.70716)
simresp, freqs = np.conj(pazToFreqResp(simPAZ['poles'], simPAZ['zeros'],
scale_fac=simPAZ['gain']*simPAZ['sensitivity'],
t_samp=1./sampling_rate,
nfft=np2l, freq=True)) #see Doc of pazToFreqResp for reason of conj()
if removeResp:
freqresp, freqs = evalresp(1./sampling_rate,np2l,removeResp['respfile'],
starttime, network=network, station=station,
channel=channel, locid=location,
start_stage=removeResp['start_stage'],
stop_stage=removeResp['stop_stage'],
units=removeResp['units'], freq=True)
else:
freqresp, freqs = np.conj(pazToFreqResp(removePAZ['poles'], removePAZ['zeros'],
scale_fac=removePAZ['gain']*removePAZ['sensitivity'],
t_samp=1./sampling_rate,
nfft=np2l, freq=True)) #see Doc of pazToFreqResp for reason of conj()
ftr = np.fft.rfft(tr.data,n=np2l)
ftr /= freqresp
ftr[0] = 0.j # correct the NaN in the DC component
ftr *= simresp
if pre_filt:
ftr *= c_sac_taper(freqs, flimit=pre_filt)
tr.data = np.fft.irfft(ftr)
tr.trim(starttime,endtime)
return
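# Minimal usage sketch for correct_response() (illustrative; the file name
# and PAZ values are placeholders):
#
#   from obspy.core import read
#   st = read('data.mseed')
#   st.detrend('demean')
#   st.taper()
#   paz = {'poles': [-0.037 + 0.037j, -0.037 - 0.037j], 'zeros': [0j, 0j],
#          'gain': 1.0, 'sensitivity': 1.5e9}
#   correct_response(st, removePAZ=paz, pre_filt=[0.01, 0.02, 40.0, 45.0])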
|
miic-sw/miic
|
miic.core/src/miic/core/response_correction.py
|
Python
|
gpl-3.0
| 8,071
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
title = "Bednets"
tab_link = "/bednets"
a = "Alpha"
b = "Beta"
|
takinbo/rapidsms-borno
|
apps/bednets/config.py
|
Python
|
lgpl-3.0
| 122
|
"""Tests for the WLED config flow."""
from unittest.mock import MagicMock
from wled import WLEDConnectionError
from homeassistant.components import zeroconf
from homeassistant.components.wled.const import CONF_KEEP_MASTER_LIGHT, DOMAIN
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.common import MockConfigEntry
async def test_full_user_flow_implementation(
hass: HomeAssistant, mock_wled_config_flow: MagicMock, mock_setup_entry: None
) -> None:
"""Test the full manual user flow from start to finish."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result.get("step_id") == "user"
assert result.get("type") == RESULT_TYPE_FORM
assert "flow_id" in result
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "192.168.1.123"}
)
assert result.get("title") == "WLED RGB Light"
assert result.get("type") == RESULT_TYPE_CREATE_ENTRY
assert "data" in result
assert result["data"][CONF_HOST] == "192.168.1.123"
assert "result" in result
assert result["result"].unique_id == "aabbccddeeff"
async def test_full_zeroconf_flow_implementation(
hass: HomeAssistant, mock_wled_config_flow: MagicMock, mock_setup_entry: None
) -> None:
"""Test the full manual user flow from start to finish."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.local.",
name="mock_name",
port=None,
properties={CONF_MAC: "aabbccddeeff"},
type="mock_type",
),
)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert result.get("description_placeholders") == {CONF_NAME: "WLED RGB Light"}
assert result.get("step_id") == "zeroconf_confirm"
assert result.get("type") == RESULT_TYPE_FORM
assert "flow_id" in result
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2.get("title") == "WLED RGB Light"
assert result2.get("type") == RESULT_TYPE_CREATE_ENTRY
assert "data" in result2
assert result2["data"][CONF_HOST] == "192.168.1.123"
assert "result" in result2
assert result2["result"].unique_id == "aabbccddeeff"
async def test_connection_error(
hass: HomeAssistant, mock_wled_config_flow: MagicMock
) -> None:
"""Test we show user form on WLED connection error."""
mock_wled_config_flow.update.side_effect = WLEDConnectionError
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "example.com"},
)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == "user"
assert result.get("errors") == {"base": "cannot_connect"}
async def test_zeroconf_connection_error(
hass: HomeAssistant, mock_wled_config_flow: MagicMock
) -> None:
"""Test we abort zeroconf flow on WLED connection error."""
mock_wled_config_flow.update.side_effect = WLEDConnectionError
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.local.",
name="mock_name",
port=None,
properties={CONF_MAC: "aabbccddeeff"},
type="mock_type",
),
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "cannot_connect"
async def test_user_device_exists_abort(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_wled_config_flow: MagicMock,
) -> None:
"""Test we abort zeroconf flow if WLED device already configured."""
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "192.168.1.123"},
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "already_configured"
async def test_zeroconf_without_mac_device_exists_abort(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_wled_config_flow: MagicMock,
) -> None:
"""Test we abort zeroconf flow if WLED device already configured."""
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.local.",
name="mock_name",
port=None,
properties={},
type="mock_type",
),
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "already_configured"
async def test_zeroconf_with_mac_device_exists_abort(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_wled_config_flow: MagicMock,
) -> None:
"""Test we abort zeroconf flow if WLED device already configured."""
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.local.",
name="mock_name",
port=None,
properties={CONF_MAC: "aabbccddeeff"},
type="mock_type",
),
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "already_configured"
async def test_options_flow(
hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> None:
"""Test options config flow."""
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(mock_config_entry.entry_id)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == "init"
assert "flow_id" in result
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_KEEP_MASTER_LIGHT: True},
)
assert result2.get("type") == RESULT_TYPE_CREATE_ENTRY
assert result2.get("data") == {
CONF_KEEP_MASTER_LIGHT: True,
}
|
mezz64/home-assistant
|
tests/components/wled/test_config_flow.py
|
Python
|
apache-2.0
| 6,761
|
# -*- coding: utf-8 -*-
from openerp.osv import osv,fields
from openerp import SUPERUSER_ID
class product_product(osv.Model):
_inherit = 'product.product'
def generate_ean13(self, cr, uid, ids, context=None):
if context is None: context = {}
generate_context = context.copy()
product_ids = self.browse(cr, uid, ids, context=context)
seq_ean13_to_weight = product_ids.env.ref('product_barcode_generator_custom.seq_ean13_to_weight')
seq_ean13_internal = product_ids.env.ref('product_barcode_generator_custom.seq_ean13_internal')
for product in product_ids:
if product.ean13:
continue
if product.to_weight:
sequence_id = seq_ean13_to_weight.id
else:
sequence_id = seq_ean13_internal.id
generate_context.update({'sequence_id':sequence_id})
ean13 = self._generate_ean13_value(cr, uid, product, context=generate_context)
if not ean13:
continue
self.write(cr, uid, [product.id], {
'ean_sequence_id':sequence_id,
'ean13': ean13,
}, context=context)
return True
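# Usage sketch (assumption: invoked through the old OpenERP API, e.g. from a
# server action or button):
#
#   product_obj = self.pool.get('product.product')
#   ids = product_obj.search(cr, uid, [('ean13', '=', False)], context=context)
#   product_obj.generate_ean13(cr, uid, ids, context=context)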
|
germanponce/pos-addons
|
product_barcode_generator_custom/models.py
|
Python
|
lgpl-3.0
| 1,215
|
# -*- coding: utf-8 -*-
from datetime import date
import scrapy
from ..db import ads_db
from ..items import AdvertisementItem
class TipmotoSpider(scrapy.Spider):
name = "tipmoto"
allowed_domains = ["www.tipmoto.com", "www.motoinzerce.cz"]
start_urls = (
'http://www.motoinzerce.cz/hledat.php?cenaod=&cenado=50000&vykonod=25&vykondo=35&razeni=cenaa&submit.x=0&submit.y=0&strankovani=100',
'http://www.tipmoto.com/hledat.php?vykonod=25&vykondo=35&cenaod=&cenado=50000&razeni=cenaa&submit.x=0&submit.y=0&strankovani=100',
)
def parse_moto(self, response):
ad = response.css("#prava-in")
title = ad.xpath("h1/text()").extract_first()
description = ad.xpath("div/div[@id='detail-popis']/h2[text()='Popis:']/following-sibling::p/text()").extract_first()
price = ad.xpath("p[@id='detail-cena']/text()").extract_first().strip()
year = ad.css("div.indent p.d33.d1").re(u"<strong>(Vyrobeno|Provoz):</strong>\s*(\d+)")
year = year[1] if year else None
mileage = ad.css("div.indent p.d33.d1").re(u"<strong>Najeto:</strong>\s*(\d+) km")
mileage = mileage[0] if mileage else None
power = ad.css("div.indent p.d33.d2").re(u"<strong>Výkon:</strong>\s*([0-9\.,]+)")[0]
date_ = ad.re(u'<strong>Vloženo:</strong>\s*<span>(.*)</span>') or None
if date_:
d, m, y = map(int, date_[0].split("."))
date_ = date(y, m, d)
return AdvertisementItem(title=title, description=description, price=price, power=power,
year=year, mileage=mileage, permalink=response.url, date=date_)
def parse(self, response):
for ad in response.css("table.vypis tr:not(.th):not(.prvnitipmoto):not(.tipmoto):not(.tipmotoposledni)"):
url = response.urljoin(ad.xpath("td[1]/a/@href").extract_first())
if url in ads_db:
    # Already stored in the ads database; skip it.
    continue
yield scrapy.Request(url, self.parse_moto)
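# Run sketch (assumption about the surrounding Scrapy project): the spider is
# launched with the standard CLI from the project root, e.g.
#
#   scrapy crawl tipmoto -o ads.json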
|
sairon/motoscrape
|
motoscrape/spiders/tipmoto.py
|
Python
|
unlicense
| 2,002
|
#!/usr/bin/python
from __future__ import print_function
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from sys import argv
from time import sleep
class SingleSwitchTopo(Topo):
"Single switch connected to n hosts."
def __init__(self, n=2, lossy=False, **opts):
Topo.__init__(self, **opts)
switch = self.addSwitch('s1')
for h in range(n):
# Each host gets 90%/n of system CPU
host = self.addHost('h%s' % (h + 1),
cpu=.9 / n)
# 10 Mbps, 5ms delay, no packet loss
self.addLink(host, switch, bw=10, delay='5ms', loss=0, use_htb=True)
def writeConfigFile(r,q1,q2,g1,g2):
filename = "config_r"+str(r)+"_q"+str(q2)+"_g"+str(g2)
f = open(filename, 'w')
# add replicas
for id in range(0,r):
f.write('replica '+str(id)+' 10.0.0.'+str(id+1)+' 880'+str(id)+'\n')
# add rest of config
f.write('quorum-1 '+str(q1)+'\n')
f.write('quorum-2 '+str(q2)+'\n')
f.write('group-1 '+str(g1)+'\n')
f.write('group-2 '+str(g2)+'\n')
f.write('learner-catch-up no\n')
f.write('lmdb-env-path /tmp/acceptor\n')
return filename
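# Example of the file produced by writeConfigFile(3, 2, 2, 3, 2) (derived
# from the code above; written to "config_r3_q2_g2"):
#
#   replica 0 10.0.0.1 8800
#   replica 1 10.0.0.2 8801
#   replica 2 10.0.0.3 8802
#   quorum-1 2
#   quorum-2 2
#   group-1 3
#   group-2 2
#   learner-catch-up no
#   lmdb-env-path /tmp/acceptor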
def libFPaxosTest(r,q1,q2,thrifty):
"Create network and run simple performance test"
topo = SingleSwitchTopo( n=r+1 )
net = Mininet( topo=topo,
host=CPULimitedHost, link=TCLink,
autoStaticArp=True )
net.start()
print( "Dumping host connections" )
dumpNodeConnections(net.hosts)
print( "Running libFPaxos" )
path = '../../build/sample/'
if thrifty:
config_file = writeConfigFile(r,q1,q2,r,q2)
else:
config_file = writeConfigFile(r,q1,q2,r,r)
# start replicas
for id in range(1,r+1):
host = net.get('h'+ str(id))
print( "Starting libFPaxos on replica "+str(id))
stdout = host.cmd(path + 'replica '+str(id-1)+' '+config_file+' &>> '+str(id-1)+'.log &')
print(stdout)
# start client
sleep(2)
c = net.get('h'+str(r+1))
print( "Starting libFPaxos client")
stdout = c.cmd(path + 'client '+config_file+' -p 0 -o 10 &>> client-'+config_file+'.log &')
print(stdout)
pid = int( c.cmd('echo $!') )
sleep(120)
print("killing process "+str(pid))
stdout = c.cmd('kill '+str(pid))
sleep(1)
print(stdout)
net.stop()
def libPaxos(r):
libFPaxosTest(r,(r/2)+1,(r/2)+1,False)
def libFPaxos(r,q2):
libFPaxosTest(r,(r-q2)+1,q2,True)
if __name__ == '__main__':
setLogLevel( 'info' )
for n in range (3,11):
for q in range (1,(n/2)+2):
libFPaxos(n,q)
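# Note (assumption about the runtime environment): Mininet requires root
# privileges, so the sweep above is typically launched as
#
#   sudo python deploy_mininet.py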
|
fpaxos/fpaxos-test
|
deploy_mininet.py
|
Python
|
mit
| 2,771
|
#!/usr/bin/python3
# coding: utf-8
# data_content.py
import os
class DataContent(object):
"""
Class to identify a column or table based on its content.
It will use a reference table that specifies known content types,
such as:
transactions CONTAINS [date col, amount col, desc, customer]
web logs CONTAINS [date, ip, file, type (OPT)]
TODO
- use the sample files in rawdata.data.samples as a starting point
"""
pass
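# A minimal sketch of the intended matching logic (assumption: the reference
# table and the helper below are illustrative, not part of AIKIF).
REFERENCE_CONTENT_TYPES = {
    'transactions': ['date', 'amount', 'desc', 'customer'],
    'web logs': ['date', 'ip', 'file'],  # 'type' is optional per the docstring
}

def identify_content(columns):
    """Return the first content type whose required columns all appear in
    `columns` (case-insensitive), or None if nothing matches."""
    lowered = set(c.lower() for c in columns)
    for content_type, required in REFERENCE_CONTENT_TYPES.items():
        if all(col in lowered for col in required):
            return content_type
    return None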
|
acutesoftware/AIKIF
|
aikif/dataTools/cls_data_content.py
|
Python
|
gpl-3.0
| 482
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Task', fields ['name', 'description', 'course']
db.create_unique(u'courses_task', ['name', 'description', 'course_id'])
def backwards(self, orm):
# Removing unique constraint on 'Task', fields ['name', 'description', 'course']
db.delete_unique(u'courses_task', ['name', 'description', 'course_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'courses.certificate': {
'Meta': {'object_name': 'Certificate'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.CourseAssignment']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issues_closed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'issues_opened': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_commits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'weekly_commits': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['courses.WeeklyCommit']", 'symmetrical': 'False'})
},
u'courses.course': {
'Meta': {'object_name': 'Course'},
'SEO_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'SEO_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'application_until': ('django.db.models.fields.DateField', [], {}),
'applications_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'ask_for_favorite_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('tinymce.models.HTMLField', [], {}),
'git_repository': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'next_season_mail_list': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['courses.Partner']", 'null': 'True', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'show_on_index': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80'})
},
u'courses.partner': {
'Meta': {'ordering': "('ordering',)", 'object_name': 'Partner'},
'description': ('tinymce.models.HTMLField', [], {}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'courses.task': {
'Meta': {'unique_together': "(('name', 'description', 'course'),)", 'object_name': 'Task'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['courses.Course']"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_exam': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'week': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'courses.weeklycommit': {
'Meta': {'object_name': 'WeeklyCommit'},
'commits_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'forum.category': {
'Meta': {'ordering': "('ordering',)", 'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'forum.topic': {
'Meta': {'object_name': 'Topic'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['forum.Category']"}),
'date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'students.courseassignment': {
'Meta': {'unique_together': "(('user', 'course'),)", 'object_name': 'CourseAssignment'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['courses.Course']"}),
'cv': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'favourite_partners': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['courses.Partner']", 'symmetrical': 'False'}),
'group_time': ('django.db.models.fields.SmallIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {'default': "'0'"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.User']"})
},
u'students.user': {
'Meta': {'object_name': 'User'},
'avatar': ('django_resized.forms.ResizedImageField', [], {'max_length': '100', 'max_width': '200', 'blank': 'True'}),
'courses': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['courses.Course']", 'through': u"orm['students.CourseAssignment']", 'symmetrical': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'github_account': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hr_of': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['courses.Partner']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'linkedin_account': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '17', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'subscribed_topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['forum.Topic']", 'symmetrical': 'False', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
'works_at': ('django.db.models.fields.CharField', [], {'max_length': "'40'", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['courses']
|
HackBulgaria/Odin
|
courses/south_migrations/0023_auto__add_unique_task_name_description_course.py
|
Python
|
agpl-3.0
| 11,628
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field receiving_user on 'Activity'
db.create_table(u'activity_receiving_users', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('activity', models.ForeignKey(orm['forum.activity'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique(u'activity_receiving_users', ['activity_id', 'user_id'])
def backwards(self, orm):
# Removing M2M table for field receiving_user on 'Activity'
db.delete_table('activity_receiving_users')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forum.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'to': "orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['forum.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'forum.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'forum.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['forum.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'forum.answerrevision': {
'Meta': {'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['forum.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'forum.authkeyuserassociation': {
'Meta': {'object_name': 'AuthKeyUserAssociation'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_keys'", 'to': "orm['auth.User']"})
},
'forum.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['forum.Badge']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'forum.badge': {
'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {})
},
'forum.book': {
'Meta': {'object_name': 'Book', 'db_table': "u'book'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cover_img': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'pages': ('django.db.models.fields.SmallIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'publication': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'book'", 'db_table': "'book_question'", 'to': "orm['forum.Question']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.bookauthorinfo': {
'Meta': {'object_name': 'BookAuthorInfo', 'db_table': "u'book_author_info'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.bookauthorrss': {
'Meta': {'object_name': 'BookAuthorRss', 'db_table': "u'book_author_rss'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_created_at': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'forum.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'forum.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'forum.flaggeditem': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['auth.User']"})
},
'forum.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['forum.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'forum.mention': {
'Meta': {'object_name': 'Mention', 'db_table': "u'mention'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mentioned_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'mentioned_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentions_sent'", 'to': "orm['auth.User']"}),
'mentioned_whom': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentions_received'", 'to': "orm['auth.User']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'forum.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'to': "orm['forum.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'forum.questionrevision': {
'Meta': {'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['forum.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'forum.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['forum.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'forum.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.tag': {
'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'forum.validationhash': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'ValidationHash'},
'expiration': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 5, 18, 11, 49, 24, 344026)'}),
'hash_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seed': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
}
}
complete_apps = ['forum']
|
samhoo/askbot-realworld
|
askbot/migrations/0010_add_receiving_user_to_activity_model.py
|
Python
|
gpl-3.0
| 30,800
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-14 16:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('cover', models.CharField(max_length=100)),
('datetime', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('desc', models.CharField(max_length=100)),
('path', models.CharField(max_length=100)),
('datetime', models.DateTimeField()),
('albumId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gallery.Album')),
],
),
]
|
wenxiaomao1023/wenxiaomao
|
gallery/migrations/0001_initial.py
|
Python
|
mit
| 1,203
|
"Utilities for loading models and the modules that contain them."
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
import sys
import os
import threading
__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models',
'load_app', 'app_cache_ready')
class AppCache(object):
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
"""
# Use the Borg pattern to share state between all instances. Details at
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
__shared_state = dict(
# Keys of app_store are the model modules for each application.
app_store = SortedDict(),
# Mapping of installed app_labels to model modules for that app.
app_labels = {},
# Mapping of app_labels to a dictionary of model names to model code.
# May contain apps that are not installed.
app_models = SortedDict(),
# Mapping of app_labels to errors raised when trying to import the app.
app_errors = {},
# -- Everything below here is only used when populating the cache --
loaded = False,
handled = {},
postponed = [],
nesting_level = 0,
write_lock = threading.RLock(),
_get_models_cache = {},
)
def __init__(self):
self.__dict__ = self.__shared_state
def _populate(self):
"""
Fill in all the cache information. This method is threadsafe, in the
sense that every caller will see the same state upon return, and if the
cache is already initialised, it does no work.
"""
if self.loaded:
return
self.write_lock.acquire()
try:
if self.loaded:
return
for app_name in settings.INSTALLED_APPS:
if app_name in self.handled:
continue
self.load_app(app_name, True)
if not self.nesting_level:
for app_name in self.postponed:
self.load_app(app_name)
self.loaded = True
finally:
self.write_lock.release()
def _label_for(self, app_mod):
"""
Return app_label for given models module.
"""
return app_mod.__name__.split('.')[-2]
def load_app(self, app_name, can_postpone=False):
"""
Loads the app with the provided fully qualified name, and returns the
model module.
"""
self.handled[app_name] = None
self.nesting_level += 1
app_module = import_module(app_name)
try:
models = import_module('.models', app_name)
except ImportError:
self.nesting_level -= 1
# If the app doesn't have a models module, we can just ignore the
# ImportError and return no models for it.
if not module_has_submodule(app_module, 'models'):
return None
# But if the app does have a models module, we need to figure out
# whether to suppress or propagate the error. If can_postpone is
# True then it may be that the package is still being imported by
# Python and the models module isn't available yet. So we add the
# app to the postponed list and we'll try it again after all the
# recursion has finished (in populate). If can_postpone is False
# then it's time to raise the ImportError.
else:
if can_postpone:
self.postponed.append(app_name)
return None
else:
raise
self.nesting_level -= 1
if models not in self.app_store:
self.app_store[models] = len(self.app_store)
self.app_labels[self._label_for(models)] = models
return models
def app_cache_ready(self):
"""
Returns true if the model cache is fully populated.
        Useful for code that wants to cache the results of get_models()
        for itself once it is safe to do so.
"""
return self.loaded
def get_apps(self):
"Returns a list of all installed modules that contain models."
self._populate()
# Ensure the returned list is always in the same order (with new apps
# added at the end). This avoids unstable ordering on the admin app
# list page, for example.
apps = [(v, k) for k, v in self.app_store.items()]
apps.sort()
return [elt[1] for elt in apps]
def get_app(self, app_label, emptyOK=False):
"""
Returns the module containing the models for the given app_label. If
the app has no models in it and 'emptyOK' is True, returns None.
"""
self._populate()
self.write_lock.acquire()
try:
for app_name in settings.INSTALLED_APPS:
if app_label == app_name.split('.')[-1]:
mod = self.load_app(app_name, False)
if mod is None:
if emptyOK:
return None
raise ImproperlyConfigured("App with label %s is missing a models.py module." % app_label)
else:
return mod
raise ImproperlyConfigured("App with label %s could not be found" % app_label)
finally:
self.write_lock.release()
def get_app_errors(self):
"Returns the map of known problems with the INSTALLED_APPS."
self._populate()
return self.app_errors
def get_models(self, app_mod=None,
include_auto_created=False, include_deferred=False,
only_installed=True):
"""
Given a module containing models, returns a list of the models.
Otherwise returns a list of all installed models.
By default, auto-created models (i.e., m2m models without an
explicit intermediate table) are not included. However, if you
specify include_auto_created=True, they will be.
By default, models created to satisfy deferred attribute
queries are *not* included in the list of models. However, if
you specify include_deferred, they will be.
"""
cache_key = (app_mod, include_auto_created, include_deferred, only_installed)
try:
return self._get_models_cache[cache_key]
except KeyError:
pass
self._populate()
if app_mod:
if app_mod in self.app_store:
app_list = [self.app_models.get(self._label_for(app_mod),
SortedDict())]
else:
app_list = []
else:
if only_installed:
app_list = [self.app_models.get(app_label, SortedDict())
for app_label in self.app_labels.iterkeys()]
else:
app_list = self.app_models.itervalues()
model_list = []
for app in app_list:
model_list.extend(
model for model in app.values()
if ((not model._deferred or include_deferred) and
(not model._meta.auto_created or include_auto_created))
)
self._get_models_cache[cache_key] = model_list
return model_list
def get_model(self, app_label, model_name,
seed_cache=True, only_installed=True):
"""
Returns the model matching the given app_label and case-insensitive
model_name.
Returns None if no model is found.
"""
if seed_cache:
self._populate()
if only_installed and app_label not in self.app_labels:
return None
return self.app_models.get(app_label, SortedDict()).get(model_name.lower())
def register_models(self, app_label, *models):
"""
Register a set of models as belonging to an app.
"""
for model in models:
# Store as 'name: model' pair in a dictionary
# in the app_models dictionary
model_name = model._meta.object_name.lower()
model_dict = self.app_models.setdefault(app_label, SortedDict())
if model_name in model_dict:
# The same model may be imported via different paths (e.g.
# appname.models and project.appname.models). We use the source
# filename as a means to detect identity.
fname1 = os.path.abspath(sys.modules[model.__module__].__file__)
fname2 = os.path.abspath(sys.modules[model_dict[model_name].__module__].__file__)
# Since the filename extension could be .py the first time and
# .pyc or .pyo the second time, ignore the extension when
# comparing.
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
model_dict[model_name] = model
self._get_models_cache.clear()
cache = AppCache()
# These methods were always module level, so are kept that way for backwards
# compatibility.
get_apps = cache.get_apps
get_app = cache.get_app
get_app_errors = cache.get_app_errors
get_models = cache.get_models
get_model = cache.get_model
register_models = cache.register_models
load_app = cache.load_app
app_cache_ready = cache.app_cache_ready
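# Illustrative usage (a sketch, not part of Django): run this module as a
# script under Python 2 with Django 1.4 importable to list installed models.
if __name__ == "__main__":
    from django.conf import settings
    if not settings.configured:
        settings.configure(INSTALLED_APPS=('django.contrib.contenttypes',
                                           'django.contrib.auth'))
    # Import the canonical module so model registration and these lookups
    # share one cache (run as __main__, this file would otherwise hold a
    # second, empty AppCache).
    from django.db.models.loading import get_models, get_model
    for model in get_models():
        print '%s.%s' % (model._meta.app_label, model._meta.object_name)
    print get_model('auth', 'user')  # case-insensitive model lookup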
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/db/models/loading.py
|
Python
|
bsd-3-clause
| 9,755
|
import itertools
inf = float('inf')
def area(polygon, signed=False):
a = 0.5 * sum(
p[0] * q[1] - q[0] * p[1]
for p, q in edges(polygon)
)
if not signed:
a = abs(a)
return a
def centroid(polygon):
m = 1 / 6 / area(polygon, signed=True)
cx = m * sum(
(p[0] + q[0]) *
(p[0] * q[1] - q[0] * p[1]) for p, q in edges(polygon)
)
cy = m * sum(
(p[1] + q[1]) *
(p[0] * q[1] - q[0] * p[1]) for p, q in edges(polygon)
)
return cx, cy
def edges(polygon):
s = len(polygon)
return ((polygon[i], polygon[(i+1) % s]) for i in range(s))
def winding_number(polygon, point):
"""
http://geomalgorithms.com/a03-_inclusion.html
"""
result = 0
for p, q in edges(polygon):
if p[1] <= point[1]:
if q[1] > point[1] and _is_left(p, q, point) > 0:
result += 1
else:
if q[1] <= point[1] and _is_left(p, q, point) < 0:
result -= 1
return result
def contains(polygon, point):
return winding_number(polygon, point) != 0
def limits(polygon, axis):
vals = [sum(v * a for v, a in zip(axis, vertex)) for vertex in polygon]
return min(vals), max(vals)
def _is_left(p0, p1, p2):
"""
Input: three points P0, P1, and P2
Return: >0 for P2 left of the line through P0 and P1
=0 for P2 on the line
<0 for P2 right of the line
See: http://geomalgorithms.com/a01-_area.html
"""
return ((p1[0] - p0[0]) * (p2[1] - p0[1]) -
(p2[0] - p0[0]) * (p1[1] - p0[1])
)
class BoundingBox(object):
def __init__(self, p0, p1):
self.p0 = p0
self.p1 = p1
def __repr__(self):
return 'BoundingBox({}, {})'.format(self.p0, self.p1)
@classmethod
def around(cls, *polygons):
if len(polygons) == 1 and hasattr(polygons[0], 'bbox') and polygons[0].bbox:
return polygons[0].bbox
p0 = tuple(min(c) for c in zip(*itertools.chain(*polygons)))
p1 = tuple(max(c) for c in zip(*itertools.chain(*polygons)))
return cls(p0, p1)
def vertices(self):
for index in range(1 << self.dim()):
yield tuple(
m1 if (1 & (index >> i)) else m0
for m0, m1, i
in zip(self.p0, self.p1, range(self.dim()))
)
def contains(self, point, inclusive=True):
if inclusive:
return all(c0 <= cp <= c1 for c0, cp, c1 in zip(self.p0, point, self.p1))
else:
return all(c0 < cp < c1 for c0, cp, c1 in zip(self.p0, point, self.p1))
def collides(self, polygon, approximate=False):
if approximate and not isinstance(polygon, BoundingBox):
polygon = BoundingBox.around(polygon)
if isinstance(polygon, BoundingBox):
return self._collides_bounding_box(polygon)
else:
return any(self.contains(vertex) for vertex in polygon) \
or polygon.contains(self.center) \
or any(self._clip_segment(*edge) for edge in edges(polygon)) \
or False
# or any(polygon.contains(vertex) for vertex in self.vertices()) \
@property
def center(self):
return tuple((c0 + c1) / 2 for c0, c1 in zip(self.p0, self.p1))
def dim(self):
return len(self.p0)
def split(self, center=None):
center = center or self.center
return [self._split_quadrant(i, center=center) for i in range(1 << self.dim())]
def quadrant(self, point, center=None):
center = center or self.center
return sum(1 << i if point[i] >= center[i] else 0 for i in range(self.dim()))
def _cmp(self, point):
return tuple(-1 if p < m0 else 0 if p <= m1 else 1 for p, m0, m1 in zip(point, self.p0, self.p1))
def _collides_bounding_box(self, bbox):
return not all(0 < (m0 - o1) * (m1 - o0) for m0, m1, o0, o1 in zip(self.p0, self.p1, bbox.p0, bbox.p1))
def _clip_segment(self, v0, v1):
z = zip(self.p0, self.p1, v0, v1)
if any(o0 < m0 > o1 or o0 > m1 < o1 for m0, m1, o0, o1 in z):
return None
orders = [
(m0, m1, o0, o1 - o0) if o1 > o0 else (m1, m0, o0, o1 - o0)
for m0, m1, o0, o1 in zip(self.p0, self.p1, v0, v1)
if o0 != o1
]
minarg = max(0, max(((m0 - o0) / od for m0,_,o0,od in orders), default=0))
maxarg = min(1, min(((m1 - o0) / od for _,m1,o0,od in orders), default=1))
# print((self.p0, self.p1, v0, v1, minarg, maxarg))
if minarg < maxarg:
r0 = tuple(o0 + minarg * od for _,_,o0,od in orders)
r1 = tuple(o0 + maxarg * od for _,_,o0,od in orders)
return r0, r1
else:
return None
def _split_quadrant(self, index, center=None):
if isinstance(index, int):
indices = [(index >> i) & 1 for i in range(self.dim())]
else:
indices = index
center = center or self.center
low = list()
high = list()
for i, c0, cc, c1 in zip(indices, self.p0, center, self.p1):
if i > 0:
low.append(cc)
high.append(c1)
else:
low.append(c0)
high.append(cc)
return BoundingBox(tuple(low), tuple(high))
class Polygon(object):
def __init__(self, vertices, indices=None):
self.vertices = vertices
self.indices = indices or range(len(vertices))
self.bbox = None
self._centroid = None
def __len__(self):
return len(self.indices)
def __iter__(self):
return (self.vertices[i] for i in self.indices)
def __getitem__(self, item):
return self.vertices[self.indices[item % len(self.indices)]]
def __repr__(self):
return 'Polygon({})'.format(', '.join(str(v) for v in self))
@property
def edges(self):
return edges(self)
@property
def edges_indices(self):
s = len(self)
return ((self.indices[i], self.indices[(i+1) % s]) for i in range(s))
def prepare_bbox(self):
self.bbox = BoundingBox.around(self)
return self
def winding_number(self, point):
if self.bbox and not self.bbox.contains(point):
return 0
return winding_number(self, point)
def contains(self, point):
return self.winding_number(point) != 0
def area(self, signed=False):
return area(self, signed=signed)
def centroid(self):
if self._centroid is None:
self._centroid = centroid(self)
return self._centroid
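# Illustrative self-check (a sketch, not part of the original module):
# exercises the winding-number containment test and the signed-area centroid
# on a unit square.
if __name__ == '__main__':
    square = Polygon([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
    assert square.contains((0.5, 0.5))          # inside
    assert not square.contains((1.5, 0.5))      # outside
    assert abs(square.area() - 1.0) < 1e-9
    cx, cy = square.centroid()
    assert abs(cx - 0.5) < 1e-9 and abs(cy - 0.5) < 1e-9
    box = BoundingBox.around(square)
    assert box.contains((0.25, 0.75))
    print('geometry self-check passed:', square, box)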
|
kcsaff/maze-builder
|
maze_builder/meshes/geometry.py
|
Python
|
mit
| 6,785
|
anon = lambda -> qqq[None]: None
def f(): return 1 # this line should not break
anon : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
lambda : meta.lambda-function.python, source.python, storage.type.function.lambda.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
-> : invalid.illegal.annotation.python, meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
qqq[None] : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
: : meta.lambda-function.python, punctuation.section.function.lambda.begin.python, source.python
: source.python
None : constant.language.python, source.python
def : meta.function.python, source.python, storage.type.function.python
: meta.function.python, source.python
f : entity.name.function.python, meta.function.python, source.python
( : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.begin.python, source.python
) : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.end.python, source.python
: : meta.function.python, punctuation.section.function.begin.python, source.python
: source.python
return : keyword.control.flow.python, source.python
: source.python
1 : constant.numeric.dec.python, source.python
: source.python
# : comment.line.number-sign.python, punctuation.definition.comment.python, source.python
this line should not break : comment.line.number-sign.python, source.python
|
MagicStack/MagicPython
|
test/functions/lambda5.py
|
Python
|
mit
| 1,823
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.http import HttpResponse
from rapidsms.webui.utils import render_to_response
#from apps.messaging.models import *
#from apps.messaging.utils import *
def index(req):
return render_to_response(req,
"messaging/index.html")
|
takinbo/rapidsms-borno
|
apps/messaging/views.py
|
Python
|
lgpl-3.0
| 298
|
#
# libtcod 1.6.0 python wrapper
# Copyright (c) 2008,2009,2010,2012,2013 Jice & Mingos
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Jice or Mingos may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JICE AND MINGOS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JICE OR MINGOS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import ctypes
import struct
from ctypes import *
if not hasattr(ctypes, "c_bool"): # for Python < 2.6
c_bool = c_uint8
try: #import NumPy if available
import numpy
numpy_available = True
except ImportError:
numpy_available = False
LINUX=False
MAC=False
HAIKU=False
MINGW=False
MSVC=False
if sys.platform.find('linux') != -1:
_lib = ctypes.cdll['./libtcod.so']
LINUX=True
elif sys.platform.find('darwin') != -1:
_lib = ctypes.cdll['./libtcod.dylib']
MAC = True
elif sys.platform.find('haiku') != -1:
_lib = ctypes.cdll['./libtcod.so']
HAIKU = True
else:
try:
_lib = ctypes.cdll['./libtcod-mingw.dll']
MINGW=True
except WindowsError:
_lib = ctypes.cdll['./libtcod-VS.dll']
MSVC=True
    # On Windows, ctypes doesn't work well with functions returning structs,
    # so we have to use the _wrapper functions instead
_lib.TCOD_color_multiply = _lib.TCOD_color_multiply_wrapper
_lib.TCOD_color_add = _lib.TCOD_color_add_wrapper
_lib.TCOD_color_multiply_scalar = _lib.TCOD_color_multiply_scalar_wrapper
_lib.TCOD_color_subtract = _lib.TCOD_color_subtract_wrapper
_lib.TCOD_color_lerp = _lib.TCOD_color_lerp_wrapper
_lib.TCOD_console_get_default_background = _lib.TCOD_console_get_default_background_wrapper
_lib.TCOD_console_get_default_foreground = _lib.TCOD_console_get_default_foreground_wrapper
_lib.TCOD_console_get_char_background = _lib.TCOD_console_get_char_background_wrapper
_lib.TCOD_console_get_char_foreground = _lib.TCOD_console_get_char_foreground_wrapper
_lib.TCOD_console_get_fading_color = _lib.TCOD_console_get_fading_color_wrapper
_lib.TCOD_image_get_pixel = _lib.TCOD_image_get_pixel_wrapper
_lib.TCOD_image_get_mipmap_pixel = _lib.TCOD_image_get_mipmap_pixel_wrapper
_lib.TCOD_parser_get_color_property = _lib.TCOD_parser_get_color_property_wrapper
HEXVERSION = 0x010600
STRVERSION = "1.6.0"
TECHVERSION = 0x01060000
############################
# color module
############################
class Color(Structure):
_fields_ = [('r', c_uint8),
('g', c_uint8),
('b', c_uint8),
]
def __eq__(self, c):
return _lib.TCOD_color_equals(self, c)
def __mul__(self, c):
if isinstance(c,Color):
return _lib.TCOD_color_multiply(self, c)
else:
return _lib.TCOD_color_multiply_scalar(self, c_float(c))
def __add__(self, c):
return _lib.TCOD_color_add(self, c)
def __sub__(self, c):
return _lib.TCOD_color_subtract(self, c)
def __repr__(self):
return "Color(%d,%d,%d)" % (self.r, self.g, self.b)
def __getitem__(self, i):
if type(i) == str:
return getattr(self, i)
else:
return getattr(self, "rgb"[i])
def __setitem__(self, i, c):
if type(i) == str:
setattr(self, i, c)
else:
setattr(self, "rgb"[i], c)
def __iter__(self):
yield self.r
yield self.g
yield self.b
# Should be valid on any platform. Has to be done after Color is defined.
if MAC:
from cprotos import setup_protos
setup_protos(_lib)
_lib.TCOD_color_equals.restype = c_bool
_lib.TCOD_color_multiply.restype = Color
_lib.TCOD_color_multiply_scalar.restype = Color
_lib.TCOD_color_add.restype = Color
_lib.TCOD_color_subtract.restype = Color
# default colors
# grey levels
black=Color(0,0,0)
darkest_grey=Color(31,31,31)
darker_grey=Color(63,63,63)
dark_grey=Color(95,95,95)
grey=Color(127,127,127)
light_grey=Color(159,159,159)
lighter_grey=Color(191,191,191)
lightest_grey=Color(223,223,223)
darkest_gray=Color(31,31,31)
darker_gray=Color(63,63,63)
dark_gray=Color(95,95,95)
gray=Color(127,127,127)
light_gray=Color(159,159,159)
lighter_gray=Color(191,191,191)
lightest_gray=Color(223,223,223)
white=Color(255,255,255)
# sepia
darkest_sepia=Color(31,24,15)
darker_sepia=Color(63,50,31)
dark_sepia=Color(94,75,47)
sepia=Color(127,101,63)
light_sepia=Color(158,134,100)
lighter_sepia=Color(191,171,143)
lightest_sepia=Color(222,211,195)
#standard colors
red=Color(255,0,0)
flame=Color(255,63,0)
orange=Color(255,127,0)
amber=Color(255,191,0)
yellow=Color(255,255,0)
lime=Color(191,255,0)
chartreuse=Color(127,255,0)
green=Color(0,255,0)
sea=Color(0,255,127)
turquoise=Color(0,255,191)
cyan=Color(0,255,255)
sky=Color(0,191,255)
azure=Color(0,127,255)
blue=Color(0,0,255)
han=Color(63,0,255)
violet=Color(127,0,255)
purple=Color(191,0,255)
fuchsia=Color(255,0,255)
magenta=Color(255,0,191)
pink=Color(255,0,127)
crimson=Color(255,0,63)
# dark colors
dark_red=Color(191,0,0)
dark_flame=Color(191,47,0)
dark_orange=Color(191,95,0)
dark_amber=Color(191,143,0)
dark_yellow=Color(191,191,0)
dark_lime=Color(143,191,0)
dark_chartreuse=Color(95,191,0)
dark_green=Color(0,191,0)
dark_sea=Color(0,191,95)
dark_turquoise=Color(0,191,143)
dark_cyan=Color(0,191,191)
dark_sky=Color(0,143,191)
dark_azure=Color(0,95,191)
dark_blue=Color(0,0,191)
dark_han=Color(47,0,191)
dark_violet=Color(95,0,191)
dark_purple=Color(143,0,191)
dark_fuchsia=Color(191,0,191)
dark_magenta=Color(191,0,143)
dark_pink=Color(191,0,95)
dark_crimson=Color(191,0,47)
# darker colors
darker_red=Color(127,0,0)
darker_flame=Color(127,31,0)
darker_orange=Color(127,63,0)
darker_amber=Color(127,95,0)
darker_yellow=Color(127,127,0)
darker_lime=Color(95,127,0)
darker_chartreuse=Color(63,127,0)
darker_green=Color(0,127,0)
darker_sea=Color(0,127,63)
darker_turquoise=Color(0,127,95)
darker_cyan=Color(0,127,127)
darker_sky=Color(0,95,127)
darker_azure=Color(0,63,127)
darker_blue=Color(0,0,127)
darker_han=Color(31,0,127)
darker_violet=Color(63,0,127)
darker_purple=Color(95,0,127)
darker_fuchsia=Color(127,0,127)
darker_magenta=Color(127,0,95)
darker_pink=Color(127,0,63)
darker_crimson=Color(127,0,31)
# darkest colors
darkest_red=Color(63,0,0)
darkest_flame=Color(63,15,0)
darkest_orange=Color(63,31,0)
darkest_amber=Color(63,47,0)
darkest_yellow=Color(63,63,0)
darkest_lime=Color(47,63,0)
darkest_chartreuse=Color(31,63,0)
darkest_green=Color(0,63,0)
darkest_sea=Color(0,63,31)
darkest_turquoise=Color(0,63,47)
darkest_cyan=Color(0,63,63)
darkest_sky=Color(0,47,63)
darkest_azure=Color(0,31,63)
darkest_blue=Color(0,0,63)
darkest_han=Color(15,0,63)
darkest_violet=Color(31,0,63)
darkest_purple=Color(47,0,63)
darkest_fuchsia=Color(63,0,63)
darkest_magenta=Color(63,0,47)
darkest_pink=Color(63,0,31)
darkest_crimson=Color(63,0,15)
# light colors
light_red=Color(255,114,114)
light_flame=Color(255,149,114)
light_orange=Color(255,184,114)
light_amber=Color(255,219,114)
light_yellow=Color(255,255,114)
light_lime=Color(219,255,114)
light_chartreuse=Color(184,255,114)
light_green=Color(114,255,114)
light_sea=Color(114,255,184)
light_turquoise=Color(114,255,219)
light_cyan=Color(114,255,255)
light_sky=Color(114,219,255)
light_azure=Color(114,184,255)
light_blue=Color(114,114,255)
light_han=Color(149,114,255)
light_violet=Color(184,114,255)
light_purple=Color(219,114,255)
light_fuchsia=Color(255,114,255)
light_magenta=Color(255,114,219)
light_pink=Color(255,114,184)
light_crimson=Color(255,114,149)
#lighter colors
lighter_red=Color(255,165,165)
lighter_flame=Color(255,188,165)
lighter_orange=Color(255,210,165)
lighter_amber=Color(255,232,165)
lighter_yellow=Color(255,255,165)
lighter_lime=Color(232,255,165)
lighter_chartreuse=Color(210,255,165)
lighter_green=Color(165,255,165)
lighter_sea=Color(165,255,210)
lighter_turquoise=Color(165,255,232)
lighter_cyan=Color(165,255,255)
lighter_sky=Color(165,232,255)
lighter_azure=Color(165,210,255)
lighter_blue=Color(165,165,255)
lighter_han=Color(188,165,255)
lighter_violet=Color(210,165,255)
lighter_purple=Color(232,165,255)
lighter_fuchsia=Color(255,165,255)
lighter_magenta=Color(255,165,232)
lighter_pink=Color(255,165,210)
lighter_crimson=Color(255,165,188)
# lightest colors
lightest_red=Color(255,191,191)
lightest_flame=Color(255,207,191)
lightest_orange=Color(255,223,191)
lightest_amber=Color(255,239,191)
lightest_yellow=Color(255,255,191)
lightest_lime=Color(239,255,191)
lightest_chartreuse=Color(223,255,191)
lightest_green=Color(191,255,191)
lightest_sea=Color(191,255,223)
lightest_turquoise=Color(191,255,239)
lightest_cyan=Color(191,255,255)
lightest_sky=Color(191,239,255)
lightest_azure=Color(191,223,255)
lightest_blue=Color(191,191,255)
lightest_han=Color(207,191,255)
lightest_violet=Color(223,191,255)
lightest_purple=Color(239,191,255)
lightest_fuchsia=Color(255,191,255)
lightest_magenta=Color(255,191,239)
lightest_pink=Color(255,191,223)
lightest_crimson=Color(255,191,207)
# desaturated colors
desaturated_red=Color(127,63,63)
desaturated_flame=Color(127,79,63)
desaturated_orange=Color(127,95,63)
desaturated_amber=Color(127,111,63)
desaturated_yellow=Color(127,127,63)
desaturated_lime=Color(111,127,63)
desaturated_chartreuse=Color(95,127,63)
desaturated_green=Color(63,127,63)
desaturated_sea=Color(63,127,95)
desaturated_turquoise=Color(63,127,111)
desaturated_cyan=Color(63,127,127)
desaturated_sky=Color(63,111,127)
desaturated_azure=Color(63,95,127)
desaturated_blue=Color(63,63,127)
desaturated_han=Color(79,63,127)
desaturated_violet=Color(95,63,127)
desaturated_purple=Color(111,63,127)
desaturated_fuchsia=Color(127,63,127)
desaturated_magenta=Color(127,63,111)
desaturated_pink=Color(127,63,95)
desaturated_crimson=Color(127,63,79)
# metallic
brass=Color(191,151,96)
copper=Color(197,136,124)
gold=Color(229,191,0)
silver=Color(203,203,203)
# miscellaneous
celadon=Color(172,255,175)
peach=Color(255,159,127)
# color functions
_lib.TCOD_color_lerp.restype = Color
def color_lerp(c1, c2, a):
return _lib.TCOD_color_lerp(c1, c2, c_float(a))
def color_set_hsv(c, h, s, v):
_lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))
def color_get_hsv(c):
h = c_float()
s = c_float()
v = c_float()
_lib.TCOD_color_get_HSV(c, byref(h), byref(s), byref(v))
return h.value, s.value, v.value
def color_scale_HSV(c, scoef, vcoef) :
_lib.TCOD_color_scale_HSV(byref(c),c_float(scoef),c_float(vcoef))
def color_gen_map(colors, indexes):
ccolors = (Color * len(colors))(*colors)
cindexes = (c_int * len(indexes))(*indexes)
cres = (Color * (max(indexes) + 1))()
_lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)
return cres
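# Illustrative usage (a sketch, not part of the original wrapper): round-trip
# a blended color through the HSV helpers. Needs the libtcod library loaded,
# like everything else in this module.
def _color_demo():
    half = color_lerp(red, blue, 0.5)             # midpoint of two preset colors
    h, s, v = color_get_hsv(half)                 # decompose into hue/sat/value
    color_set_hsv(half, h, s, min(1.0, v * 1.2))  # brighten in place
    return half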
############################
# console module
############################
class Key(Structure):
_fields_=[('vk', c_int),
('c', c_uint8),
('pressed', c_bool),
('lalt', c_bool),
('lctrl', c_bool),
('ralt', c_bool),
('rctrl', c_bool),
('shift', c_bool),
]
class ConsoleBuffer:
# simple console that allows direct (fast) access to cells. simplifies
# use of the "fill" functions.
def __init__(self, width, height, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# initialize with given width and height. values to fill the buffer
# are optional, defaults to black with no characters.
self.width = width
self.height = height
self.clear(back_r, back_g, back_b, fore_r, fore_g, fore_b, char)
def clear(self, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# clears the console. values to fill it with are optional, defaults
# to black with no characters.
n = self.width * self.height
self.back_r = [back_r] * n
self.back_g = [back_g] * n
self.back_b = [back_b] * n
self.fore_r = [fore_r] * n
self.fore_g = [fore_g] * n
self.fore_b = [fore_b] * n
self.char = [ord(char)] * n
def __iter__(self):
self.index = -1
return self
def next(self):
self.index = self.index + 1
if self.index == self.width * self.height:
raise StopIteration
return self.__getitem__(self.index)
def __getitem__(self, i):
return [self.back_r[i], self.back_g[i], self.back_b[i], self.fore_r[i], self.fore_g[i], self.fore_b[i], self.char[i]]
def copy(self):
# returns a copy of this ConsoleBuffer.
other = ConsoleBuffer(0, 0)
other.width = self.width
other.height = self.height
other.back_r = list(self.back_r) # make explicit copies of all lists
other.back_g = list(self.back_g)
other.back_b = list(self.back_b)
other.fore_r = list(self.fore_r)
other.fore_g = list(self.fore_g)
other.fore_b = list(self.fore_b)
other.char = list(self.char)
return other
def set_fore(self, x, y, r, g, b, char):
# set the character and foreground color of one cell.
i = self.width * y + x
self.fore_r[i] = r
self.fore_g[i] = g
self.fore_b[i] = b
self.char[i] = char
def set_back(self, x, y, r, g, b):
# set the background color of one cell.
i = self.width * y + x
self.back_r[i] = r
self.back_g[i] = g
self.back_b[i] = b
def get_back(self, x, y):
# get the background color of one cell.
i = self.width * y + x
return [self.back_r[i], self.back_g[i], self.back_b[i]]
def set(self, x, y, back_r, back_g, back_b, fore_r, fore_g, fore_b, char):
# set the background color, foreground color and character of one cell.
i = self.width * y + x
self.back_r[i] = back_r
self.back_g[i] = back_g
self.back_b[i] = back_b
self.fore_r[i] = fore_r
self.fore_g[i] = fore_g
self.fore_b[i] = fore_b
self.char[i] = char
def blit(self, dest, fill_fore=True, fill_back=True):
# use libtcod's "fill" functions to write the buffer to a console.
if (console_get_width(dest) != self.width or
console_get_height(dest) != self.height):
raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')
if fill_back:
_lib.TCOD_console_fill_background(dest, (c_int * len(self.back_r))(*self.back_r), (c_int * len(self.back_g))(*self.back_g), (c_int * len(self.back_b))(*self.back_b))
if fill_fore:
_lib.TCOD_console_fill_foreground(dest, (c_int * len(self.fore_r))(*self.fore_r), (c_int * len(self.fore_g))(*self.fore_g), (c_int * len(self.fore_b))(*self.fore_b))
_lib.TCOD_console_fill_char(dest, (c_int * len(self.char))(*self.char))
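# Illustrative helper (a sketch, not part of the original wrapper): fill one
# cell of a small ConsoleBuffer and read it back. Safe to call before
# console_init_root(), since it never touches the C library.
def _console_buffer_demo():
    buf = ConsoleBuffer(3, 2)                          # 3x2 cells, black and blank
    buf.set(1, 0, 0, 0, 128, 255, 255, 255, ord('@'))  # blue back, white '@'
    # cell (x=1, y=0): [back_r, back_g, back_b, fore_r, fore_g, fore_b, char]
    return buf[buf.width * 0 + 1]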
_lib.TCOD_console_credits_render.restype = c_bool
_lib.TCOD_console_is_fullscreen.restype = c_bool
_lib.TCOD_console_is_window_closed.restype = c_bool
_lib.TCOD_console_has_mouse_focus.restype = c_bool
_lib.TCOD_console_is_active.restype = c_bool
_lib.TCOD_console_get_default_background.restype = Color
_lib.TCOD_console_get_default_foreground.restype = Color
_lib.TCOD_console_get_char_background.restype = Color
_lib.TCOD_console_get_char_foreground.restype = Color
_lib.TCOD_console_get_fading_color.restype = Color
_lib.TCOD_console_is_key_pressed.restype = c_bool
# background rendering modes
BKGND_NONE = 0
BKGND_SET = 1
BKGND_MULTIPLY = 2
BKGND_LIGHTEN = 3
BKGND_DARKEN = 4
BKGND_SCREEN = 5
BKGND_COLOR_DODGE = 6
BKGND_COLOR_BURN = 7
BKGND_ADD = 8
BKGND_ADDA = 9
BKGND_BURN = 10
BKGND_OVERLAY = 11
BKGND_ALPH = 12
BKGND_DEFAULT=13
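# BKGND_ALPHA/BKGND_ADDALPHA pack an alpha coefficient (0.0-1.0) into bits
# 8-15 of the flag, on top of the BKGND_ALPH/BKGND_ADDA base modes.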
def BKGND_ALPHA(a):
return BKGND_ALPH | (int(a * 255) << 8)
def BKGND_ADDALPHA(a):
return BKGND_ADDA | (int(a * 255) << 8)
# non blocking key events types
KEY_PRESSED = 1
KEY_RELEASED = 2
# key codes
KEY_NONE = 0
KEY_ESCAPE = 1
KEY_BACKSPACE = 2
KEY_TAB = 3
KEY_ENTER = 4
KEY_SHIFT = 5
KEY_CONTROL = 6
KEY_ALT = 7
KEY_PAUSE = 8
KEY_CAPSLOCK = 9
KEY_PAGEUP = 10
KEY_PAGEDOWN = 11
KEY_END = 12
KEY_HOME = 13
KEY_UP = 14
KEY_LEFT = 15
KEY_RIGHT = 16
KEY_DOWN = 17
KEY_PRINTSCREEN = 18
KEY_INSERT = 19
KEY_DELETE = 20
KEY_LWIN = 21
KEY_RWIN = 22
KEY_APPS = 23
KEY_0 = 24
KEY_1 = 25
KEY_2 = 26
KEY_3 = 27
KEY_4 = 28
KEY_5 = 29
KEY_6 = 30
KEY_7 = 31
KEY_8 = 32
KEY_9 = 33
KEY_KP0 = 34
KEY_KP1 = 35
KEY_KP2 = 36
KEY_KP3 = 37
KEY_KP4 = 38
KEY_KP5 = 39
KEY_KP6 = 40
KEY_KP7 = 41
KEY_KP8 = 42
KEY_KP9 = 43
KEY_KPADD = 44
KEY_KPSUB = 45
KEY_KPDIV = 46
KEY_KPMUL = 47
KEY_KPDEC = 48
KEY_KPENTER = 49
KEY_F1 = 50
KEY_F2 = 51
KEY_F3 = 52
KEY_F4 = 53
KEY_F5 = 54
KEY_F6 = 55
KEY_F7 = 56
KEY_F8 = 57
KEY_F9 = 58
KEY_F10 = 59
KEY_F11 = 60
KEY_F12 = 61
KEY_NUMLOCK = 62
KEY_SCROLLLOCK = 63
KEY_SPACE = 64
KEY_CHAR = 65
# special chars
# single walls
CHAR_HLINE = 196
CHAR_VLINE = 179
CHAR_NE = 191
CHAR_NW = 218
CHAR_SE = 217
CHAR_SW = 192
CHAR_TEEW = 180
CHAR_TEEE = 195
CHAR_TEEN = 193
CHAR_TEES = 194
CHAR_CROSS = 197
# double walls
CHAR_DHLINE = 205
CHAR_DVLINE = 186
CHAR_DNE = 187
CHAR_DNW = 201
CHAR_DSE = 188
CHAR_DSW = 200
CHAR_DTEEW = 185
CHAR_DTEEE = 204
CHAR_DTEEN = 202
CHAR_DTEES = 203
CHAR_DCROSS = 206
# blocks
CHAR_BLOCK1 = 176
CHAR_BLOCK2 = 177
CHAR_BLOCK3 = 178
# arrows
CHAR_ARROW_N = 24
CHAR_ARROW_S = 25
CHAR_ARROW_E = 26
CHAR_ARROW_W = 27
# arrows without tail
CHAR_ARROW2_N = 30
CHAR_ARROW2_S = 31
CHAR_ARROW2_E = 16
CHAR_ARROW2_W = 17
# double arrows
CHAR_DARROW_H = 29
CHAR_DARROW_V = 18
# GUI stuff
CHAR_CHECKBOX_UNSET = 224
CHAR_CHECKBOX_SET = 225
CHAR_RADIO_UNSET = 9
CHAR_RADIO_SET = 10
# sub-pixel resolution kit
CHAR_SUBP_NW = 226
CHAR_SUBP_NE = 227
CHAR_SUBP_N = 228
CHAR_SUBP_SE = 229
CHAR_SUBP_DIAG = 230
CHAR_SUBP_E = 231
CHAR_SUBP_SW = 232
# misc characters
CHAR_BULLET = 7
CHAR_BULLET_INV = 8
CHAR_BULLET_SQUARE = 254
CHAR_CENT = 189
CHAR_CLUB = 5
CHAR_COPYRIGHT = 184
CHAR_CURRENCY = 207
CHAR_DIAMOND = 4
CHAR_DIVISION = 246
CHAR_EXCLAM_DOUBLE = 19
CHAR_FEMALE = 12
CHAR_FUNCTION = 159
CHAR_GRADE = 248
CHAR_HALF = 171
CHAR_HEART = 3
CHAR_LIGHT = 15
CHAR_MALE = 11
CHAR_MULTIPLICATION = 158
CHAR_NOTE = 13
CHAR_NOTE_DOUBLE = 14
CHAR_ONE_QUARTER = 172
CHAR_PILCROW = 20
CHAR_POUND = 156
CHAR_POW1 = 251
CHAR_POW2 = 253
CHAR_POW3 = 252
CHAR_RESERVED = 169
CHAR_SECTION = 21
CHAR_SMILIE = 1
CHAR_SMILIE_INV = 2
CHAR_SPADE = 6
CHAR_THREE_QUARTERS = 243
CHAR_UMLAUT = 249
CHAR_YEN = 190
# font flags
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_TYPE_GRAYSCALE = 4
FONT_LAYOUT_TCOD = 8
# color control codes
COLCTRL_1=1
COLCTRL_2=2
COLCTRL_3=3
COLCTRL_4=4
COLCTRL_5=5
COLCTRL_NUMBER=5
COLCTRL_FORE_RGB=6
COLCTRL_BACK_RGB=7
COLCTRL_STOP=8
# renderers
RENDERER_GLSL=0
RENDERER_OPENGL=1
RENDERER_SDL=2
NB_RENDERERS=3
# alignment
LEFT=0
RIGHT=1
CENTER=2
# initializing the console
def console_init_root(w, h, title, fullscreen=False, renderer=RENDERER_SDL):
_lib.TCOD_console_init_root(w, h, c_char_p(title), fullscreen, renderer)
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
def console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):
_lib.TCOD_console_set_custom_font(c_char_p(fontFile), flags, nb_char_horiz, nb_char_vertic)
def console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):
if type(asciiCode) == str or type(asciiCode) == bytes:
_lib.TCOD_console_map_ascii_code_to_font(ord(asciiCode), fontCharX,
fontCharY)
else:
_lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX,
fontCharY)
def console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,
fontCharY):
if type(firstAsciiCode) == str or type(firstAsciiCode) == bytes:
_lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes,
fontCharX, fontCharY)
else:
_lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes,
fontCharX, fontCharY)
def console_map_string_to_font(s, fontCharX, fontCharY):
if type(s) == bytes:
_lib.TCOD_console_map_string_to_font(s, fontCharX, fontCharY)
else:
_lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)
def console_is_fullscreen():
return _lib.TCOD_console_is_fullscreen()
def console_set_fullscreen(fullscreen):
_lib.TCOD_console_set_fullscreen(c_int(fullscreen))
def console_is_window_closed():
return _lib.TCOD_console_is_window_closed()
def console_has_mouse_focus():
return _lib.TCOD_console_has_mouse_focus()
def console_is_active():
return _lib.TCOD_console_is_active()
def console_set_window_title(title):
_lib.TCOD_console_set_window_title(c_char_p(title))
def console_credits():
_lib.TCOD_console_credits()
def console_credits_reset():
_lib.TCOD_console_credits_reset()
def console_credits_render(x, y, alpha):
return _lib.TCOD_console_credits_render(x, y, c_int(alpha))
def console_flush():
_lib.TCOD_console_flush()
# drawing on a console
def console_set_default_background(con, col):
_lib.TCOD_console_set_default_background(con, col)
def console_set_default_foreground(con, col):
_lib.TCOD_console_set_default_foreground(con, col)
def console_clear(con):
return _lib.TCOD_console_clear(con)
def console_put_char(con, x, y, c, flag=BKGND_DEFAULT):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char(con, x, y, ord(c), flag)
else:
_lib.TCOD_console_put_char(con, x, y, c, flag)
def console_put_char_ex(con, x, y, c, fore, back):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char_ex(con, x, y, ord(c), fore, back)
else:
_lib.TCOD_console_put_char_ex(con, x, y, c, fore, back)
def console_set_char_background(con, x, y, col, flag=BKGND_SET):
_lib.TCOD_console_set_char_background(con, x, y, col, flag)
def console_set_char_foreground(con, x, y, col):
_lib.TCOD_console_set_char_foreground(con, x, y, col)
def console_set_char(con, x, y, c):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_set_char(con, x, y, ord(c))
else:
_lib.TCOD_console_set_char(con, x, y, c)
def console_set_background_flag(con, flag):
_lib.TCOD_console_set_background_flag(con, c_int(flag))
def console_get_background_flag(con):
return _lib.TCOD_console_get_background_flag(con)
def console_set_alignment(con, alignment):
_lib.TCOD_console_set_alignment(con, c_int(alignment))
def console_get_alignment(con):
return _lib.TCOD_console_get_alignment(con)
def console_print(con, x, y, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print(c_void_p(con), x, y, c_char_p(fmt))
else:
_lib.TCOD_console_print_utf(c_void_p(con), x, y, fmt)
def console_print_ex(con, x, y, flag, alignment, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print_ex(c_void_p(con), x, y, flag, alignment, c_char_p(fmt))
else:
_lib.TCOD_console_print_ex_utf(c_void_p(con), x, y, flag, alignment, fmt)
def console_print_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_print_rect_ex(con, x, y, w, h, flag, alignment, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect_ex(c_void_p(con), x, y, w, h, flag, alignment, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_ex_utf(c_void_p(con), x, y, w, h, flag, alignment, fmt)
def console_get_height_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_get_height_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_get_height_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_rect(con, x, y, w, h, clr, flag=BKGND_DEFAULT):
_lib.TCOD_console_rect(con, x, y, w, h, c_int(clr), flag)
def console_hline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_hline( con, x, y, l, flag)
def console_vline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_vline( con, x, y, l, flag)
def console_print_frame(con, x, y, w, h, clear=True, flag=BKGND_DEFAULT, fmt=0):
_lib.TCOD_console_print_frame(c_void_p(con), x, y, w, h, c_int(clear), flag, c_char_p(fmt))
def console_set_color_control(con,fore,back) :
_lib.TCOD_console_set_color_control(con,fore,back)
def console_get_default_background(con):
return _lib.TCOD_console_get_default_background(con)
def console_get_default_foreground(con):
return _lib.TCOD_console_get_default_foreground(con)
def console_get_char_background(con, x, y):
return _lib.TCOD_console_get_char_background(con, x, y)
def console_get_char_foreground(con, x, y):
return _lib.TCOD_console_get_char_foreground(con, x, y)
def console_get_char(con, x, y):
return _lib.TCOD_console_get_char(con, x, y)
def console_set_fade(fade, fadingColor):
_lib.TCOD_console_set_fade(fade, fadingColor)
##_lib.TCOD_console_set_fade_wrapper(fade, fadingColor)
def console_get_fade():
return _lib.TCOD_console_get_fade().value
def console_get_fading_color():
return _lib.TCOD_console_get_fading_color()
# handling keyboard input
def console_wait_for_keypress(flush):
k=Key()
_lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_bool(flush))
return k
def console_check_for_keypress(flags=KEY_RELEASED):
k=Key()
_lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))
return k
def console_is_key_pressed(key):
return _lib.TCOD_console_is_key_pressed(key)
def console_set_keyboard_repeat(initial_delay, interval):
_lib.TCOD_console_set_keyboard_repeat(initial_delay, interval)
def console_disable_keyboard_repeat():
_lib.TCOD_console_disable_keyboard_repeat()
# using offscreen consoles
def console_new(w, h):
return _lib.TCOD_console_new(w, h)
def console_from_file(filename):
return _lib.TCOD_console_from_file(filename)
def console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):
_lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))
def console_set_key_color(con, col):
_lib.TCOD_console_set_key_color(con, col)
def console_delete(con):
_lib.TCOD_console_delete(con)
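# Illustrative usage (a sketch, not part of the original wrapper): draw on an
# offscreen console and blit it onto the root console (0). Assumes
# console_init_root() has already been called.
def _offscreen_demo():
    side = console_new(10, 5)
    console_print(side, 0, 0, 'side panel')
    console_blit(side, 0, 0, 10, 5, 0, 2, 2)
    console_delete(side)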
# fast color filling
def console_fill_foreground(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int32)
g = numpy.ascontiguousarray(g, dtype=numpy.int32)
b = numpy.ascontiguousarray(b, dtype=numpy.int32)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_foreground(con, cr, cg, cb)
def console_fill_background(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int32)
g = numpy.ascontiguousarray(g, dtype=numpy.int32)
b = numpy.ascontiguousarray(b, dtype=numpy.int32)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_background(con, cr, cg, cb)
def console_fill_char(con,arr) :
if (numpy_available and isinstance(arr, numpy.ndarray) ):
#numpy arrays, use numpy's ctypes functions
arr = numpy.ascontiguousarray(arr, dtype=numpy.int32)
carr = arr.ctypes.data_as(POINTER(c_int))
else:
#otherwise convert using the struct module
carr = struct.pack('%di' % len(arr), *arr)
_lib.TCOD_console_fill_char(con, carr)
def console_load_asc(con, filename) :
_lib.TCOD_console_load_asc(con,filename)
def console_save_asc(con, filename) :
_lib.TCOD_console_save_asc(con,filename)
def console_load_apf(con, filename) :
_lib.TCOD_console_load_apf(con,filename)
def console_save_apf(con, filename) :
_lib.TCOD_console_save_apf(con,filename)
############################
# sys module
############################
_lib.TCOD_sys_get_last_frame_length.restype = c_float
_lib.TCOD_sys_elapsed_seconds.restype = c_float
# high precision time functions
def sys_set_fps(fps):
_lib.TCOD_sys_set_fps(fps)
def sys_get_fps():
return _lib.TCOD_sys_get_fps()
def sys_get_last_frame_length():
return _lib.TCOD_sys_get_last_frame_length()
def sys_sleep_milli(val):
_lib.TCOD_sys_sleep_milli(c_uint(val))
def sys_elapsed_milli():
return _lib.TCOD_sys_elapsed_milli()
def sys_elapsed_seconds():
return _lib.TCOD_sys_elapsed_seconds()
def sys_set_renderer(renderer):
_lib.TCOD_sys_set_renderer(renderer)
def sys_get_renderer():
return _lib.TCOD_sys_get_renderer()
# easy screenshots
def sys_save_screenshot(name=0):
_lib.TCOD_sys_save_screenshot(c_char_p(name))
# custom fullscreen resolution
def sys_force_fullscreen_resolution(width, height):
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
def sys_get_current_resolution():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_current_resolution(byref(w), byref(h))
return w.value, h.value
def sys_get_char_size():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_char_size(byref(w), byref(h))
return w.value, h.value
# update font bitmap
def sys_update_char(asciiCode, fontx, fonty, img, x, y) :
_lib.TCOD_sys_update_char(c_int(asciiCode),c_int(fontx),c_int(fonty),img,c_int(x),c_int(y))
# custom SDL post renderer
SDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)
def sys_register_SDL_renderer(callback):
global sdl_renderer_func
sdl_renderer_func = SDL_RENDERER_FUNC(callback)
_lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)
# events
EVENT_NONE=0
EVENT_KEY_PRESS=1
EVENT_KEY_RELEASE=2
EVENT_KEY=EVENT_KEY_PRESS|EVENT_KEY_RELEASE
EVENT_MOUSE_MOVE=4
EVENT_MOUSE_PRESS=8
EVENT_MOUSE_RELEASE=16
EVENT_MOUSE=EVENT_MOUSE_MOVE|EVENT_MOUSE_PRESS|EVENT_MOUSE_RELEASE
EVENT_ANY=EVENT_KEY|EVENT_MOUSE
def sys_check_for_event(mask,k,m) :
return _lib.TCOD_sys_check_for_event(c_int(mask),byref(k),byref(m))
def sys_wait_for_event(mask,k,m,flush) :
return _lib.TCOD_sys_wait_for_event(c_int(mask),byref(k),byref(m),c_bool(flush))
############################
# line module
############################
_lib.TCOD_line_step.restype = c_bool
_lib.TCOD_line.restype=c_bool
_lib.TCOD_line_step_mt.restype = c_bool
def line_init(xo, yo, xd, yd):
_lib.TCOD_line_init(xo, yo, xd, yd)
def line_step():
x = c_int()
y = c_int()
ret = _lib.TCOD_line_step(byref(x), byref(y))
if not ret:
return x.value, y.value
return None,None
def line(xo,yo,xd,yd,py_callback) :
LINE_CBK_FUNC=CFUNCTYPE(c_bool,c_int,c_int)
c_callback=LINE_CBK_FUNC(py_callback)
return _lib.TCOD_line(xo,yo,xd,yd,c_callback)
def line_iter(xo, yo, xd, yd):
data = (c_int * 9)() # struct TCOD_bresenham_data_t
_lib.TCOD_line_init_mt(xo, yo, xd, yd, data)
x = c_int(xo)
y = c_int(yo)
done = False
while not done:
yield x.value, y.value
done = _lib.TCOD_line_step_mt(byref(x), byref(y), data)
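# Illustrative usage (a sketch, not part of the original wrapper): collect the
# cells of a Bresenham line eagerly instead of via the callback API.
def _line_demo(xo=0, yo=0, xd=5, yd=3):
    return list(line_iter(xo, yo, xd, yd))  # one (x, y) pair per cell walked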
############################
# image module
############################
_lib.TCOD_image_is_pixel_transparent.restype = c_bool
_lib.TCOD_image_get_pixel.restype = Color
_lib.TCOD_image_get_mipmap_pixel.restype = Color
def image_new(width, height):
return _lib.TCOD_image_new(width, height)
def image_clear(image,col) :
_lib.TCOD_image_clear(image,col)
def image_invert(image) :
_lib.TCOD_image_invert(image)
def image_hflip(image) :
_lib.TCOD_image_hflip(image)
def image_rotate90(image, num=1) :
_lib.TCOD_image_rotate90(image,num)
def image_vflip(image) :
_lib.TCOD_image_vflip(image)
def image_scale(image, neww, newh) :
_lib.TCOD_image_scale(image,c_int(neww),c_int(newh))
def image_set_key_color(image,col) :
_lib.TCOD_image_set_key_color(image,col)
def image_get_alpha(image,x,y) :
return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))
def image_is_pixel_transparent(image,x,y) :
return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y))
def image_load(filename):
return _lib.TCOD_image_load(c_char_p(filename))
def image_from_console(console):
return _lib.TCOD_image_from_console(console)
def image_refresh_console(image, console):
_lib.TCOD_image_refresh_console(image, console)
def image_get_size(image):
w=c_int()
h=c_int()
_lib.TCOD_image_get_size(image, byref(w), byref(h))
return w.value, h.value
def image_get_pixel(image, x, y):
return _lib.TCOD_image_get_pixel(image, x, y)
def image_get_mipmap_pixel(image, x0, y0, x1, y1):
return _lib.TCOD_image_get_mipmap_pixel(image, c_float(x0), c_float(y0),
c_float(x1), c_float(y1))
def image_put_pixel(image, x, y, col):
_lib.TCOD_image_put_pixel(image, x, y, col)
##_lib.TCOD_image_put_pixel_wrapper(image, x, y, col)
def image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):
_lib.TCOD_image_blit(image, console, c_float(x), c_float(y), bkgnd_flag,
c_float(scalex), c_float(scaley), c_float(angle))
def image_blit_rect(image, console, x, y, w, h, bkgnd_flag):
_lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)
def image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):
_lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)
def image_save(image, filename):
_lib.TCOD_image_save(image, c_char_p(filename))
def image_delete(image):
_lib.TCOD_image_delete(image)
############################
# mouse module
############################
class Mouse(Structure):
_fields_=[('x', c_int),
('y', c_int),
('dx', c_int),
('dy', c_int),
('cx', c_int),
('cy', c_int),
('dcx', c_int),
('dcy', c_int),
('lbutton', c_bool),
('rbutton', c_bool),
('mbutton', c_bool),
('lbutton_pressed', c_bool),
('rbutton_pressed', c_bool),
('mbutton_pressed', c_bool),
('wheel_up', c_bool),
('wheel_down', c_bool),
]
_lib.TCOD_mouse_is_cursor_visible.restype = c_bool
def mouse_show_cursor(visible):
_lib.TCOD_mouse_show_cursor(c_int(visible))
def mouse_is_cursor_visible():
return _lib.TCOD_mouse_is_cursor_visible()
def mouse_move(x, y):
_lib.TCOD_mouse_move(x, y)
def mouse_get_status():
mouse=Mouse()
_lib.TCOD_mouse_get_status_wrapper(byref(mouse))
return mouse
############################
# parser module
############################
_lib.TCOD_struct_get_name.restype = c_char_p
_lib.TCOD_struct_is_mandatory.restype = c_bool
_lib.TCOD_parser_has_property.restype = c_bool
_lib.TCOD_parser_get_bool_property.restype = c_bool
_lib.TCOD_parser_get_float_property.restype = c_float
_lib.TCOD_parser_get_string_property.restype = c_char_p
_lib.TCOD_parser_get_color_property.restype = Color
class Dice(Structure):
_fields_=[('nb_dices', c_int),
('nb_faces', c_int),
('multiplier', c_float),
('addsub', c_float),
]
def __repr__(self):
return "Dice(%d, %d, %s, %s)" % (self.nb_dices, self.nb_faces,
self.multiplier, self.addsub)
class _CValue(Union):
_fields_=[('c',c_uint8),
('i',c_int),
('f',c_float),
('s',c_char_p),
# JBR03192012 See http://bugs.python.org/issue14354 for why these are not defined as their actual types
('col',c_uint8 * 3),
('dice',c_int * 4),
('custom',c_void_p),
]
_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)
_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)
_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)
class _CParserListener(Structure):
_fields_=[('new_struct', _CFUNC_NEW_STRUCT),
('new_flag',_CFUNC_NEW_FLAG),
('new_property',_CFUNC_NEW_PROPERTY),
('end_struct',_CFUNC_NEW_STRUCT),
('error',_CFUNC_NEW_FLAG),
]
# property types
TYPE_NONE = 0
TYPE_BOOL = 1
TYPE_CHAR = 2
TYPE_INT = 3
TYPE_FLOAT = 4
TYPE_STRING = 5
TYPE_COLOR = 6
TYPE_DICE = 7
TYPE_VALUELIST00 = 8
TYPE_VALUELIST01 = 9
TYPE_VALUELIST02 = 10
TYPE_VALUELIST03 = 11
TYPE_VALUELIST04 = 12
TYPE_VALUELIST05 = 13
TYPE_VALUELIST06 = 14
TYPE_VALUELIST07 = 15
TYPE_VALUELIST08 = 16
TYPE_VALUELIST09 = 17
TYPE_VALUELIST10 = 18
TYPE_VALUELIST11 = 19
TYPE_VALUELIST12 = 20
TYPE_VALUELIST13 = 21
TYPE_VALUELIST14 = 22
TYPE_VALUELIST15 = 23
TYPE_LIST = 1024
def _convert_TCODList(clist, typ):
res = list()
for i in range(_lib.TCOD_list_size(clist)):
elt = _lib.TCOD_list_get(clist, i)
elt = cast(elt, c_void_p)
if typ == TYPE_BOOL:
elt = c_bool.from_buffer(elt).value
elif typ == TYPE_CHAR:
elt = c_char.from_buffer(elt).value
elif typ == TYPE_INT:
elt = c_int.from_buffer(elt).value
elif typ == TYPE_FLOAT:
elt = c_float.from_buffer(elt).value
elif typ == TYPE_STRING or TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
elt = cast(elt, c_char_p).value
elif typ == TYPE_COLOR:
elt = Color.from_buffer_copy(elt)
elif typ == TYPE_DICE:
            # known issue: this Dice conversion does not work correctly
elt = Dice.from_buffer_copy(elt)
res.append(elt)
return res
def parser_new():
return _lib.TCOD_parser_new()
def parser_new_struct(parser, name):
return _lib.TCOD_parser_new_struct(parser, name)
def struct_add_flag(struct, name):
_lib.TCOD_struct_add_flag(struct, name)
def struct_add_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_property(struct, name, typ, c_bool(mandatory))
def struct_add_value_list(struct, name, value_list, mandatory):
CARRAY = c_char_p * (len(value_list) + 1)
cvalue_list = CARRAY()
for i in range(len(value_list)):
cvalue_list[i] = cast(value_list[i], c_char_p)
cvalue_list[len(value_list)] = 0
_lib.TCOD_struct_add_value_list(struct, name, cvalue_list, c_bool(mandatory))
def struct_add_list_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_list_property(struct, name, typ, c_bool(mandatory))
def struct_add_structure(struct, sub_struct):
_lib.TCOD_struct_add_structure(struct, sub_struct)
def struct_get_name(struct):
return _lib.TCOD_struct_get_name(struct)
def struct_is_mandatory(struct, name):
return _lib.TCOD_struct_is_mandatory(struct, name)
def struct_get_type(struct, name):
return _lib.TCOD_struct_get_type(struct, name)
def parser_run(parser, filename, listener=0):
if listener != 0:
clistener=_CParserListener()
def value_converter(name, typ, value):
if typ == TYPE_BOOL:
return listener.new_property(name, typ, value.c == 1)
elif typ == TYPE_CHAR:
return listener.new_property(name, typ, '%c' % (value.c & 0xFF))
elif typ == TYPE_INT:
return listener.new_property(name, typ, value.i)
elif typ == TYPE_FLOAT:
return listener.new_property(name, typ, value.f)
elif typ == TYPE_STRING or \
TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
return listener.new_property(name, typ, value.s)
elif typ == TYPE_COLOR:
col = cast(value.col, POINTER(Color)).contents
return listener.new_property(name, typ, col)
elif typ == TYPE_DICE:
dice = cast(value.dice, POINTER(Dice)).contents
return listener.new_property(name, typ, dice)
elif typ & TYPE_LIST:
return listener.new_property(name, typ,
_convert_TCODList(value.custom, typ & 0xFF))
return True
clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)
clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)
clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)
clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)
clistener.error = _CFUNC_NEW_FLAG(listener.error)
_lib.TCOD_parser_run(parser, c_char_p(filename), byref(clistener))
else:
_lib.TCOD_parser_run(parser, c_char_p(filename), 0)
def parser_delete(parser):
_lib.TCOD_parser_delete(parser)
def parser_has_property(parser, name):
return _lib.TCOD_parser_has_property(parser, c_char_p(name))
def parser_get_bool_property(parser, name):
return _lib.TCOD_parser_get_bool_property(parser, c_char_p(name))
def parser_get_int_property(parser, name):
return _lib.TCOD_parser_get_int_property(parser, c_char_p(name))
def parser_get_char_property(parser, name):
return '%c' % _lib.TCOD_parser_get_char_property(parser, c_char_p(name))
def parser_get_float_property(parser, name):
return _lib.TCOD_parser_get_float_property(parser, c_char_p(name))
def parser_get_string_property(parser, name):
return _lib.TCOD_parser_get_string_property(parser, c_char_p(name))
def parser_get_color_property(parser, name):
return _lib.TCOD_parser_get_color_property(parser, c_char_p(name))
def parser_get_dice_property(parser, name):
d = Dice()
_lib.TCOD_parser_get_dice_property_py(c_void_p(parser), c_char_p(name), byref(d))
return d
def parser_get_list_property(parser, name, typ):
clist = _lib.TCOD_parser_get_list_property(parser, c_char_p(name), c_int(typ))
return _convert_TCODList(clist, typ)
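# Illustrative listener (a sketch, not part of the original wrapper): the
# duck-typed interface parser_run() expects when a listener is supplied.
# Each callback returns a truthy value to continue parsing.
class _NullParserListener:
    def new_struct(self, struct, name):
        return True
    def new_flag(self, name):
        return True
    def new_property(self, name, typ, value):
        return True
    def end_struct(self, struct, name):
        return True
    def error(self, msg):
        return False  # stop on parse errors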
############################
# random module
############################
_lib.TCOD_random_get_float.restype = c_float
_lib.TCOD_random_get_double.restype = c_double
RNG_MT = 0
RNG_CMWC = 1
DISTRIBUTION_LINEAR = 0
DISTRIBUTION_GAUSSIAN = 1
DISTRIBUTION_GAUSSIAN_RANGE = 2
DISTRIBUTION_GAUSSIAN_INVERSE = 3
DISTRIBUTION_GAUSSIAN_RANGE_INVERSE = 4
def random_get_instance():
return _lib.TCOD_random_get_instance()
def random_new(algo=RNG_CMWC):
return _lib.TCOD_random_new(algo)
def random_new_from_seed(seed, algo=RNG_CMWC):
return _lib.TCOD_random_new_from_seed(algo,c_uint(seed))
def random_set_distribution(rnd, dist) :
_lib.TCOD_random_set_distribution(rnd, dist)
def random_get_int(rnd, mi, ma):
return _lib.TCOD_random_get_int(rnd, mi, ma)
def random_get_float(rnd, mi, ma):
return _lib.TCOD_random_get_float(rnd, c_float(mi), c_float(ma))
def random_get_double(rnd, mi, ma):
return _lib.TCOD_random_get_double(rnd, c_double(mi), c_double(ma))
def random_get_int_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_int_mean(rnd, mi, ma, mean)
def random_get_float_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_float_mean(rnd, c_float(mi), c_float(ma), c_float(mean))
def random_get_double_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_double_mean(rnd, c_double(mi), c_double(ma), c_double(mean))
def random_save(rnd):
return _lib.TCOD_random_save(rnd)
def random_restore(rnd, backup):
_lib.TCOD_random_restore(rnd, backup)
def random_delete(rnd):
_lib.TCOD_random_delete(rnd)
############################
# noise module
############################
_lib.TCOD_noise_get.restype = c_float
_lib.TCOD_noise_get_ex.restype = c_float
_lib.TCOD_noise_get_fbm.restype = c_float
_lib.TCOD_noise_get_fbm_ex.restype = c_float
_lib.TCOD_noise_get_turbulence.restype = c_float
_lib.TCOD_noise_get_turbulence_ex.restype = c_float
NOISE_DEFAULT_HURST = 0.5
NOISE_DEFAULT_LACUNARITY = 2.0
NOISE_DEFAULT = 0
NOISE_PERLIN = 1
NOISE_SIMPLEX = 2
NOISE_WAVELET = 4
_NOISE_PACKER_FUNC = (None,
(c_float * 1),
(c_float * 2),
(c_float * 3),
(c_float * 4),
)
def noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, random=0):
return _lib.TCOD_noise_new(dim, c_float(h), c_float(l), random)
def noise_set_type(n, typ) :
_lib.TCOD_noise_set_type(n,typ)
def noise_get(n, f, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), typ)
def noise_get_fbm(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_fbm_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_get_turbulence(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_turbulence_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_delete(n):
_lib.TCOD_noise_delete(n)
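# Usage sketch for the noise bindings above: coordinates are plain Python
# sequences, packed into a c_float array by _NOISE_PACKER_FUNC based on length.
def _noise_example():
    n = noise_new(2)                           # a 2D noise generator
    v = noise_get(n, [0.5, 0.7])               # one sample, roughly in [-1, 1]
    f = noise_get_fbm(n, [0.5, 0.7], 4.0)      # fractal brownian motion, 4 octaves
    noise_delete(n)
    return v, f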
############################
# fov module
############################
_lib.TCOD_map_is_in_fov.restype = c_bool
_lib.TCOD_map_is_transparent.restype = c_bool
_lib.TCOD_map_is_walkable.restype = c_bool
FOV_BASIC = 0
FOV_DIAMOND = 1
FOV_SHADOW = 2
FOV_PERMISSIVE_0 = 3
FOV_PERMISSIVE_1 = 4
FOV_PERMISSIVE_2 = 5
FOV_PERMISSIVE_3 = 6
FOV_PERMISSIVE_4 = 7
FOV_PERMISSIVE_5 = 8
FOV_PERMISSIVE_6 = 9
FOV_PERMISSIVE_7 = 10
FOV_PERMISSIVE_8 = 11
FOV_RESTRICTIVE = 12
NB_FOV_ALGORITHMS = 13
def FOV_PERMISSIVE(p) :
return FOV_PERMISSIVE_0+p
def map_new(w, h):
return _lib.TCOD_map_new(w, h)
def map_copy(source, dest):
return _lib.TCOD_map_copy(source, dest)
def map_set_properties(m, x, y, isTrans, isWalk):
_lib.TCOD_map_set_properties(m, x, y, c_int(isTrans), c_int(isWalk))
def map_clear(m,walkable=False,transparent=False):
_lib.TCOD_map_clear(m,c_int(walkable),c_int(transparent))
def map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):
_lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_bool(light_walls), c_int(algo))
def map_is_in_fov(m, x, y):
return _lib.TCOD_map_is_in_fov(m, x, y)
def map_is_transparent(m, x, y):
return _lib.TCOD_map_is_transparent(m, x, y)
def map_is_walkable(m, x, y):
return _lib.TCOD_map_is_walkable(m, x, y)
def map_delete(m):
return _lib.TCOD_map_delete(m)
def map_get_width(map):
return _lib.TCOD_map_get_width(map)
def map_get_height(map):
return _lib.TCOD_map_get_height(map)
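# Usage sketch for the fov bindings above: build a small map, open one cell,
# then compute field of view from it with the shadow-casting algorithm.
def _fov_example():
    m = map_new(10, 10)
    map_clear(m, walkable=False, transparent=False)
    map_set_properties(m, 5, 5, True, True)    # transparent and walkable
    map_compute_fov(m, 5, 5, radius=4, light_walls=True, algo=FOV_SHADOW)
    visible = map_is_in_fov(m, 6, 5)
    map_delete(m)
    return visible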
############################
# pathfinding module
############################
_lib.TCOD_path_compute.restype = c_bool
_lib.TCOD_path_is_empty.restype = c_bool
_lib.TCOD_path_walk.restype = c_bool
PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
def path_new_using_map(m, dcost=1.41):
return (_lib.TCOD_path_new_using_map(c_void_p(m), c_float(dcost)), None)
def path_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
return (_lib.TCOD_path_new_using_function(w, h, cbk_func,
py_object(userdata), c_float(dcost)), cbk_func)
def path_compute(p, ox, oy, dx, dy):
return _lib.TCOD_path_compute(p[0], ox, oy, dx, dy)
def path_get_origin(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_origin(p[0], byref(x), byref(y))
return x.value, y.value
def path_get_destination(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_destination(p[0], byref(x), byref(y))
return x.value, y.value
def path_size(p):
return _lib.TCOD_path_size(p[0])
def path_reverse(p):
_lib.TCOD_path_reverse(p[0])
def path_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_path_get(p[0], idx, byref(x), byref(y))
return x.value, y.value
def path_is_empty(p):
return _lib.TCOD_path_is_empty(p[0])
def path_walk(p, recompute):
x = c_int()
y = c_int()
if _lib.TCOD_path_walk(p[0], byref(x), byref(y), c_int(recompute)):
return x.value, y.value
return None,None
def path_delete(p):
_lib.TCOD_path_delete(p[0])
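# Usage sketch for the A* helpers above. A "path" here is the (pointer,
# callback) tuple returned by path_new_using_map(); walking consumes cells
# until the path is empty.
def _path_example(m):
    p = path_new_using_map(m, dcost=1.41)
    steps = []
    if path_compute(p, 0, 0, 5, 5):
        while not path_is_empty(p):
            x, y = path_walk(p, True)
            steps.append((x, y))
    path_delete(p)
    return steps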
_lib.TCOD_dijkstra_path_set.restype = c_bool
_lib.TCOD_dijkstra_is_empty.restype = c_bool
_lib.TCOD_dijkstra_path_walk.restype = c_bool
_lib.TCOD_dijkstra_get_distance.restype = c_float
def dijkstra_new(m, dcost=1.41):
return (_lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost)), None)
def dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
return (_lib.TCOD_path_dijkstra_using_function(w, h, cbk_func,
py_object(userdata), c_float(dcost)), cbk_func)
def dijkstra_compute(p, ox, oy):
_lib.TCOD_dijkstra_compute(p[0], c_int(ox), c_int(oy))
def dijkstra_path_set(p, x, y):
return _lib.TCOD_dijkstra_path_set(p[0], c_int(x), c_int(y))
def dijkstra_get_distance(p, x, y):
return _lib.TCOD_dijkstra_get_distance(p[0], c_int(x), c_int(y))
def dijkstra_size(p):
return _lib.TCOD_dijkstra_size(p[0])
def dijkstra_reverse(p):
_lib.TCOD_dijkstra_reverse(p[0])
def dijkstra_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_dijkstra_get(p[0], c_int(idx), byref(x), byref(y))
return x.value, y.value
def dijkstra_is_empty(p):
return _lib.TCOD_dijkstra_is_empty(p[0])
def dijkstra_path_walk(p):
x = c_int()
y = c_int()
if _lib.TCOD_dijkstra_path_walk(p[0], byref(x), byref(y)):
return x.value, y.value
return None,None
def dijkstra_delete(p):
_lib.TCOD_dijkstra_delete(p[0])
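# Usage sketch for the Dijkstra helpers above: unlike A*, the distance field
# is computed once from an origin, after which paths to several destinations
# can be extracted cheaply.
def _dijkstra_example(m):
    d = dijkstra_new(m, dcost=1.41)
    dijkstra_compute(d, 0, 0)                  # flood the whole map from (0, 0)
    dist = None
    if dijkstra_path_set(d, 5, 5):             # select one destination
        dist = dijkstra_get_distance(d, 5, 5)
        while not dijkstra_is_empty(d):
            x, y = dijkstra_path_walk(d)
    dijkstra_delete(d)
    return dist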
############################
# bsp module
############################
class _CBsp(Structure):
_fields_ = [('next', c_void_p),
('father', c_void_p),
('son', c_void_p),
('x', c_int),
('y', c_int),
('w', c_int),
('h', c_int),
('position', c_int),
('level', c_uint8),
('horizontal', c_bool),
]
_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)
_lib.TCOD_bsp_left.restype = POINTER(_CBsp)
_lib.TCOD_bsp_right.restype = POINTER(_CBsp)
_lib.TCOD_bsp_father.restype = POINTER(_CBsp)
_lib.TCOD_bsp_is_leaf.restype = c_bool
_lib.TCOD_bsp_contains.restype = c_bool
_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)
BSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
# python class encapsulating the _CBsp pointer
class Bsp(object):
def __init__(self, cnode):
pcbsp = cast(cnode, POINTER(_CBsp))
self.p = pcbsp
def getx(self):
return self.p.contents.x
def setx(self, value):
self.p.contents.x = value
x = property(getx, setx)
def gety(self):
return self.p.contents.y
def sety(self, value):
self.p.contents.y = value
y = property(gety, sety)
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def getpos(self):
return self.p.contents.position
def setpos(self, value):
self.p.contents.position = value
position = property(getpos, setpos)
def gethor(self):
return self.p.contents.horizontal
def sethor(self,value):
self.p.contents.horizontal = value
horizontal = property(gethor, sethor)
def getlev(self):
return self.p.contents.level
def setlev(self,value):
self.p.contents.level = value
level = property(getlev, setlev)
def bsp_new_with_size(x, y, w, h):
return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))
def bsp_split_once(node, horizontal, position):
_lib.TCOD_bsp_split_once(node.p, c_int(horizontal), position)
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
maxVRatio):
_lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,
c_float(maxHRatio), c_float(maxVRatio))
def bsp_resize(node, x, y, w, h):
_lib.TCOD_bsp_resize(node.p, x, y, w, h)
def bsp_left(node):
return Bsp(_lib.TCOD_bsp_left(node.p))
def bsp_right(node):
return Bsp(_lib.TCOD_bsp_right(node.p))
def bsp_father(node):
return Bsp(_lib.TCOD_bsp_father(node.p))
def bsp_is_leaf(node):
return _lib.TCOD_bsp_is_leaf(node.p)
def bsp_contains(node, cx, cy):
return _lib.TCOD_bsp_contains(node.p, cx, cy)
def bsp_find_node(node, cx, cy):
return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))
def _bsp_traverse(node, callback, userData, func):
# convert the c node into a python node
#before passing it to the actual callback
def node_converter(cnode, data):
node = Bsp(cnode)
return callback(node, data)
cbk_func = BSP_CBK_FUNC(node_converter)
func(node.p, cbk_func, userData)
def bsp_traverse_pre_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)
def bsp_traverse_in_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)
def bsp_traverse_post_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)
def bsp_traverse_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)
def bsp_traverse_inverted_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData,
_lib.TCOD_bsp_traverse_inverted_level_order)
def bsp_remove_sons(node):
_lib.TCOD_bsp_remove_sons(node.p)
def bsp_delete(node):
_lib.TCOD_bsp_delete(node.p)
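# Usage sketch for the BSP helpers above: split a root node recursively and
# collect the leaves. The traversal callback receives (Bsp, userData) and
# returns True to continue; randomizer 0 is assumed to select the default RNG.
def _bsp_example():
    root = bsp_new_with_size(0, 0, 80, 50)
    bsp_split_recursive(root, 0, nb=4, minHSize=5, minVSize=5,
                        maxHRatio=1.5, maxVRatio=1.5)
    leaves = []
    def collect(node, userData):
        if bsp_is_leaf(node):
            leaves.append((node.x, node.y, node.w, node.h))
        return True
    bsp_traverse_in_order(root, collect)
    bsp_delete(root)
    return leaves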
############################
# heightmap module
############################
class _CHeightMap(Structure):
_fields_=[('w', c_int),
('h', c_int),
('values', POINTER(c_float)),
]
_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)
_lib.TCOD_heightmap_get_value.restype = c_float
_lib.TCOD_heightmap_has_land_on_border.restype = c_bool
class HeightMap(object):
def __init__(self, chm):
pchm = cast(chm, POINTER(_CHeightMap))
self.p = pchm
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def heightmap_new(w, h):
phm = _lib.TCOD_heightmap_new(w, h)
return HeightMap(phm)
def heightmap_set_value(hm, x, y, value):
_lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))
def heightmap_add(hm, value):
_lib.TCOD_heightmap_add(hm.p, c_float(value))
def heightmap_scale(hm, value):
_lib.TCOD_heightmap_scale(hm.p, c_float(value))
def heightmap_clear(hm):
_lib.TCOD_heightmap_clear(hm.p)
def heightmap_clamp(hm, mi, ma):
_lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))
def heightmap_copy(hm1, hm2):
_lib.TCOD_heightmap_copy(hm1.p, hm2.p)
def heightmap_normalize(hm, mi=0.0, ma=1.0):
_lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))
def heightmap_lerp_hm(hm1, hm2, hm3, coef):
_lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))
def heightmap_add_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)
def heightmap_multiply_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)
def heightmap_add_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
def heightmap_dig_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
def heightmap_mid_point_displacement(hm, rng, roughness):
_lib.TCOD_heightmap_mid_point_displacement(hm.p, rng, c_float(roughness))
def heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):
_lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),
c_float( sedimentationCoef), rnd)
def heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,
maxLevel):
FARRAY = c_float * kernelsize
IARRAY = c_int * kernelsize
cdx = IARRAY(*dx)
cdy = IARRAY(*dy)
cweight = FARRAY(*weight)
_lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,
c_float(minLevel), c_float(maxLevel))
def heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):
FARRAY = c_float * nbCoef
ccoef = FARRAY(*coef)
_lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)
def heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):
_lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
def heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,
scale):
_lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
def heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,
endDepth):
IARRAY = c_int * 4
cpx = IARRAY(*px)
cpy = IARRAY(*py)
_lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),
c_float(startDepth), c_float(endRadius),
c_float(endDepth))
def heightmap_get_value(hm, x, y):
return _lib.TCOD_heightmap_get_value(hm.p, x, y)
def heightmap_get_interpolated_value(hm, x, y):
return _lib.TCOD_heightmap_get_interpolated_value(hm.p, c_float(x),
c_float(y))
def heightmap_get_slope(hm, x, y):
return _lib.TCOD_heightmap_get_slope(hm.p, x, y)
def heightmap_get_normal(hm, x, y, waterLevel):
FARRAY = c_float * 3
cn = FARRAY()
_lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn,
c_float(waterLevel))
return cn[0], cn[1], cn[2]
def heightmap_count_cells(hm, mi, ma):
return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))
def heightmap_has_land_on_border(hm, waterlevel):
return _lib.TCOD_heightmap_has_land_on_border(hm.p, c_float(waterlevel))
def heightmap_get_minmax(hm):
mi = c_float()
ma = c_float()
_lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))
return mi.value, ma.value
def heightmap_delete(hm):
_lib.TCOD_heightmap_delete(hm.p)
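# Usage sketch for the heightmap helpers above: raise a hill, normalize into
# [0, 1], and read back the extremes.
def _heightmap_example():
    hm = heightmap_new(64, 64)
    heightmap_add_hill(hm, 32.0, 32.0, 16.0, 1.0)
    heightmap_normalize(hm, 0.0, 1.0)
    mi, ma = heightmap_get_minmax(hm)    # (0.0, 1.0) after normalization
    heightmap_delete(hm)
    return mi, ma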
############################
# name generator module
############################
_lib.TCOD_namegen_generate.restype = c_char_p
_lib.TCOD_namegen_generate_custom.restype = c_char_p
def namegen_parse(filename,random=0) :
_lib.TCOD_namegen_parse(filename,random)
def namegen_generate(name) :
return _lib.TCOD_namegen_generate(name, 0)
def namegen_generate_custom(name, rule) :
    return _lib.TCOD_namegen_generate_custom(name, rule, 0)
def namegen_get_sets():
nb=_lib.TCOD_namegen_get_nb_sets_wrapper()
SARRAY = c_char_p * nb;
setsa = SARRAY()
_lib.TCOD_namegen_get_sets_wrapper(setsa)
return list(setsa)
def namegen_destroy() :
_lib.TCOD_namegen_destroy()
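# Usage sketch for the name generator above. The .cfg path is hypothetical;
# any libtcod namegen syllable file should work.
def _namegen_example():
    namegen_parse('data/namegen/example.cfg')    # hypothetical data file
    sets = namegen_get_sets()                    # set names declared in the file
    name = namegen_generate(sets[0]) if sets else None
    namegen_destroy()
    return name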
|
AnthonyDiGirolamo/heliopause
|
libtcodpy.py
|
Python
|
mit
| 61,827
|
#!/usr/bin/env python
"""Configuration parameters for the test subsystem."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import config_lib
# Default for running in the current directory
config_lib.DEFINE_constant_string(
"Test.srcdir",
"%(grr_response_core|module_path)/../../../",
"The directory containing the source code.")
config_lib.DEFINE_constant_string(
"Test.data_dir",
default="%(grr_response_test/test_data@grr-response-test|resource)",
help="The directory where test data exist.")
config_lib.DEFINE_constant_string(
"Test.additional_test_config",
default="%(Test.data_dir)/localtest.yaml",
help="The path to a test config with local customizations.")
config_lib.DEFINE_string(
"Test.tmpdir", "/tmp/", help="Somewhere to write temporary files.")
config_lib.DEFINE_string("Test.data_store", "FakeDataStore",
"The data store to run the tests against.")
config_lib.DEFINE_integer("Test.remote_pdb_port", 2525, "Remote debugger port.")
config_lib.DEFINE_string("PrivateKeys.ca_key_raw_data", "",
"For testing purposes.")
config_lib.DEFINE_integer("SharedMemoryDB.port", 0,
"Port used to connect to SharedMemoryDB server.")
config_lib.DEFINE_string(
"Mysql.schema_dump_path", "%(grr_response_server/databases/mysql.ddl@"
"grr-response-server|resource)",
"Location of the dumped MySQL schema path.")
|
dunkhong/grr
|
grr/core/grr_response_core/config/test.py
|
Python
|
apache-2.0
| 1,537
|
import mxnet as mx
import logging
import os
import time
def _get_lr_scheduler(args, kv):
if 'lr_factor' not in args or args.lr_factor >= 1:
return (args.lr, None)
epoch_size = args.num_examples / args.batch_size
if 'dist' in args.kv_store:
epoch_size /= kv.num_workers
begin_epoch = args.load_epoch if args.load_epoch else 0
step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
lr = args.lr
for s in step_epochs:
if begin_epoch >= s:
lr *= args.lr_factor
if lr != args.lr:
logging.info('Adjust learning rate to %e for epoch %d' %(lr, begin_epoch))
steps = [epoch_size * (x-begin_epoch) for x in step_epochs if x-begin_epoch > 0]
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))
def _load_model(args, rank=0):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None)
assert args.model_prefix is not None
model_prefix = args.model_prefix
if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
model_prefix += "-%d" % (rank)
sym, arg_params, aux_params = mx.model.load_checkpoint(
model_prefix, args.load_epoch)
logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
return (sym, arg_params, aux_params)
def _save_model(args, rank=0):
if args.model_prefix is None:
return None
dst_dir = os.path.dirname(args.model_prefix)
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
return mx.callback.do_checkpoint(args.model_prefix if rank == 0 else "%s-%d" % (
args.model_prefix, rank))
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--num-nodes', type=int, default=1,
help='number of machines used for training')
train.add_argument('--wd', type=float, default=0.00001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--test-io', type=int, default=0,
help='1 means test reading speed without training')
return train
def fit(args, network, data_loader, init=None, **kwargs):
"""
train a model
args : argparse returns
    network : the symbol definition of the neural network
data_loader : function that returns the train and val data iterators
"""
# kvstore
kv = mx.kvstore.create(args.kv_store)
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# data iterators
(train, val) = data_loader(args, kv)
if args.test_io:
tic = time.time()
for i, batch in enumerate(train):
for j in batch.data:
j.wait_to_read()
if (i+1) % args.disp_batches == 0:
logging.info('Batch [%d]\tSpeed: %.2f samples/sec' % (
i, args.disp_batches*args.batch_size/(time.time()-tic)))
tic = time.time()
return
# load model
if 'arg_params' in kwargs and 'aux_params' in kwargs:
arg_params = kwargs['arg_params']
aux_params = kwargs['aux_params']
else:
sym, arg_params, aux_params = _load_model(args, kv.rank)
if sym is not None:
assert sym.tojson() == network.tojson()
# save model
checkpoint = _save_model(args, kv.rank)
# devices for training
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]
# learning rate
lr, lr_scheduler = _get_lr_scheduler(args, kv)
# create model
model = mx.mod.Module(
context = devs,
symbol = network
)
optimizer_params = {
'learning_rate': lr,
'momentum' : args.mom,
'wd' : args.wd,
'lr_scheduler': lr_scheduler}
monitor = mx.mon.Monitor(args.monitor, pattern=".*") if args.monitor > 0 else None
if init is None:
initializer = mx.initializer.Normal(sigma=0.01)
#initializer = mx.initializer.Xavier( rnd_type='gaussian', factor_type="in", magnitude=2)
# initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
else:
initializer = init
    # evaluation metrics
eval_metrics = ['accuracy', 'ce']
if args.top_k > 0:
eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=args.top_k))
# callbacks that run after each batch
args.disp_batches = int((args.num_examples-args.batch_size)/args.batch_size) - 1
batch_end_callbacks = [mx.callback.Speedometer(args.batch_size, args.disp_batches)]
if 'batch_end_callback' in kwargs:
cbs = kwargs['batch_end_callback']
batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]
# run
model.fit(train,
begin_epoch = args.load_epoch if args.load_epoch else 0,
num_epoch = args.num_epochs,
# eval_data = val,
eval_metric = eval_metrics,
kvstore = kv,
optimizer = args.optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
arg_params = arg_params,
aux_params = aux_params,
batch_end_callback = batch_end_callbacks,
epoch_end_callback = checkpoint,
allow_missing = True,
monitor = monitor)
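# Usage sketch: how a training script typically wires add_fit_args() and
# fit() together. `build_symbol` and `build_data_iters` are hypothetical
# stand-ins for a per-network symbol factory and a data loader that returns
# (train, val) iterators.
def _fit_example(build_symbol, build_data_iters):
    import argparse
    parser = argparse.ArgumentParser(description='example trainer')
    add_fit_args(parser)
    parser.set_defaults(network='mlp', num_examples=60000, num_epochs=2)
    args = parser.parse_args([])            # take the defaults; no CLI needed
    sym = build_symbol(args)                # must return an mx.sym.Symbol
    fit(args, sym, build_data_iters)        # loader signature: (args, kv) -> (train, val)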
|
linmajia/dlbench
|
tools/mxnet/common/fit.py
|
Python
|
mit
| 7,649
|
"""Constants for Glances component."""
from __future__ import annotations
from dataclasses import dataclass
import sys
from homeassistant.components.sensor import SensorDeviceClass, SensorEntityDescription
from homeassistant.const import DATA_GIBIBYTES, DATA_MEBIBYTES, PERCENTAGE, TEMP_CELSIUS
DOMAIN = "glances"
CONF_VERSION = "version"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "Glances"
DEFAULT_PORT = 61208
DEFAULT_VERSION = 3
DEFAULT_SCAN_INTERVAL = 60
DATA_UPDATED = "glances_data_updated"
SUPPORTED_VERSIONS = [2, 3]
if sys.maxsize > 2 ** 32:
CPU_ICON = "mdi:cpu-64-bit"
else:
CPU_ICON = "mdi:cpu-32-bit"
@dataclass
class GlancesSensorEntityDescription(SensorEntityDescription):
"""Describe Glances sensor entity."""
type: str | None = None
name_suffix: str | None = None
SENSOR_TYPES: tuple[GlancesSensorEntityDescription, ...] = (
GlancesSensorEntityDescription(
key="disk_use_percent",
type="fs",
name_suffix="used percent",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:harddisk",
),
GlancesSensorEntityDescription(
key="disk_use",
type="fs",
name_suffix="used",
native_unit_of_measurement=DATA_GIBIBYTES,
icon="mdi:harddisk",
),
GlancesSensorEntityDescription(
key="disk_free",
type="fs",
name_suffix="free",
native_unit_of_measurement=DATA_GIBIBYTES,
icon="mdi:harddisk",
),
GlancesSensorEntityDescription(
key="memory_use_percent",
type="mem",
name_suffix="RAM used percent",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:memory",
),
GlancesSensorEntityDescription(
key="memory_use",
type="mem",
name_suffix="RAM used",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:memory",
),
GlancesSensorEntityDescription(
key="memory_free",
type="mem",
name_suffix="RAM free",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:memory",
),
GlancesSensorEntityDescription(
key="swap_use_percent",
type="memswap",
name_suffix="Swap used percent",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:memory",
),
GlancesSensorEntityDescription(
key="swap_use",
type="memswap",
name_suffix="Swap used",
native_unit_of_measurement=DATA_GIBIBYTES,
icon="mdi:memory",
),
GlancesSensorEntityDescription(
key="swap_free",
type="memswap",
name_suffix="Swap free",
native_unit_of_measurement=DATA_GIBIBYTES,
icon="mdi:memory",
),
GlancesSensorEntityDescription(
key="processor_load",
type="load",
name_suffix="CPU load",
native_unit_of_measurement="15 min",
icon=CPU_ICON,
),
GlancesSensorEntityDescription(
key="process_running",
type="processcount",
name_suffix="Running",
native_unit_of_measurement="Count",
icon=CPU_ICON,
),
GlancesSensorEntityDescription(
key="process_total",
type="processcount",
name_suffix="Total",
native_unit_of_measurement="Count",
icon=CPU_ICON,
),
GlancesSensorEntityDescription(
key="process_thread",
type="processcount",
name_suffix="Thread",
native_unit_of_measurement="Count",
icon=CPU_ICON,
),
GlancesSensorEntityDescription(
key="process_sleeping",
type="processcount",
name_suffix="Sleeping",
native_unit_of_measurement="Count",
icon=CPU_ICON,
),
GlancesSensorEntityDescription(
key="cpu_use_percent",
type="cpu",
name_suffix="CPU used",
native_unit_of_measurement=PERCENTAGE,
icon=CPU_ICON,
),
GlancesSensorEntityDescription(
key="temperature_core",
type="sensors",
name_suffix="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
GlancesSensorEntityDescription(
key="temperature_hdd",
type="sensors",
name_suffix="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
GlancesSensorEntityDescription(
key="fan_speed",
type="sensors",
name_suffix="Fan speed",
native_unit_of_measurement="RPM",
icon="mdi:fan",
),
GlancesSensorEntityDescription(
key="battery",
type="sensors",
name_suffix="Charge",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:battery",
),
GlancesSensorEntityDescription(
key="docker_active",
type="docker",
name_suffix="Containers active",
native_unit_of_measurement="",
icon="mdi:docker",
),
GlancesSensorEntityDescription(
key="docker_cpu_use",
type="docker",
name_suffix="Containers CPU used",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:docker",
),
GlancesSensorEntityDescription(
key="docker_memory_use",
type="docker",
name_suffix="Containers RAM used",
native_unit_of_measurement=DATA_MEBIBYTES,
icon="mdi:docker",
),
GlancesSensorEntityDescription(
key="used",
type="raid",
name_suffix="Raid used",
icon="mdi:harddisk",
),
GlancesSensorEntityDescription(
key="available",
type="raid",
name_suffix="Raid available",
icon="mdi:harddisk",
),
)
|
home-assistant/home-assistant
|
homeassistant/components/glances/const.py
|
Python
|
apache-2.0
| 5,680
|
import signal
import sys
from threading import Event
def GetInterruptEvent():
e = Event()
    def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
e.set()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
return e
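# Usage sketch: poll the returned event in the main loop and shut down
# cleanly on Ctrl+C or SIGTERM.
if __name__ == '__main__':
    import time
    stop = GetInterruptEvent()
    while not stop.is_set():
        time.sleep(0.1)    # stand-in for a unit of real work
    print('shutting down cleanly')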
|
rickbassham/videoencode
|
interrupt.py
|
Python
|
mit
| 302
|
"""This script creates a new demo based on an existing demo. It copies
across all the source files to a new directory, and creates a .vcproj
file in the build directory. This should be run from the scripts directory.
Usage:
new_demo.py base_demo new_demo
base_demo: the demo to copy, including the chapter number, for
example c03_kinematic.
new_demo: the new demo name, including chapter number, for example
c03_newkinematic.
"""
import sys
import os
import os.path
import shutil
def copyDirectory(from_path, to_path, from_name, to_name):
"""Copies across the contents of a directory, changing the names
of files within it to match the new demo."""
# Make sure our destination exists
if not os.path.exists(to_path): os.mkdir(to_path)
# Get the list of entries in this directory
entries = os.listdir(from_path)
for entry in entries:
# Ignore special files
if entry[0] == '.': continue
src_path = os.path.join(from_path, entry)
# If we have a directory, then recurse
if os.path.isdir(src_path):
            copyDirectory(src_path, os.path.join(to_path, entry), from_name, to_name)
# Otherwise copy
else:
to_filename = entry.replace(from_name, to_name)
dest_path = os.path.join(to_path, to_filename)
shutil.copy(src_path, dest_path)
def main():
# Make sure we have the correct number of arguments
if len(sys.argv) < 3:
print __doc__
sys.exit(1)
# Get the demo name
base_demo = sys.argv[1]
    base_chapter, base_demo_name = base_demo.split("_", 1)
    new_demo = sys.argv[2]
    new_chapter, new_demo_name = new_demo.split("_", 1)
# Check the base demo exists and the new one doesn't
base_path = os.path.join('..', 'src', 'demos', base_demo)
if not os.path.isdir(base_path):
print "Your base demo doesn't exist!"
sys.exit(2)
new_path = os.path.join('..', 'src', 'demos', new_demo)
if os.path.exists(new_path):
print "Your new demo already exists!"
sys.exit(2)
# Create the directory and copy across the source files, changing the
# name of any that need it.
copyDirectory(base_path, new_path, base_demo_name, new_demo_name)
# Copy the project files
buildDirectory = os.path.join("..", "build")
projectFiles = [
entry
for entry in os.listdir(buildDirectory)
if entry.startswith(base_demo)
]
for projectFile in projectFiles:
data = open(os.path.join(buildDirectory, projectFile), "r").read()
data = data.replace(base_demo_name, new_demo_name)
data = data.replace(base_demo, new_demo)
newProjectFile = projectFile.replace(base_demo, new_demo)
newf = open(os.path.join(buildDirectory, newProjectFile), 'w')
newf.write(data)
newf.close()
if __name__ == '__main__':
main()
|
idmillington/aicore
|
scripts/new_demo.py
|
Python
|
mit
| 3,017
|
"""Production settings and globals."""
from .base import *
########## HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = []
########## END HOST CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', get_env_variable('MAIL_PASSWORD'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', get_env_variable('MAIL_USER'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
########## AWS CONFIGURATION
AWS_STORAGE_BUCKET_NAME = get_env_variable('AWS_STORAGE_BUCKET_NAME')
AWS_SECRET_ACCESS_KEY = get_env_variable('AWS_SECRET_ACCESS_KEY')
AWS_ACCESS_KEY_ID = get_env_variable('AWS_ACCESS_KEY_ID')
DEFAULT_FILE_STORAGE = 'mantistrack.s3utils.MediaRootS3BotoStorage'
STATICFILES_STORAGE = 'mantistrack.s3utils.StaticRootS3BotoStorage'
S3_URL = 'http://%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = '%sstatic/' % S3_URL
MEDIA_URL = '%smedia/' % S3_URL
INSTALLED_APPS += ('storages',)
########## END AWS CONFIGURATION
########## DATABASE CONFIGURATION
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {}
########## END CACHE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = get_env_variable('SECRET_KEY')
########## END SECRET CONFIGURATION
|
archen/mantistrack
|
mantistrack/mantistrack/settings/production.py
|
Python
|
mit
| 2,542
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
#
# sensor metadata-extraction profiles - spot6 ortho-product
#
# Project: XML Metadata Handling
# Authors: Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from .spot6_ortho import ProfileSpot6Ortho
class ProfilePleiades1Ortho(ProfileSpot6Ortho):
version = "2.0"
profile = "PHR_ORTHO"
|
DREAM-ODA-OS/tools
|
metadata/profiles/pleiades1_ortho.py
|
Python
|
mit
| 1,661
|
# This fragment assumes `flatpages` and `urlpatterns` are defined elsewhere
# in the project; the Django imports below are the standard ones it needs.
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
flatpages.register()
urlpatterns += [ url(r'^admin/', include(admin.site.urls)), ]
|
bane138/nonhumanuser
|
nonhumanuser/admin.py
|
Python
|
mit
| 103
|
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOOKUP = {}
from .paths import add_lookup, lookup_template, clear_lookups
|
carsongee/edx-platform
|
common/djangoapps/edxmako/__init__.py
|
Python
|
agpl-3.0
| 676
|
#
# Copyright (c) 2009-2020 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Engine for generating reports"""
from __future__ import absolute_import
# System imports:
import datetime
import ftplib
import glob
import logging
import os.path
import threading
import time
import traceback
# 3rd party imports
from six.moves import zip
import configobj
# WeeWX imports:
import weeutil.config
import weeutil.logger
import weeutil.weeutil
import weewx.defaults
import weewx.manager
from weeutil.weeutil import to_bool
log = logging.getLogger(__name__)
# spans of valid values for each CRON like field
MINUTES = (0, 59)
HOURS = (0, 23)
DOM = (1, 31)
MONTHS = (1, 12)
DOW = (0, 6)
# valid day names for DOW field
DAY_NAMES = ('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat')
# valid month names for month field
MONTH_NAMES = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec')
# map month names to month number
MONTH_NAME_MAP = list(zip(('jan', 'feb', 'mar', 'apr',
'may', 'jun', 'jul', 'aug',
'sep', 'oct', 'nov', 'dec'), list(range(1, 13))))
# map day names to day number
DAY_NAME_MAP = list(zip(('sun', 'mon', 'tue', 'wed',
'thu', 'fri', 'sat'), list(range(7))))
# map CRON like nicknames to equivalent CRON like line
NICKNAME_MAP = {
"@yearly": "0 0 1 1 *",
"@anually": "0 0 1 1 *",
"@monthly": "0 0 1 * *",
"@weekly": "0 0 * * 0",
"@daily": "0 0 * * *",
"@hourly": "0 * * * *"
}
# list of valid spans for CRON like fields
SPANS = (MINUTES, HOURS, DOM, MONTHS, DOW)
# list of valid names for CRON like fields
NAMES = ((), (), (), MONTH_NAMES, DAY_NAMES)
# list of name maps for CRON like fields
MAPS = ((), (), (), MONTH_NAME_MAP, DAY_NAME_MAP)
# =============================================================================
# Class StdReportEngine
# =============================================================================
class StdReportEngine(threading.Thread):
"""Reporting engine for weewx.
This engine runs zero or more reports. Each report uses a skin. A skin
has its own configuration file specifying things such as which 'generators'
should be run, which templates are to be used, what units are to be used,
etc..
A 'generator' is a class inheriting from class ReportGenerator, that
produces the parts of the report, such as image plots, HTML files.
StdReportEngine inherits from threading.Thread, so it will be run in a
separate thread.
See below for examples of generators.
"""
def __init__(self, config_dict, stn_info, record=None, gen_ts=None, first_run=True):
"""Initializer for the report engine.
config_dict: The configuration dictionary.
stn_info: An instance of weewx.station.StationInfo, with static
station information.
record: The current archive record [Optional; default is None]
gen_ts: The timestamp for which the output is to be current
[Optional; default is the last time in the database]
first_run: True if this is the first time the report engine has been
run. If this is the case, then any 'one time' events should be done.
"""
threading.Thread.__init__(self, name="ReportThread")
self.config_dict = config_dict
self.stn_info = stn_info
self.record = record
self.gen_ts = gen_ts
self.first_run = first_run
def run(self):
"""This is where the actual work gets done.
Runs through the list of reports. """
if self.gen_ts:
log.debug("Running reports for time %s",
weeutil.weeutil.timestamp_to_string(self.gen_ts))
else:
log.debug("Running reports for latest time in the database.")
# Iterate over each requested report
for report in self.config_dict['StdReport'].sections:
# Ignore the [[Defaults]] section
if report == 'Defaults':
continue
# See if this report is disabled
enabled = to_bool(self.config_dict['StdReport'][report].get('enable', True))
if not enabled:
log.debug("Report '%s' not enabled. Skipping.", report)
continue
log.debug("Running report '%s'", report)
# Fetch and build the skin_dict:
try:
skin_dict = self._build_skin_dict(report)
except SyntaxError as e:
log.error("Syntax error: %s", e)
log.error(" **** Report ignored")
continue
            # Default action is to run the report. The only reason not to run
            # it is if we have a valid report_timing setting and it did not trigger.
if self.record:
                # StdReport called us (not wee_reports), so look for a report_timing
# entry if we have one.
timing_line = skin_dict.get('report_timing')
if timing_line:
# Get a ReportTiming object.
timing = ReportTiming(timing_line)
if timing.is_valid:
# Get timestamp and interval so we can check if the
# report timing is triggered.
_ts = self.record['dateTime']
_interval = self.record['interval'] * 60
# Is our report timing triggered? timing.is_triggered
# returns True if triggered, False if not triggered
                        # and None if the report timing line is invalid.
if timing.is_triggered(_ts, _ts - _interval) is False:
# report timing was valid but not triggered so do
# not run the report.
log.debug("Report '%s' skipped due to report_timing setting", report)
continue
else:
log.debug("Invalid report_timing setting for report '%s', "
"running report anyway", report)
log.debug(" **** %s", timing.validation_error)
if 'Generators' in skin_dict and 'generator_list' in skin_dict['Generators']:
for generator in weeutil.weeutil.option_as_list(skin_dict['Generators']['generator_list']):
try:
# Instantiate an instance of the class.
obj = weeutil.weeutil.get_object(generator)(
self.config_dict,
skin_dict,
self.gen_ts,
self.first_run,
self.stn_info,
self.record)
except Exception as e:
log.error("Unable to instantiate generator '%s'", generator)
log.error(" **** %s", e)
weeutil.logger.log_traceback(log.error, " **** ")
log.error(" **** Generator ignored")
traceback.print_exc()
continue
try:
# Call its start() method
obj.start()
except Exception as e:
# Caught unrecoverable error. Log it, continue on to the
# next generator.
log.error("Caught unrecoverable exception in generator '%s'", generator)
log.error(" **** %s", e)
weeutil.logger.log_traceback(log.error, " **** ")
log.error(" **** Generator terminated")
traceback.print_exc()
continue
finally:
obj.finalize()
else:
log.debug("No generators specified for report '%s'", report)
def _build_skin_dict(self, report):
"""Find and build the skin_dict for the given report"""
# Start with the defaults in the defaults module. Because we will be modifying it, we need
# to make a deep copy.
skin_dict = weeutil.config.deep_copy(weewx.defaults.defaults)
# Add the report name:
skin_dict['REPORT_NAME'] = report
        # Now add the options in the report's skin.conf file. Start by figuring out where it is located.
skin_config_path = os.path.join(
self.config_dict['WEEWX_ROOT'],
self.config_dict['StdReport']['SKIN_ROOT'],
self.config_dict['StdReport'][report].get('skin', ''),
'skin.conf')
# Now retrieve the configuration dictionary for the skin. Wrap it in a try block in case we fail. It is ok if
# there is no file - everything for a skin might be defined in the weewx configuration.
try:
merge_dict = configobj.ConfigObj(skin_config_path, file_error=True, encoding='utf-8')
log.debug("Found configuration file %s for report '%s'", skin_config_path, report)
# Merge the skin config file in:
weeutil.config.merge_config(skin_dict, merge_dict)
except IOError as e:
log.debug("Cannot read skin configuration file %s for report '%s': %s",
skin_config_path, report, e)
except SyntaxError as e:
log.error("Failed to read skin configuration file %s for report '%s': %s",
skin_config_path, report, e)
raise
# Now add on the [StdReport][[Defaults]] section, if present:
if 'Defaults' in self.config_dict['StdReport']:
# Because we will be modifying the results, make a deep copy of the [[Defaults]]
# section.
merge_dict = weeutil.config.deep_copy(self.config_dict)['StdReport']['Defaults']
weeutil.config.merge_config(skin_dict, merge_dict)
# Inject any scalar overrides. This is for backwards compatibility. These options should now go
# under [StdReport][[Defaults]].
for scalar in self.config_dict['StdReport'].scalars:
skin_dict[scalar] = self.config_dict['StdReport'][scalar]
# Finally, inject any overrides for this specific report. Because this is the last merge, it will have the
# final say.
weeutil.config.merge_config(skin_dict, self.config_dict['StdReport'][report])
return skin_dict
# =============================================================================
# Class ReportGenerator
# =============================================================================
class ReportGenerator(object):
"""Base class for all report generators."""
def __init__(self, config_dict, skin_dict, gen_ts, first_run, stn_info, record=None):
self.config_dict = config_dict
self.skin_dict = skin_dict
self.gen_ts = gen_ts
self.first_run = first_run
self.stn_info = stn_info
self.record = record
self.db_binder = weewx.manager.DBBinder(self.config_dict)
def start(self):
self.run()
def run(self):
pass
def finalize(self):
self.db_binder.close()
# =============================================================================
# Class FtpGenerator
# =============================================================================
class FtpGenerator(ReportGenerator):
"""Class for managing the "FTP generator".
This will ftp everything in the public_html subdirectory to a webserver."""
def run(self):
import weeutil.ftpupload
# determine how much logging is desired
log_success = to_bool(weeutil.config.search_up(self.skin_dict, 'log_success', True))
log_failure = to_bool(weeutil.config.search_up(self.skin_dict, 'log_failure', True))
t1 = time.time()
try:
local_root = os.path.join(self.config_dict['WEEWX_ROOT'],
self.skin_dict.get('HTML_ROOT', self.config_dict['StdReport']['HTML_ROOT']))
ftp_data = weeutil.ftpupload.FtpUpload(
server=self.skin_dict['server'],
user=self.skin_dict['user'],
password=self.skin_dict['password'],
local_root=local_root,
remote_root=self.skin_dict['path'],
port=int(self.skin_dict.get('port', 21)),
name=self.skin_dict['REPORT_NAME'],
passive=to_bool(self.skin_dict.get('passive', True)),
secure=to_bool(self.skin_dict.get('secure_ftp', False)),
debug=weewx.debug,
secure_data=to_bool(self.skin_dict.get('secure_data', True)),
reuse_ssl=to_bool(self.skin_dict.get('reuse_ssl', False))
)
except KeyError:
log.debug("ftpgenerator: FTP upload not requested. Skipped.")
return
max_tries = int(self.skin_dict.get('max_tries', 3))
for count in range(max_tries):
try:
n = ftp_data.run()
except ftplib.all_errors as e:
log.error("ftpgenerator: (%d): caught exception '%s': %s", count, type(e), e)
weeutil.logger.log_traceback(log.error, " **** ")
else:
if log_success:
t2 = time.time()
log.info("ftpgenerator: Ftp'd %d files in %0.2f seconds", n, (t2 - t1))
break
else:
# The loop completed normally, meaning the upload failed.
if log_failure:
log.error("ftpgenerator: Upload failed")
# =============================================================================
# Class RsyncGenerator
# =============================================================================
class RsyncGenerator(ReportGenerator):
"""Class for managing the "rsync generator".
This will rsync everything in the public_html subdirectory to a server."""
def run(self):
import weeutil.rsyncupload
# We don't try to collect performance statistics about rsync, because
# rsync will report them for us. Check the debug log messages.
try:
local_root = os.path.join(self.config_dict['WEEWX_ROOT'],
self.skin_dict.get('HTML_ROOT', self.config_dict['StdReport']['HTML_ROOT']))
rsync_data = weeutil.rsyncupload.RsyncUpload(
local_root=local_root,
remote_root=self.skin_dict['path'],
server=self.skin_dict['server'],
user=self.skin_dict.get('user'),
port=self.skin_dict.get('port'),
ssh_options=self.skin_dict.get('ssh_options'),
compress=to_bool(self.skin_dict.get('compress', False)),
delete=to_bool(self.skin_dict.get('delete', False)),
log_success=to_bool(weeutil.config.search_up(self.skin_dict, 'log_success', True)))
except KeyError:
log.debug("rsyncgenerator: Rsync upload not requested. Skipped.")
return
try:
rsync_data.run()
except IOError as e:
log.error("rsyncgenerator: Caught exception '%s': %s", type(e), e)
# =============================================================================
# Class CopyGenerator
# =============================================================================
class CopyGenerator(ReportGenerator):
"""Class for managing the 'copy generator.'
This will copy files from the skin subdirectory to the public_html
subdirectory."""
def run(self):
copy_dict = self.skin_dict['CopyGenerator']
# determine how much logging is desired
log_success = to_bool(weeutil.config.search_up(copy_dict, 'log_success', True))
copy_list = []
if self.first_run:
# Get the list of files to be copied only once, at the first
# invocation of the generator. Wrap in a try block in case the
# list does not exist.
try:
copy_list += weeutil.weeutil.option_as_list(copy_dict['copy_once'])
except KeyError:
pass
        # Get the list of files to be copied every time. Again, wrap in a
# try block.
try:
copy_list += weeutil.weeutil.option_as_list(copy_dict['copy_always'])
except KeyError:
pass
# Change directory to the skin subdirectory:
os.chdir(os.path.join(self.config_dict['WEEWX_ROOT'],
self.skin_dict['SKIN_ROOT'],
self.skin_dict['skin']))
# Figure out the destination of the files
html_dest_dir = os.path.join(self.config_dict['WEEWX_ROOT'],
self.skin_dict['HTML_ROOT'])
# The copy list can contain wildcard characters. Go through the
# list globbing any character expansions
ncopy = 0
for pattern in copy_list:
# Glob this pattern; then go through each resultant path:
for path in glob.glob(pattern):
ncopy += weeutil.weeutil.deep_copy_path(path, html_dest_dir)
if log_success:
log.info("Copied %d files to %s", ncopy, html_dest_dir)
# ===============================================================================
# Class ReportTiming
# ===============================================================================
class ReportTiming(object):
"""Class for processing a CRON like line and determining whether it should
be fired for a given time.
The following CRON like capabilities are supported:
    - There are two ways to specify the day the line is fired, DOM and DOW. A
      match on all other fields plus a match on either DOM or DOW will cause
      the line to be fired.
- first-last, *. Matches all possible values for the field concerned.
- step, /x. Matches every xth minute/hour/day etc. May be bounded by a list
or range.
- range, lo-hi. Matches all values from lo to hi inclusive. Ranges using
month and day names are not supported.
- lists, x,y,z. Matches those items in the list. List items may be a range.
Lists using month and day names are not supported.
- month names. Months may be specified by number 1..12 or first 3 (case
insensitive) letters of the English month name jan..dec.
- weekday names. Weekday names may be specified by number 0..7
(0,7 = Sunday) or first 3 (case insensitive) letters of the English
weekday names sun..sat.
- nicknames. Following nicknames are supported:
@yearly : Run once a year, ie "0 0 1 1 *"
@annually : Run once a year, ie "0 0 1 1 *"
@monthly : Run once a month, ie "0 0 1 * *"
@weekly : Run once a week, ie "0 0 * * 0"
@daily : Run once a day, ie "0 0 * * *"
@hourly : Run once an hour, ie "0 * * * *"
Useful ReportTiming class attributes:
is_valid: Whether passed line is a valid line or not.
validation_error: Error message if passed line is an invalid line.
raw_line: Raw line data passed to ReportTiming.
line: 5 item list representing the 5 date/time fields after the
raw line has been processed and dom/dow named parameters
replaced with numeric equivalents.
"""
def __init__(self, raw_line):
"""Initialises a ReportTiming object.
Processes raw line to produce 5 field line suitable for further
processing.
raw_line: The raw line to be processed.
"""
# initialise some properties
self.is_valid = None
self.validation_error = None
# To simplify error reporting keep a copy of the raw line passed to us
# as a string. The raw line could be a list if it included any commas.
# Assume a string but catch the error if it is a list and join the list
# elements to make a string
try:
line_str = raw_line.strip()
except AttributeError:
line_str = ','.join(raw_line).strip()
self.raw_line = line_str
# do some basic checking of the line for unsupported characters
for unsupported_char in ('%', '#', 'L', 'W'):
if unsupported_char in line_str:
self.is_valid = False
self.validation_error = "Unsupported character '%s' in '%s'." % (unsupported_char,
self.raw_line)
return
# Six special time definition 'nicknames' are supported which replace
# the line elements with pre-determined values. These nicknames start
# with the @ character. Check for any of these nicknames and substitute
# the corresponding line.
for nickname, nn_line in NICKNAME_MAP.items():
if line_str == nickname:
line_str = nn_line
break
fields = line_str.split(None, 5)
if len(fields) < 5:
# Not enough fields
self.is_valid = False
self.validation_error = "Insufficient fields found in '%s'" % self.raw_line
return
elif len(fields) == 5:
fields.append(None)
# extract individual line elements
minutes, hours, dom, months, dow, _extra = fields
# save individual fields
self.line = [minutes, hours, dom, months, dow]
# is DOM restricted ie is DOM not '*'
self.dom_restrict = self.line[2] != '*'
# is DOW restricted ie is DOW not '*'
self.dow_restrict = self.line[4] != '*'
# decode the line and generate a set of possible values for each field
(self.is_valid, self.validation_error) = self.decode_fields()
def decode_fields(self):
"""Decode each field and store the sets of valid values.
Set of valid values is stored in self.decode. Self.decode can only be
considered valid if self.is_valid is True. Returns a 2-way tuple
(True|False, ERROR MESSAGE). First item is True is the line is valid
otherwise False. ERROR MESSAGE is None if the line is valid otherwise a
string containing a short error message.
"""
# set a list to hold our decoded ranges
self.decode = []
try:
# step through each field and its associated range, names and maps
for field, span, names, mapp in zip(self.line, SPANS, NAMES, MAPS):
field_set = self.parse_field(field, span, names, mapp)
self.decode.append(field_set)
# if we are this far then our line is valid so return True and no
# error message
return (True, None)
except ValueError as e:
# we picked up a ValueError in self.parse_field() so return False
# and the error message
return (False, e)
def parse_field(self, field, span, names, mapp, is_rorl=False):
"""Return the set of valid values for a field.
Parses and validates a field and if the field is valid returns a set
containing all of the possible field values. Called recursively to
parse sub-fields (eg lists of ranges). If a field is invalid a
ValueError is raised.
field: String containing the raw field to be parsed.
span: Tuple representing the lower and upper numeric values the
field may take. Format is (lower, upper).
names: Tuple containing all valid named values for the field. For
numeric only fields the tuple is empty.
mapp: Tuple of 2 way tuples mapping named values to numeric
equivalents. Format is ((name1, numeric1), ..
(namex, numericx)). For numeric only fields the tuple is empty.
is_rorl: Is field part of a range or list. Either True or False.
"""
field = field.strip()
if field == '*': # first-last
# simply return a set of all poss values
return set(range(span[0], span[1] + 1))
elif field.isdigit(): # just a number
# If its a DOW then replace any 7s with 0
_field = field.replace('7', '0') if span == DOW else field
# its valid if its within our span
if span[0] <= int(_field) <= span[1]:
# it's valid so return the field itself as a set
return set((int(_field),))
else:
# invalid field value so raise ValueError
raise ValueError("Invalid field value '%s' in '%s'" % (field,
self.raw_line))
elif field.lower() in names: # an abbreviated name
# abbreviated names are only valid if not used in a range or list
if not is_rorl:
# replace all named values with numbers
_field = field
for _name, _ord in mapp:
_field = _field.replace(_name, str(_ord))
# its valid if its within our span
if span[0] <= int(_field) <= span[1]:
# it's valid so return the field itself as a set
return set((int(_field),))
else:
# invalid field value so raise ValueError
raise ValueError("Invalid field value '%s' in '%s'" % (field,
self.raw_line))
else:
# invalid use of abbreviated name so raise ValueError
raise ValueError("Invalid use of abbreviated name '%s' in '%s'" % (field,
self.raw_line))
elif ',' in field: # we have a list
# get the first list item and the rest of the list
_first, _rest = field.split(',', 1)
# get _first as a set using a recursive call
_first_set = self.parse_field(_first, span, names, mapp, True)
# get _rest as a set using a recursive call
_rest_set = self.parse_field(_rest, span, names, mapp, True)
# return the union of the _first and _rest sets
return _first_set | _rest_set
elif '/' in field: # a step
# get the value and the step
_val, _step = field.split('/', 1)
# step is valid if it is numeric
if _step.isdigit():
# get _val as a set using a recursive call
_val_set = self.parse_field(_val, span, names, mapp, True)
# get the set of all possible values using _step
_lowest = min(_val_set)
_step_set = set([x for x in _val_set if ((x - _lowest) % int(_step) == 0)])
# return the intersection of the _val and _step sets
return _val_set & _step_set
else:
# invalid step so raise ValueError
raise ValueError("Invalid step value '%s' in '%s'" % (field,
self.raw_line))
elif '-' in field: # we have a range
# get the lo and hi values of the range
lo, hi = field.split('-', 1)
# if lo is numeric and in the span range then the range is valid if
# hi is valid
if lo.isdigit() and span[0] <= int(lo) <= span[1]:
# if hi is numeric and in the span range and greater than or
# equal to lo then the range is valid
if hi.isdigit() and int(hi) >= int(lo) and span[0] <= int(hi) <= span[1]:
# valid range so return a set of the range
return set(range(int(lo), int(hi) + 1))
else:
# something is wrong, we have an invalid field
raise ValueError("Invalid range specification '%s' in '%s'" % (field,
self.raw_line))
else:
# something is wrong with lo, we have an invalid field
raise ValueError("Invalid range specification '%s' in '%s'" % (field,
self.raw_line))
else:
# we have something I don't know how to parse so raise a ValueError
raise ValueError("Invalid field '%s' in '%s'" % (field,
self.raw_line))
def is_triggered(self, ts_hi, ts_lo=None):
"""Determine if CRON like line is to be triggered.
Return True if line is triggered between timestamps ts_lo and ts_hi
(exclusive on ts_lo inclusive on ts_hi), False if it is not
triggered or None if the line is invalid or ts_hi is not valid.
If ts_lo is not specified check for triggering on ts_hi only.
ts_hi: Timestamp of latest time to be checked for triggering.
ts_lo: Timestamp used for earliest time in range of times to be
checked for triggering. May be omitted in which case only
ts_hi is checked.
"""
if self.is_valid and ts_hi is not None:
# setup ts range to iterate over
if ts_lo is None:
_range = [int(ts_hi)]
else:
# CRON like line has a 1 min resolution so step backwards every
# 60 sec.
_range = list(range(int(ts_hi), int(ts_lo), -60))
# Iterate through each ts in our range. All we need is one ts that
# triggers the line.
for _ts in _range:
# convert ts to timetuple and extract required data
trigger_dt = datetime.datetime.fromtimestamp(_ts)
trigger_tt = trigger_dt.timetuple()
month, dow, day, hour, minute = (trigger_tt.tm_mon,
(trigger_tt.tm_wday + 1) % 7,
trigger_tt.tm_mday,
trigger_tt.tm_hour,
trigger_tt.tm_min)
                # build a list of (value, field, span, decoded-set) entries so
                # we can process each field in turn
element_tuple = list(zip((minute, hour, day, month, dow),
self.line,
SPANS,
self.decode))
# Iterate over each field and check if it will prevent
# triggering. Remember, we only need a match on either DOM or
# DOW but all other fields must match.
dom_match = False
dom_restricted_match = False
for period, _field, field_span, decode in element_tuple:
if period in decode:
# we have a match
if field_span == DOM:
# we have a match on DOM but we need to know if it
# was a match on a restricted DOM field
dom_match = True
dom_restricted_match = self.dom_restrict
elif field_span == DOW and not (dom_restricted_match or self.dow_restrict or dom_match):
break
continue
                    elif (field_span == DOW and dom_restricted_match) or field_span == DOM:
                        # No match, but treat it as a match if this field is DOW
                        # and we already matched on a restricted DOM. If the
                        # field is DOM, continue anyway: we might still match
                        # on DOW.
                        continue
else:
                        # The field will prevent the line from triggering for
# this ts so we break and move to the next ts.
break
else:
# If we arrived here then all fields match and the line
# would be triggered on this ts so return True.
return True
# If we are here it is because we broke out of all inner for loops
# and the line was not triggered so return False.
return False
else:
# Our line is not valid or we do not have a timestamp to use,
# return None
return None
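# A minimal usage sketch, assuming `line` is an instance of the CRON-like line
# class defined earlier in this module:
#
#     import time
#     now = int(time.time())
#     if line.is_triggered(now, now - 300):  # last 5 minutes, inclusive of now
#         pass  # fire the report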
|
hes19073/hesweewx
|
bin/weewx/reportengine.py
|
Python
|
gpl-3.0
| 33,225
|
# -*- coding: utf-8 -*-
#
# Cipher/DES.py : DES
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""DES symmetric cipher
DES `(Data Encryption Standard)`__ is a symmetric block cipher standardized
by NIST_. It has a fixed data block size of 8 bytes.
Its keys are 64 bits long, even though 8 bits are used for parity (they are
now ignored) and do not contribute to security. The effective key length is
therefore only 56 bits.
DES is sound in design, but its key length is too short by today's standards
and it can be brute-forced with some effort.
**Use AES, not DES. This module is provided only for legacy purposes.**
As an example, encryption can be done as follows:
>>> from Cryptodome.Cipher import DES
>>>
>>> key = b'-8B key-'
>>> cipher = DES.new(key, DES.MODE_OFB)
>>> plaintext = b'sona si latine loqueris '
>>> msg = cipher.iv + cipher.encrypt(plaintext)
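To decrypt, reuse the IV that was prepended to the message (a short sketch
mirroring the example above):
>>> decipher = DES.new(key, DES.MODE_OFB, iv=msg[:8])
>>> decipher.decrypt(msg[8:])
b'sona si latine loqueris '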
.. __: http://en.wikipedia.org/wiki/Data_Encryption_Standard
.. _NIST: http://csrc.nist.gov/publications/fips/fips46-3/fips46-3.pdf
:undocumented: __package__
"""
import sys
from Cryptodome.Cipher import _create_cipher
from Cryptodome.Util.py3compat import byte_string
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
c_size_t, expect_byte_string)
_raw_des_lib = load_pycryptodome_raw_lib(
"Cryptodome.Cipher._raw_des",
"""
int DES_start_operation(const uint8_t key[],
size_t key_len,
void **pResult);
int DES_encrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int DES_decrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int DES_stop_operation(void *state);
""")
def _create_base_cipher(dict_parameters):
"""This method instantiates and returns a handle to a low-level
base cipher. It will absorb named parameters in the process."""
try:
key = dict_parameters.pop("key")
except KeyError:
raise TypeError("Missing 'key' parameter")
expect_byte_string(key)
if len(key) != key_size:
raise ValueError("Incorrect DES key length (%d bytes)" % len(key))
start_operation = _raw_des_lib.DES_start_operation
stop_operation = _raw_des_lib.DES_stop_operation
cipher = VoidPointer()
result = start_operation(key,
c_size_t(len(key)),
cipher.address_of())
if result:
raise ValueError("Error %X while instantiating the DES cipher"
% result)
return SmartPointer(cipher.get(), stop_operation)
def new(key, mode, *args, **kwargs):
"""Create a new DES cipher
:Parameters:
key : byte string
The secret key to use in the symmetric cipher.
        It must be 8 bytes long. The parity bits will be ignored.
:Keywords:
mode : a *MODE_** constant
The chaining mode to use for encryption or decryption.
iv : byte string
(*Only* `MODE_CBC`, `MODE_CFB`, `MODE_OFB`, `MODE_OPENPGP`).
The initialization vector to use for encryption or decryption.
For `MODE_OPENPGP`, IV must be 8 bytes long for encryption
and 10 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
For all other modes, it must be 8 bytes long.
If not provided, a random byte string is generated (you can read it
back via the ``iv`` attribute).
nonce : byte string
(*Only* `MODE_EAX` and `MODE_CTR`).
A mandatory value that must never be reused for any other encryption.
For `MODE_CTR`, its length must be in the range ``[0..7]``.
For `MODE_EAX`, there are no restrictions, but it is recommended to
use at least 16 bytes.
If not provided for `MODE_EAX`, a random byte string is generated (you
can read it back via the ``nonce`` attribute).
mac_len : integer
(*Only* `MODE_EAX`). Length of the authentication tag, in bytes.
It must be no larger than 8 (which is the default).
segment_size : integer
        (*Only* `MODE_CFB`). The number of **bits** the plaintext and ciphertext
are segmented in. It must be a multiple of 8.
If not specified, it will be assumed to be 8.
initial_value : integer
(*Only* `MODE_CTR`). The initial value for the counter within
the counter block. By default it is 0.
:Return: a DES cipher, of the applicable mode:
- CBC_ mode
- CFB_ mode
- CTR_ mode
- EAX_ mode
- ECB_ mode
- OFB_ mode
- OpenPgp_ mode
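    A short sketch of the authenticated EAX mode (``encrypt_and_digest`` is
    the method exposed by the EAX mode object; the 16-byte nonce follows the
    recommendation above):
    >>> cipher = DES.new(key, DES.MODE_EAX, nonce=b'0123456789abcdef')
    >>> ciphertext, tag = cipher.encrypt_and_digest(b'attack at dawn')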
.. _CBC: Cryptodome.Cipher._mode_cbc.CbcMode-class.html
.. _CFB: Cryptodome.Cipher._mode_cfb.CfbMode-class.html
.. _CTR: Cryptodome.Cipher._mode_ctr.CtrMode-class.html
.. _EAX: Cryptodome.Cipher._mode_eax.EaxMode-class.html
.. _ECB: Cryptodome.Cipher._mode_ecb.EcbMode-class.html
.. _OFB: Cryptodome.Cipher._mode_ofb.OfbMode-class.html
.. _OpenPgp: Cryptodome.Cipher._mode_openpgp.OpenPgpMode-class.html
"""
return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs)
#: Electronic Code Book (ECB). See `Cryptodome.Cipher._mode_ecb.EcbMode`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `Cryptodome.Cipher._mode_cbc.CbcMode`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `Cryptodome.Cipher._mode_cfb.CfbMode`.
MODE_CFB = 3
#: Output FeedBack (OFB). See `Cryptodome.Cipher._mode_ofb.OfbMode`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `Cryptodome.Cipher._mode_ctr.CtrMode`.
MODE_CTR = 6
#: OpenPGP Mode. See `Cryptodome.Cipher._mode_openpgp.OpenPgpMode`.
MODE_OPENPGP = 7
#: EAX Mode. See `Cryptodome.Cipher._mode_eax.EaxMode`.
MODE_EAX = 9
#: Size of a data block (in bytes)
block_size = 8
#: Size of a key (in bytes)
key_size = 8
|
mchristopher/PokemonGo-DesktopMap
|
app/pylibs/osx64/Cryptodome/Cipher/DES.py
|
Python
|
mit
| 7,176
|
#!/usr/bin/env python3
"""tests.test_io.test_read_gfa.py: tests for exfi.io.read_gfa.py"""
from unittest import TestCase, main
from exfi.io.read_gfa import read_gfa1
from tests.io.gfa1 import \
HEADER, \
SEGMENTS_EMPTY, SEGMENTS_SIMPLE, SEGMENTS_COMPLEX, \
SEGMENTS_COMPLEX_SOFT, SEGMENTS_COMPLEX_HARD, \
LINKS_EMPTY, LINKS_SIMPLE, LINKS_COMPLEX, \
CONTAINMENTS_EMPTY, CONTAINMENTS_SIMPLE, CONTAINMENTS_COMPLEX, \
PATHS_EMPTY, PATHS_SIMPLE, PATHS_COMPLEX, \
GFA1_EMPTY_FN, GFA1_SIMPLE_FN, GFA1_COMPLEX_FN, \
GFA1_COMPLEX_SOFT_FN, GFA1_COMPLEX_HARD_FN
class TestReadGFA1(TestCase):
"""Tests for exfi.io.read_gfa.read_gfa1"""
def test_empty(self):
"""exfi.io.read_gfa.read_gfa1: empty case"""
gfa1 = read_gfa1(GFA1_EMPTY_FN)
self.assertTrue(gfa1['header'].equals(HEADER))
self.assertTrue(gfa1['segments'].equals(SEGMENTS_EMPTY))
self.assertTrue(gfa1['links'].equals(LINKS_EMPTY))
self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_EMPTY))
self.assertTrue(gfa1['paths'].equals(PATHS_EMPTY))
def test_simple(self):
"""exfi.io.read_gfa.read_gfa1: simple case"""
gfa1 = read_gfa1(GFA1_SIMPLE_FN)
self.assertTrue(gfa1['header'].equals(HEADER))
self.assertTrue(gfa1['segments'].equals(SEGMENTS_SIMPLE))
self.assertTrue(gfa1['links'].equals(LINKS_SIMPLE))
self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_SIMPLE))
self.assertTrue(gfa1['paths'].equals(PATHS_SIMPLE))
def test_complex(self):
"""exfi.io.read_gfa.read_gfa1: complex case"""
gfa1 = read_gfa1(GFA1_COMPLEX_FN)
self.assertTrue(gfa1['header'].equals(HEADER))
self.assertTrue(gfa1['segments'].equals(SEGMENTS_COMPLEX))
self.assertTrue(gfa1['links'].equals(LINKS_COMPLEX))
self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_COMPLEX))
self.assertTrue(gfa1['paths'].equals(PATHS_COMPLEX))
def test_complex_soft(self):
"""exfi.io.read_gfa.read_gfa1: complex and soft masking case"""
gfa1 = read_gfa1(GFA1_COMPLEX_SOFT_FN)
self.assertTrue(gfa1['header'].equals(HEADER))
self.assertTrue(gfa1['segments'].equals(SEGMENTS_COMPLEX_SOFT))
self.assertTrue(gfa1['links'].equals(LINKS_COMPLEX))
self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_COMPLEX))
self.assertTrue(gfa1['paths'].equals(PATHS_COMPLEX))
def test_complex_hard(self):
"""exfi.io.read_gfa.read_gfa1: complex and hard masking case"""
gfa1 = read_gfa1(GFA1_COMPLEX_HARD_FN)
self.assertTrue(gfa1['header'].equals(HEADER))
self.assertTrue(gfa1['segments'].equals(SEGMENTS_COMPLEX_HARD))
self.assertTrue(gfa1['links'].equals(LINKS_COMPLEX))
self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_COMPLEX))
self.assertTrue(gfa1['paths'].equals(PATHS_COMPLEX))
if __name__ == '__main__':
main()
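# For orientation, a sketch of the structure under test (file name is
# illustrative): read_gfa1 returns a dict of pandas DataFrames keyed by GFA1
# record type.
#
#     gfa1 = read_gfa1("graph.gfa")
#     sorted(gfa1)  # ['containments', 'header', 'links', 'paths', 'segments']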
|
jlanga/exfi
|
tests/test_io/test_read_gfa.py
|
Python
|
mit
| 2,975
|
from django.utils.translation import ugettext as _
from django.db.models import F
from forum.models.action import ActionProxy, DummyActionProxy
from forum.models import Vote, Flag
from forum import settings
class VoteAction(ActionProxy):
def update_node_score(self, inc):
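        # F('score') pushes the increment into SQL (UPDATE ... SET score = score + inc),
        # so concurrent votes don't clobber each other with stale in-memory values.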
self.node.score = F('score') + inc
self.node.save()
def process_vote_action(self, value):
self.update_node_score(value)
vote = Vote(node=self.node, user=self.user, action=self, value=value)
vote.save()
def cancel_action(self):
vote = self.vote
self.update_node_score(-vote.value)
vote.delete()
@classmethod
def get_for(cls, user, node):
        try:
            vote = Vote.objects.get(user=user, node=node)
            return vote.value
        except Vote.DoesNotExist:
            return None
@classmethod
def get_action_for(cls, user, node):
        try:
            vote = Vote.objects.get(user=user, node=node)
            return vote.action
        except Vote.DoesNotExist:
            return None
def describe_vote(self, vote_desc, viewer=None):
return _("%(user)s %(vote_desc)s %(post_desc)s") % {
'user': self.hyperlink(self.user.get_profile_url(), self.friendly_username(viewer, self.user)),
'vote_desc': vote_desc, 'post_desc': self.describe_node(viewer, self.node)
}
class VoteUpAction(VoteAction):
def repute_users(self):
self.repute(self.node.author, int(settings.REP_GAIN_BY_UPVOTED))
def process_action(self):
self.process_vote_action(1)
self.user.reset_vote_up_count_cache()
def cancel_action(self):
super(VoteUpAction, self).cancel_action()
self.user.reset_vote_up_count_cache()
def describe(self, viewer=None):
return self.describe_vote(_("voted up"), viewer)
class VoteDownAction(VoteAction):
def repute_users(self):
self.repute(self.node.author, -int(settings.REP_LOST_BY_DOWNVOTED))
self.repute(self.user, -int(settings.REP_LOST_BY_DOWNVOTING))
def process_action(self):
self.process_vote_action(-1)
self.user.reset_vote_down_count_cache()
def cancel_action(self):
super(VoteDownAction, self).cancel_action()
self.user.reset_vote_down_count_cache()
def describe(self, viewer=None):
return self.describe_vote(_("voted down"), viewer)
class VoteUpCommentAction(VoteUpAction):
def repute_users(self):
pass
def process_action(self):
self.process_vote_action(1)
def cancel_action(self):
super(VoteUpAction, self).cancel_action()
def describe(self, viewer=None):
return self.describe_vote(_("liked"), viewer)
class FlagAction(ActionProxy):
def repute_users(self):
self.repute(self.node.author, -int(settings.REP_LOST_BY_FLAGGED))
def process_action(self):
flag = Flag(user=self.user, node=self.node, action=self, reason=self.extra)
flag.save()
self.node.reset_flag_count_cache()
if self.node.flag_count == int(settings.FLAG_COUNT_TO_HIDE_POST):
self.repute(self.node.author, -int(settings.REP_LOST_BY_FLAGGED_3_TIMES))
if self.node.flag_count == int(settings.FLAG_COUNT_TO_DELETE_POST):
self.repute(self.node.author, -int(settings.REP_LOST_BY_FLAGGED_5_TIMES))
if not self.node.nis.deleted:
DeleteAction(node=self.node, user=self.user, extra="BYFLAGGED").save()
def cancel_action(self):
self.flag.delete()
self.node.reset_flag_count_cache()
@classmethod
def get_for(cls, user, node):
        try:
            flag = Flag.objects.get(user=user, node=node)
            return flag.reason or _("No reason given")
        except Flag.DoesNotExist:
            return None
def describe(self, viewer=None):
return _("%(user)s flagged %(post_desc)s: %(reason)s") % {
'user': self.hyperlink(self.user.get_profile_url(), self.friendly_username(viewer, self.user)),
'post_desc': self.describe_node(viewer, self.node), 'reason': self.extra
}
class AcceptAnswerAction(ActionProxy):
def repute_users(self):
if (self.user == self.node.parent.author) and (not self.user == self.node.author):
self.repute(self.user, int(settings.REP_GAIN_BY_ACCEPTING))
if self.user != self.node.author:
self.repute(self.node.author, int(settings.REP_GAIN_BY_ACCEPTED))
def process_action(self):
self.node.marked = True
self.node.nstate.accepted = self
self.node.save()
self.node.question.reset_accepted_count_cache()
def cancel_action(self):
self.node.marked = False
self.node.nstate.accepted = None
self.node.save()
self.node.question.reset_accepted_count_cache()
def describe(self, viewer=None):
answer = self.node
question = answer.parent
if self.user == question.author:
            asker = _("your") if self.user == viewer else _("his")
else:
asker = self.hyperlink(question.author.get_profile_url(), question.author.username)
return _("%(user)s accepted %(answerer)s answer on %(asker)s question %(question)s") % {
'user': self.hyperlink(self.user.get_profile_url(), self.friendly_username(viewer, self.user)),
'answerer': self.hyperlink(answer.author.get_profile_url(), self.friendly_ownername(viewer, answer.author)),
'asker': asker,
'question': self.hyperlink(question.get_absolute_url(), question.title)
}
class FavoriteAction(ActionProxy):
def process_action(self):
self.node.reset_favorite_count_cache()
def cancel_action(self):
self.process_action()
def describe(self, viewer=None):
return _("%(user)s marked %(post_desc)s as favorite") % {
'user': self.hyperlink(self.user.get_profile_url(), self.friendly_username(viewer, self.user)),
'post_desc': self.describe_node(viewer, self.node),
}
class DeleteAction(ActionProxy):
def process_action(self):
self.node.mark_deleted(self)
if self.node.node_type == "answer":
self.node.question.reset_answer_count_cache()
def cancel_action(self):
self.node.mark_deleted(None)
if self.node.node_type == "answer":
self.node.question.reset_answer_count_cache()
def describe(self, viewer=None):
return _("%(user)s deleted %(post_desc)s") % {
'user': self.hyperlink(self.user.get_profile_url(), self.friendly_username(viewer, self.user)),
'post_desc': self.describe_node(viewer, self.node)
}
def reason(self):
if self.extra != "BYFLAGGED":
return self.extra
else:
return _("flagged by multiple users: ") + "; ".join([f.extra for f in FlagAction.objects.filter(node=self.node)])
class UnknownAction(ActionProxy):
pass
class QuestionViewAction(DummyActionProxy):
def __init__(self, node, user, ip=None):
self.viewuser = user
self.node = node
super(QuestionViewAction, self).__init__(ip)
def process_action(self):
self.node.extra_count = F('extra_count') + 1
self.node.save()
|
CLLKazan/iCQA
|
qa-engine/forum/actions/meta.py
|
Python
|
gpl-3.0
| 7,499
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test of the optimizers
'''
import numpy as np
import copy
from neon import NervanaObject
from neon.backends import gen_backend
from neon.optimizers import GradientDescentMomentum, RMSProp, Adadelta, Adam, Adagrad
from neon.optimizers import MultiOptimizer
from neon.layers import Conv, Affine, LSTM, GRU
from neon.initializers import Gaussian, Constant
from neon.transforms import Rectlin, Logistic, Tanh
class DummyLayer(object):
def __init__(self, p):
self.p = p[0]
def get_params(self):
return self.p
def compare_tensors(func, param_list, param2, tol=0., epoch=1):
func.optimize([DummyLayer(param_list)], epoch=epoch)
(param, grad), states = param_list[0]
cond = np.sum(np.abs(param.get() - param2) <= tol)
assert cond == np.prod(param2.shape)
def wrap(x):
be = NervanaObject.be
dtypeu = np.float32
return be.array(dtypeu(x))
def test_gdm(backend_default):
lrate, mom, wdecay = 0.1, 0.9, 0.005
gdm = GradientDescentMomentum(
learning_rate=lrate, momentum_coef=mom, wdecay=wdecay)
param = np.random.rand(200, 128)
param2 = copy.deepcopy(param)
grad = 0.01 * np.random.rand(200, 128)
grad2 = grad / 128.
states = [0.01 * np.random.rand(200, 128)]
velocity = states[0]
param2[:] = param2 + velocity * mom - grad2 * lrate - wdecay * lrate * param
param_list = [((wrap(param), wrap(grad)), [wrap(states[0])])]
compare_tensors(gdm, param_list, param2, tol=1e-7)
def test_rmsprop(backend_default):
rms = RMSProp()
param = np.random.rand(200, 128)
param2 = copy.deepcopy(param)
grad = 0.01 * np.random.rand(200, 128)
grad2 = grad / 128.
states = [0.01 * np.random.rand(200, 128)]
state = states[0]
decay = rms.decay_rate
denom = np.sqrt(decay * state + np.square(grad2) * (1.0 - decay) + rms.epsilon) + rms.epsilon
param2[:] -= grad2 * rms.learning_rate / denom
param_list = [((wrap(param), wrap(grad)), [wrap(states[0])])]
compare_tensors(rms, param_list, param2, tol=1e-7)
def test_adadelta(backend_default):
ada = Adadelta()
param = np.random.rand(200, 128)
param2 = copy.deepcopy(param)
grad = 0.01 * np.random.rand(200, 128)
grad2 = grad / 128.
states = [0.01 * np.random.rand(200, 128),
0.01 * np.random.rand(200, 128),
0.01 * np.random.rand(200, 128)]
states2 = [copy.deepcopy(states[0]),
copy.deepcopy(states[1]),
copy.deepcopy(states[2])]
decay = ada.decay
states2[0][:] = states2[0] * decay + (1. - decay) * grad2 * grad2
states2[2][:] = np.sqrt(
(states2[1] + ada.epsilon) / (states2[0] + ada.epsilon)) * grad2
states2[1][:] = states2[1] * decay + (1. - decay) * states2[2] * states2[2]
param2[:] -= states2[2]
param_list = [
((wrap(param), wrap(grad)), [wrap(states[0]), wrap(states[1]), wrap(states[2])])]
compare_tensors(ada, param_list, param2, tol=1e-7)
def test_adagrad(backend_default):
ada = Adagrad()
param = np.random.rand(200, 128)
param2 = copy.deepcopy(param)
grad = 0.01 * np.random.rand(200, 128)
grad2 = grad / 128.
states = [0.01 * np.random.rand(200, 128)]
states2 = [copy.deepcopy(states[0])]
states2[0][:] = states2[0] + np.square(grad2)
denom = np.sqrt(states2[0] + ada.epsilon)
param2[:] -= grad2 * ada.learning_rate / denom
param_list = [
((wrap(param), wrap(grad)), [wrap(states[0])])]
compare_tensors(ada, param_list, param2, tol=1e-7)
def test_adam(backend_default):
adam = Adam()
param = np.random.rand(200, 128)
param2 = copy.deepcopy(param)
grad = 0.01 * np.random.rand(200, 128)
grad2 = grad / 128.
states = [0.01 * np.random.rand(200, 128),
0.01 * np.random.rand(200, 128)]
states2 = [copy.deepcopy(states[0]),
copy.deepcopy(states[1])]
epoch = 1
t = epoch + 1
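    # Fold the standard Adam bias corrections (1 - beta**t) for the first and
    # second moment estimates into an effective step size.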
    lr_t = adam.learning_rate * np.sqrt(1 - adam.beta_2 ** t) / (1 - adam.beta_1 ** t)
    m, v = states2
    m[:] = m * adam.beta_1 + (1. - adam.beta_1) * grad2
    v[:] = v * adam.beta_2 + (1. - adam.beta_2) * grad2 * grad2
    param2[:] -= lr_t * m / (np.sqrt(v) + adam.epsilon)
param_list = [
((wrap(param), wrap(grad)), [wrap(states[0]), wrap(states[1])])]
compare_tensors(adam, param_list, param2, tol=1e-7, epoch=epoch)
def test_multi_optimizer(backend_default):
opt_gdm = GradientDescentMomentum(
learning_rate=0.001, momentum_coef=0.9, wdecay=0.005)
opt_ada = Adadelta()
opt_adam = Adam()
opt_rms = RMSProp()
opt_rms_1 = RMSProp(gradient_clip_value=5)
init_one = Gaussian(scale=0.01)
l1 = Conv((11, 11, 64), strides=4, padding=3,
init=init_one, bias=Constant(0), activation=Rectlin())
l2 = Affine(nout=4096, init=init_one,
bias=Constant(1), activation=Rectlin())
l3 = LSTM(output_size=1000, init=init_one, activation=Logistic(), gate_activation=Tanh())
l4 = GRU(output_size=100, init=init_one, activation=Logistic(), gate_activation=Tanh())
layers = [l1, l2, l3, l4]
layer_list = []
for layer in layers:
if isinstance(layer, list):
layer_list.extend(layer)
else:
layer_list.append(layer)
opt = MultiOptimizer({'default': opt_gdm,
'Bias': opt_ada,
'Convolution': opt_adam,
'Linear': opt_rms,
'LSTM': opt_rms_1,
'GRU': opt_rms_1})
map_list = opt.map_optimizers(layer_list)
assert map_list[opt_adam][0].__class__.__name__ == 'Convolution'
assert map_list[opt_ada][0].__class__.__name__ == 'Bias'
assert map_list[opt_rms][0].__class__.__name__ == 'Linear'
assert map_list[opt_gdm][0].__class__.__name__ == 'Activation'
assert map_list[opt_rms_1][0].__class__.__name__ == 'LSTM'
assert map_list[opt_rms_1][1].__class__.__name__ == 'GRU'
if __name__ == '__main__':
be = gen_backend(backend='gpu', batch_size=50)
test_multi_optimizer(be)
|
nhynes/neon
|
tests/test_optimizer.py
|
Python
|
apache-2.0
| 6,823
|
'''
Random Breakout AI player
@author: Victor Mayoral Vilches <victor@erlerobotics.com>
'''
import gym
import numpy
import random
import pandas
if __name__ == '__main__':
env = gym.make('Breakout-v0')
env.monitor.start('/tmp/breakout-experiment-1', force=True)
# video_callable=lambda count: count % 10 == 0)
goal_average_steps = 195
max_number_of_steps = 200
last_time_steps = numpy.ndarray(0)
n_bins = 8
n_bins_angle = 10
number_of_features = env.observation_space.shape[0]
action_attack = [False]*43
action_attack[0] = True
action_right = [False]*43
action_right[10] = True
action_left = [False]*43
action_left[11] = True
actions = [action_attack, action_left, action_right]
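    # NOTE: the hand-built action masks above are leftover scaffolding; the
    # loop below ignores them and samples uniformly from env.action_space.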
for i_episode in xrange(30):
observation = env.reset()
for t in xrange(max_number_of_steps):
env.render()
# Execute the action and get feedback
observation, reward, done, info = env.step(env.action_space.sample())
            if done:
                # record the episode length so the summary statistics below
                # have data to work with
                last_time_steps = numpy.append(last_time_steps, [int(t + 1)])
                break
l = last_time_steps.tolist()
l.sort()
print("Overall score: {:0.2f}".format(last_time_steps.mean()))
print("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.monitor.close()
# gym.upload('/tmp/cartpole-experiment-1', algorithm_id='vmayoral simple Q-learning', api_key='your-key')
|
vmayoral/basic_reinforcement_learning
|
tutorial8/gym/breakout/breakout.py
|
Python
|
gpl-3.0
| 1,469
|
from answer import Answer
from question import Question
class JsonHelper:
@staticmethod
    def event_to_json(event, questions=False):
response = {
'id': event.id,
'start_time_text': event.start_time.strftime('%Y-%m-%d %H:%M'),
'title': event.title,
'description': event.description,
'video_source' : event.video_source,
'video_id' : event.video_id,
'end_time': event.end_time.strftime('%Y-%m-%d %H:%M') if event.end_time else ''
}
if questions:
response['questions'] = JsonHelper.questions_to_json(event)
return response
@staticmethod
def questions_to_json(event):
query = Question.select().where(Question.event == event)
question_array = [JsonHelper.question_to_json(question) for question in query]
return question_array
@staticmethod
def question_to_json(question):
        answer_json = ''
        try:
            answer = Answer.get(Answer.question == question)
            answer_json = answer.JSON()
        except Answer.DoesNotExist:
            pass
return {
'id': question.id,
'content': question.content,
'created': question.created.strftime('%Y-%m-%d %H:%M'),
'updated': question.updated.strftime('%Y-%m-%d %H:%M') if question.updated else '',
'answer': answer_json
}
|
citruspi/relier-api
|
relier/models/json_help.py
|
Python
|
unlicense
| 1,609
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.RouteTable":
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
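# A minimal usage sketch, assuming an authenticated aio NetworkManagementClient
# `client` pinned to api-version 2020-03-01 (resource names are illustrative):
#
#     poller = await client.route_tables.begin_delete("my-rg", "my-route-table")
#     await poller.result()  # wait for the long-running delete to finish
#     async for table in client.route_tables.list("my-rg"):
#         print(table.name)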
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_route_tables_operations.py
|
Python
|
mit
| 26,791
|
from app.config.cplog import CPLog
import ConfigParser
log = CPLog(__name__)
class configApp():
s = ['Sabnzbd', 'TheMovieDB', 'NZBsorg', 'Renamer', 'IMDB', 'Intervals']
    booleans = {'true': True, 'false': False}
def __init__(self, file):
self.file = file
self.p = ConfigParser.RawConfigParser()
self.p.read(file)
self.initConfig()
def parser(self):
return self.p
def sections(self):
return self.s
def set(self, section, option, value):
return self.p.set(section, option, value)
def get(self, section, option):
value = self.p.get(section, option)
        if str(value).lower() in self.booleans:
            return self.booleans.get(str(value).lower())
return value if type(value) != str else value.strip()
def initConfig(self):
'''
        Create sections, in case the make-config didn't work properly
'''
self.addSection('global')
self.setDefault('global', 'server.environment', 'production')
self.setDefault('global', 'host', '0.0.0.0')
self.setDefault('global', 'port', 5000)
self.setDefault('global', 'username', '')
self.setDefault('global', 'password', '')
self.setDefault('global', 'launchbrowser', True)
self.setDefault('global', 'updater', True)
self.setDefault('global', 'git', 'git')
self.setDefault('global', 'urlBase', '')
self.setDefault('global', 'ignoreWords', '')
self.setDefault('global', 'preferredWords', '')
self.setDefault('global', 'requiredWords', '')
self.addSection('Renamer')
self.setDefault('Renamer', 'enabled', False)
self.setDefault('Renamer', 'download', '')
self.setDefault('Renamer', 'destination', '')
self.setDefault('Renamer', 'folderNaming', '<namethe> (<year>)')
self.setDefault('Renamer', 'fileNaming', '<thename><cd>.<ext>')
self.setDefault('Renamer', 'separator', ' ')
self.setDefault('Renamer', 'cleanup', False)
self.setDefault('Renamer', 'script_enabled', False)
self.setDefault('Renamer', 'file_path', '')
self.addSection('Trailer')
self.setDefault('Trailer', 'quality', False)
self.setDefault('Trailer', 'name', 'movie-trailer')
self.addSection('NZBsorg')
self.setDefault('NZBsorg', 'enabled', True)
self.setDefault('NZBsorg', 'id', '')
self.setDefault('NZBsorg', 'key', '')
self.addSection('NZBMatrix')
self.setDefault('NZBMatrix', 'enabled', True)
self.setDefault('NZBMatrix', 'username', '')
self.setDefault('NZBMatrix', 'apikey', '')
self.setDefault('NZBMatrix', 'english', False)
self.addSection('newzbin')
self.setDefault('newzbin', 'enabled', False)
self.setDefault('newzbin', 'username', '')
self.setDefault('newzbin', 'password', '')
self.addSection('newznab')
self.setDefault('newznab', 'enabled', False)
self.setDefault('newznab', 'host', '')
self.setDefault('newznab', 'apikey', '')
self.addSection('NZBsRUS')
self.setDefault('NZBsRUS', 'enabled', False)
self.setDefault('NZBsRUS', 'userid', '')
self.setDefault('NZBsRUS', 'userhash', '')
self.addSection('x264')
self.setDefault('x264', 'enabled', False)
self.addSection('mysterbin')
self.setDefault('mysterbin', 'enabled', False)
self.addSection('NZB')
self.setDefault('NZB', 'enabled', True)
self.setDefault('NZB', 'retention', 300)
self.setDefault('NZB', 'sendTo', 'Sabnzbd')
self.setDefault('NZB', 'blackhole', '')
self.addSection('Torrents')
self.setDefault('Torrents', 'enabled', False)
self.setDefault('Torrents', 'wait', 24)
self.setDefault('Torrents', 'sendTo', 'Blackhole')
self.setDefault('Torrents', 'blackhole', '')
self.addSection('Sabnzbd')
self.setDefault('Sabnzbd', 'host', 'localhost:8080')
self.setDefault('Sabnzbd', 'apikey', '')
self.setDefault('Sabnzbd', 'username', '')
self.setDefault('Sabnzbd', 'password', '')
self.setDefault('Sabnzbd', 'category', '')
self.setDefault('Sabnzbd', 'ppDir', '')
self.addSection('Transmission')
self.setDefault('Transmission', 'host', 'localhost:9091')
self.setDefault('Transmission', 'username', '')
self.setDefault('Transmission', 'password', '')
self.setDefault('Transmission', 'paused', '')
self.setDefault('Transmission', 'directory', '')
self.setDefault('Transmission', 'ratio', '')
self.addSection('Nzbget')
self.setDefault('Nzbget', 'host', '')
self.setDefault('Nzbget', 'password', 'tegbzn6789')
self.setDefault('Nzbget', 'category', 'Movies')
self.addSection('TheMovieDB')
self.setDefault('TheMovieDB', 'key', '9b939aee0aaafc12a65bf448e4af9543')
self.addSection('RottenTomatoes')
self.setDefault('RottenTomatoes', 'key', 'ht6hjvs5ez52am3tar58qxah')
self.addSection('IMDB')
self.addSection('Intervals')
self.setDefault('Intervals', 'search', '24')
self.setDefault('Intervals', 'renamer', '5')
self.addSection('Quality')
self.setDefault('Quality', 'hide', 'cam')
self.setDefault('Quality', 'default', '720p')
from app.lib.qualities import Qualities
for type in Qualities.types.itervalues():
self.setDefault('Quality', 'sMin-' + type['key'], type['size'][0])
self.setDefault('Quality', 'sMax-' + type['key'], type['size'][1])
self.addSection('Subtitles')
self.setDefault('Subtitles', 'enabled', False)
self.setDefault('Subtitles', 'languages', 'en')
self.setDefault('Subtitles', 'addLanguage', True)
self.setDefault('Subtitles', 'name', 'filename') #filename, subtitle
self.addSection('MovieETA')
self.setDefault('MovieETA', 'enabled', True)
self.addSection('MovieRSS')
self.setDefault('MovieRSS', 'enabled', False)
self.setDefault('MovieRSS', 'minyear', '2000')
self.setDefault('MovieRSS', 'minrating', '6.0')
self.setDefault('MovieRSS', 'minvotes', '900')
self.addSection('KinepolisRSS')
self.setDefault('KinepolisRSS', 'enabled', False)
self.setDefault('KinepolisRSS', 'minyear', '2000')
self.setDefault('KinepolisRSS', 'minrating', '6.0')
self.setDefault('KinepolisRSS', 'minvotes', '900')
self.addSection('Trakt')
self.setDefault('Trakt', 'watchlist_enabled', False)
self.setDefault('Trakt', 'notification_enabled', False)
self.setDefault('Trakt', 'watchlist_remove', False)
self.setDefault('Trakt', 'dontaddcollection', True)
self.setDefault('Trakt', 'apikey', '')
self.setDefault('Trakt', 'username', '')
self.setDefault('Trakt', 'password', '')
self.addSection('IMDBWatchlist')
self.setDefault('IMDBWatchlist', 'enabled', False)
self.setDefault('IMDBWatchlist', 'url', '')
self.addSection('XBMC')
self.setDefault('XBMC', 'enabled', False)
self.setDefault('XBMC', 'onSnatch', False)
self.setDefault('XBMC', 'host', 'localhost')
self.setDefault('XBMC', 'username', 'xbmc')
self.setDefault('XBMC', 'password', 'xbmc')
self.setDefault('XBMC', 'dbpath', '')
self.setDefault('XBMC', 'updateOneOnly', False)
self.setDefault('XBMC', 'useWebAPIExistingCheck', False)
self.addSection('NMJ')
self.setDefault('NMJ', 'enabled', False)
self.setDefault('NMJ', 'host', '')
self.setDefault('NMJ', 'database', '')
self.setDefault('NMJ', 'mount', '')
self.addSection('PLEX')
self.setDefault('PLEX', 'enabled', False)
self.setDefault('PLEX', 'host', '')
self.addSection('PROWL')
self.setDefault('PROWL', 'enabled', False)
self.setDefault('PROWL', 'onSnatch', False)
self.setDefault('PROWL', 'keys', '')
self.setDefault('PROWL', 'priority', '0')
self.addSection('GROWL')
self.setDefault('GROWL', 'enabled', False)
self.setDefault('GROWL', 'onSnatch', False)
self.setDefault('GROWL', 'host', 'localhost')
self.setDefault('GROWL', 'password', '')
self.addSection('Notifo')
self.setDefault('Notifo', 'enabled', False)
self.setDefault('Notifo', 'onSnatch', False)
self.setDefault('Notifo', 'username', '')
self.setDefault('Notifo', 'key', '')
self.addSection('Boxcar')
self.setDefault('Boxcar', 'enabled', False)
self.setDefault('Boxcar', 'onSnatch', False)
self.setDefault('Boxcar', 'username', '')
self.addSection('NMA')
self.setDefault('NMA', 'enabled', False)
self.setDefault('NMA', 'onSnatch', False)
self.setDefault('NMA', 'apikey', '')
self.setDefault('NMA', 'devkey', '')
self.setDefault('NMA', 'priority', '0')
self.addSection('NMWP')
self.setDefault('NMWP', 'enabled', False)
self.setDefault('NMWP', 'onSnatch', False)
self.setDefault('NMWP', 'apikey', '')
self.setDefault('NMWP', 'devkey', '')
self.setDefault('NMWP', 'priority', '0')
self.addSection('Twitter')
self.setDefault('Twitter', 'enabled', False)
self.setDefault('Twitter', 'onSnatch', False)
self.setDefault('Twitter', 'username', '')
self.setDefault('Twitter', 'password', '')
self.setDefault('Twitter', 'isAuthenticated', False)
self.addSection('Synoindex')
self.setDefault('Synoindex', 'enabled', False)
self.addSection('Meta')
self.setDefault('Meta', 'enabled', False)
self.setDefault('Meta', 'urlOnly', False)
self.setDefault('Meta', 'fanartMinHeight', 0)
self.setDefault('Meta', 'fanartMinWidth', 0)
self.setDefault('Meta', 'posterMinHeight', 0)
self.setDefault('Meta', 'posterMinWidth', 0)
self.setDefault('Meta', 'fanartFileName', 'fanart.<orig_ext>')
self.setDefault('Meta', 'posterFileName', 'movie.tbn')
self.setDefault('Meta', 'nfoFileName', 'movie.nfo')
self.save()
def save(self):
with open(self.file, 'wb') as configfile:
self.p.write(configfile)
def addSection(self, section):
if not self.p.has_section(section):
self.p.add_section(section)
def setDefault(self, section, option, value):
if not self.p.has_option(section, option):
self.p.set(section, option, value)
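# --- Hedged usage sketch (not part of the original file) ---
# setDefault only writes an option when it is absent, so user-edited values
# survive upgrades that introduce new defaults. The helper below mirrors
# that guard with a bare ConfigParser; section/option names are illustrative.
def _demoSetDefaultSemantics():
    import ConfigParser
    p = ConfigParser.RawConfigParser()
    p.add_section('XBMC')
    p.set('XBMC', 'host', 'mediacenter') # pre-existing user value
    if not p.has_option('XBMC', 'host'): # the same guard setDefault uses
        p.set('XBMC', 'host', 'localhost')
    return p.get('XBMC', 'host') # -> 'mediacenter': the user value wins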
class Auth():
def __init__(self, username, password):
self.u = username
self.p = password
def test(self, environ, username, password):
        return username == self.u and password == self.p
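# --- Hedged usage sketch (not part of the original file) ---
# Auth.test follows the (environ, username, password) signature expected by
# WSGI basic-auth helpers such as paste.auth.basic; the credentials below
# are illustrative only.
def _demoAuth():
    auth = Auth('admin', 'secret')
    assert auth.test({}, 'admin', 'secret')
    assert not auth.test({}, 'admin', 'wrong')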
|
CouchPotato/CouchPotatoV1
|
app/config/configApp.py
|
Python
|
gpl-3.0
| 11,038
|
import re
from cfme.common.provider import BaseProvider
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import (
Region, Form, AngularSelect, form_buttons, Input, Quadicon
)
from cfme.web_ui.menu import nav
from utils.db import cfmedb
from utils.varmeth import variable
from . import cfg_btn, mon_btn, pol_btn, download, MiddlewareBase
details_page = Region(infoblock_type='detail')
def _db_select_query(name=None, type=None):
"""column order: `id`, `name`, `type`"""
t_ems = cfmedb()['ext_management_systems']
query = cfmedb().session.query(t_ems.id, t_ems.name, t_ems.type)
if name:
query = query.filter(t_ems.name == name)
if type:
query = query.filter(t_ems.type == type)
return query
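# --- Hedged usage sketch (not part of the original module) ---
# Fetch the (id, name, type) row for a named Hawkular provider; assumes a
# reachable appliance database, and the provider name is illustrative.
def _demo_db_select_query():
    return _db_select_query(
        name='hawk01',
        type='ManageIQ::Providers::Hawkular::MiddlewareManager').first()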
def _get_providers_page():
sel.force_navigate('middleware_providers')
nav.add_branch(
'middleware_providers',
{
'middleware_provider_new':
lambda _: cfg_btn('Add a New Middleware Provider'),
'middleware_provider':
[
lambda ctx: sel.check(Quadicon(ctx['provider'].name).checkbox),
{
'middleware_provider_edit':
lambda _: cfg_btn('Edit Selected Middleware Provider'),
'middleware_provider_edit_tags':
lambda _: pol_btn('Edit Tags')
}],
'middleware_provider_detail':
[
lambda ctx: sel.click(Quadicon(ctx['provider'].name)),
{
'middleware_provider_edit_detail':
lambda _: cfg_btn('Edit this Middleware Provider'),
'middleware_provider_timelines_detail':
lambda _: mon_btn('Timelines'),
'middleware_provider_edit_tags_detail':
lambda _: pol_btn('Edit Tags'),
}]
}
)
properties_form = Form(
fields=[
('type_select', AngularSelect('server_emstype')),
('name_text', Input('name')),
('hostname_text', Input('hostname')),
('port_text', Input('port'))
])
class HawkularProvider(MiddlewareBase, BaseProvider):
"""
HawkularProvider class holds provider data. Used to perform actions on hawkular provider page
Args:
name: Name of the provider
hostname: Hostname/IP of the provider
port: http/https port of hawkular provider
credentials: see Credential inner class.
key: The CFME key of the provider in the yaml.
db_id: database row id of provider
Usage:
        myprov = HawkularProvider(name='foo',
                                  hostname='localhost',
                                  port=8080,
                                  credentials=Provider.Credential(principal='admin', secret='foobar'))
        myprov.create()
myprov.num_deployment(method="ui")
"""
STATS_TO_MATCH = ['num_server', 'num_deployment', 'num_datasource']
property_tuples = [('name', 'name'), ('hostname', 'host_name'), ('port', 'port'),
('provider_type', 'type')]
page_name = 'middleware'
string_name = 'Middleware'
detail_page_suffix = 'provider_detail'
edit_page_suffix = 'provider_edit_detail'
refresh_text = "Refresh items and relationships"
quad_name = None
_properties_form = properties_form
add_provider_button = form_buttons.FormButton("Add this Middleware Provider")
save_button = form_buttons.FormButton("Save Changes")
taggable_type = 'ExtManagementSystem'
def __init__(self, name=None, hostname=None, port=None, credentials=None, key=None, **kwargs):
self.name = name
self.hostname = hostname
self.port = port
self.provider_type = 'Hawkular'
if not credentials:
credentials = {}
self.credentials = credentials
self.key = key
        self.db_id = kwargs.get('db_id')
def _form_mapping(self, create=None, **kwargs):
return {'name_text': kwargs.get('name'),
'type_select': create and 'Hawkular',
'hostname_text': kwargs.get('hostname'),
'port_text': kwargs.get('port')}
@variable(alias='db')
def num_deployment(self):
return self._num_db_generic('middleware_deployments')
@num_deployment.variant('ui')
def num_deployment_ui(self, reload_data=True):
if reload_data:
self.summary.reload()
return self.summary.relationships.middleware_deployments.value
@variable(alias='db')
def num_server(self):
return self._num_db_generic('middleware_servers')
@num_server.variant('ui')
def num_server_ui(self, reload_data=True):
if reload_data:
self.summary.reload()
return self.summary.relationships.middleware_servers.value
@variable(alias='db')
def num_datasource(self):
return self._num_db_generic('middleware_datasources')
@num_datasource.variant('ui')
def num_datasource_ui(self, reload_data=True):
if reload_data:
self.summary.reload()
return self.summary.relationships.middleware_datasources.value
@variable(alias='ui')
def is_refreshed(self, reload_data=True):
if reload_data:
self.summary.reload()
        return re.match('Success.*Minute.*Ago', self.summary.status.last_refresh.text_value) is not None
@is_refreshed.variant('db')
def is_refreshed_db(self):
ems = cfmedb()['ext_management_systems']
dates = cfmedb().session.query(ems.created_on,
ems.updated_on).filter(ems.name == self.name).first()
return dates.updated_on > dates.created_on
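    # --- Hedged usage sketch (not part of the original module) ---
    # The @variable decorator (utils.varmeth) dispatches on the `method`
    # keyword, so one accessor name can read either source:
    #   provider.num_server()             # default alias 'db' -> database count
    #   provider.num_server(method='ui')  # UI variant, reloads the summary page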
@classmethod
def download(cls, extension):
_get_providers_page()
download(extension)
def load_details(self, refresh=False):
"""Call super class `load_details` and load `db_id` if not set"""
BaseProvider.load_details(self, refresh=refresh)
if not self.db_id or refresh:
tmp_provider = _db_select_query(
name=self.name, type='ManageIQ::Providers::Hawkular::MiddlewareManager').first()
self.db_id = tmp_provider.id
|
akrzos/cfme_tests
|
cfme/middleware/provider.py
|
Python
|
gpl-2.0
| 6,224
|
from sandglass.time.api import API
from sandglass.time.api import ApiDescribeResource
class ApiV1DescribeResource(ApiDescribeResource):
"""
Resource to describe API version 1.
"""
version = "v1"
def describe(self):
resource_info_list = []
for resource in self.resources:
path = resource.get_collection_path()
resource_info = {
'name': resource.name,
'path': path,
'describe': "{}@describe".format(path),
'doc': (resource.__doc__.strip() if resource.__doc__ else ''),
}
resource_info_list.append(resource_info)
data = {
'version': self.version,
'resources': resource_info_list,
}
return data
def includeme(config):
"""
Load API version 1 resources.
"""
# API version must be the last item in route_prefix
version = config.route_prefix.split('/')[-1]
# Add support for describing resources in current API
config.add_resource_describe(version, ApiV1DescribeResource)
# Load API REST routes for current config path
config.add_api_rest_routes()
# Attach resources to API REST routes
for resource in API.get_resources(version):
config.add_rest_resource(resource)
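# --- Hedged sketch (not part of the original module) ---
# The payload returned by ApiV1DescribeResource.describe is a plain dict;
# the resource values below are illustrative only:
#   {'version': 'v1',
#    'resources': [{'name': 'activity',
#                   'path': '/time/api/v1/activities/',
#                   'describe': '/time/api/v1/activities/@describe',
#                   'doc': 'Activity resource.'}]}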
|
sanglass/sandglass.time
|
sandglass/time/api/v1/__init__.py
|
Python
|
bsd-3-clause
| 1,317
|
### clustering.py
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#import matplotlib
#matplotlib.use('GTKAgg')
import sys, os, string
command_args = string.join(sys.argv,' ')
if len(sys.argv[1:])>0 and '--' in command_args: commandLine=True
else: commandLine=False
import traceback
try:
import math
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
if commandLine==False:
try: matplotlib.use('TkAgg')
except Exception: pass
try: matplotlib.rcParams['backend'] = 'TkAgg'
except Exception: pass
else:
### TkAgg doesn't work when AltAnalyze is run on the command-line
try: matplotlib.use('Agg')
except Exception: pass
try: matplotlib.rcParams['backend'] = 'Agg'
except Exception: pass
try:
import matplotlib.pyplot as pylab
import matplotlib.colors as mc
import matplotlib.mlab as mlab
import matplotlib.ticker as tic
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d import Axes3D
matplotlib.rcParams['axes.linewidth'] = 0.5
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Arial'
except Exception:
print traceback.format_exc()
print 'Matplotlib support not enabled'
import scipy
try: from scipy.sparse.csgraph import _validation
except Exception: pass
from scipy.linalg import svd
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
try: import numpy; np = numpy
except Exception:
print 'Numpy import error...'
print traceback.format_exc()
try:
import igraph.vendor.texttable
except ImportError: pass
try:
from sklearn.decomposition import PCA, FastICA
except Exception: pass
#pylab.ion() # closes Tk window after show - could be nice to include
except Exception:
print traceback.format_exc()
pass
import time
import unique
import statistics
import os
import export
import webbrowser
import warnings
import UI
try:
warnings.simplefilter("ignore", numpy.ComplexWarning)
warnings.simplefilter("ignore", DeprecationWarning) ### Annoying depreciation warnings (occurs in sch somewhere)
#This shouldn't be needed in python 2.7 which suppresses DeprecationWarning - Larsson
except Exception: None
import WikiPathways_webservice
try:
import fastcluster as fc
#print 'Using fastcluster instead of scipy hierarchical cluster'
#fc = sch
except Exception:
    #print 'Using scipy instead of fastcluster (not installed)'
try: fc = sch ### fastcluster uses the same convention names for linkage as sch
except Exception: print 'Scipy support not present...'
def getColorRange(x):
""" Determines the range of colors, centered at zero, for normalizing cmap """
vmax=x.max()
vmin=x.min()
if vmax<0 and vmin<0: direction = 'negative'
elif vmax>0 and vmin>0: direction = 'positive'
else: direction = 'both'
if direction == 'both':
vmax = max([vmax,abs(vmin)])
vmin = -1*vmax
return vmax,vmin
else:
return vmax,vmin
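### --- Hedged usage sketch (not part of the original module) ---
### getColorRange returns a zero-centered (vmax, vmin) pair whenever the data
### span zero, so a diverging colormap places its midpoint at zero. This demo
### is illustrative only and is never called by this module.
def _demoGetColorRange():
    import numpy
    vmax, vmin = getColorRange(numpy.array([[-1.5, 0.2], [1.0, 3.0]]))
    return vmax, vmin ### -> (3.0, -3.0), symmetric about zero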
def heatmap(x, row_header, column_header, row_method, column_method, row_metric, column_metric, color_gradient,
dataset_name, display=False, contrast=None, allowAxisCompression=True,Normalize=True,PriorColumnClusters=None, PriorRowClusters=None):
print "Performing hieararchical clustering using %s for columns and %s for rows" % (column_metric,row_metric)
show_color_bars = True ### Currently, the color bars don't exactly reflect the dendrogram colors
try: ExportCorreleationMatrix = exportCorreleationMatrix
except Exception: ExportCorreleationMatrix = False
try: os.mkdir(root_dir) ### May need to create this directory
except Exception: None
if display == False:
pylab.figure() ### Add this to avoid a Tkinter bug after running MarkerFinder (not sure why it is needed) - creates a second empty window when display == True
if row_method == 'hopach' or column_method == 'hopach':
### Test R and hopach
"""
try:
import R_test
except Exception,e:
#print traceback.format_exc()
print 'Failed to install hopach or R not installed (install R before using hopach)'
row_method = 'average'; column_method = 'average'
if len(column_header)==2: column_method = 'average'
if len(row_header)==2: row_method = 'average'
"""
pass
"""
Prototype methods:
http://old.nabble.com/How-to-plot-heatmap-with-matplotlib--td32534593.html
http://stackoverflow.com/questions/7664826/how-to-get-flat-clustering-corresponding-to-color-clusters-in-the-dendrogram-cre
Scaling the color gradient so that zero is white:
http://stackoverflow.com/questions/2369492/generate-a-heatmap-in-matplotlib-using-a-scatter-data-set
Other cluster methods:
http://stackoverflow.com/questions/9362304/how-to-get-centroids-from-scipys-hierarchical-agglomerative-clustering
x is a m by n ndarray, m observations, n genes
"""
### Perform the associated clustering by HOPACH via PYPE or Rpy to R
if row_method == 'hopach' or column_method == 'hopach':
try:
""" HOPACH is a clustering method implemented in R that builds a hierarchical tree of clusters by recursively
partitioning a data set, while ordering and possibly collapsing clusters at each level:
http://www.bioconductor.org/packages/release/bioc/html/hopach.html
"""
import R_interface
#reload(R_interface)
if row_method == 'hopach' and column_method == 'hopach': cluster_method = 'both'
elif row_method == 'hopach': cluster_method = 'gene'
else: cluster_method = 'array'
if row_metric == 'cosine': metric_gene = "euclid"
elif row_metric == 'euclidean': metric_gene = "cosangle"
elif row_metric == 'correlation': metric_gene = "cor"
else: metric_gene = "cosangle"
if column_metric == 'cosine': metric_array = "euclid"
elif column_metric == 'euclidean': metric_array = "cosangle"
elif column_metric == 'correlation': metric_array = "cor"
else: metric_array = "euclid"
### Returned are the row_order and column_order in the Scipy clustering output format
newFilename, Z1, Z2 = R_interface.remoteHopach(inputFilename,cluster_method,metric_gene,metric_array)
if newFilename != inputFilename:
### If there were duplicates, re-import the matrix data for the cleaned up filename
try:
matrix, column_header, row_header, dataset_name, group_db = importData(newFilename,Normalize=normalize,reverseOrder=False)
except Exception:
matrix, column_header, row_header, dataset_name, group_db = importData(newFilename)
x = numpy.array(matrix)
except Exception:
row_method = 'average'; column_method = 'average'
print traceback.format_exc()
print 'hopach failed... continue with an alternative method'
skipClustering = False
try:
if len(PriorColumnClusters)>0 and len(PriorRowClusters)>0 and row_method==None and column_method == None:
            print 'Prior generated clusters being used rather than re-clustering'
"""
try:
if len(targetGeneIDs)>0:
PriorColumnClusters=[] ### If orderded genes input, we want to retain this order rather than change
except Exception: pass
"""
if len(PriorColumnClusters)>0: ### this corresponds to the above line
Z1={}; Z2={}
Z1['level'] = PriorRowClusters; Z1['level'].reverse()
Z2['level'] = PriorColumnClusters; #Z2['level'].reverse()
Z1['leaves'] = range(0,len(row_header)); #Z1['leaves'].reverse()
Z2['leaves'] = range(0,len(column_header)); #Z2['leaves'].reverse()
row_method = 'hopach'
column_method = 'hopach'
skipClustering = True
except Exception,e:
#print traceback.format_exc()
pass
n = len(x[0]); m = len(x)
if color_gradient == 'red_white_blue':
cmap=pylab.cm.bwr
if color_gradient == 'red_black_sky':
cmap=RedBlackSkyBlue()
if color_gradient == 'red_black_blue':
cmap=RedBlackBlue()
if color_gradient == 'red_black_green':
cmap=RedBlackGreen()
if color_gradient == 'yellow_black_blue':
cmap=YellowBlackBlue()
if color_gradient == 'black_yellow_blue':
cmap=BlackYellowBlue()
if color_gradient == 'seismic':
cmap=pylab.cm.seismic
if color_gradient == 'green_white_purple':
cmap=pylab.cm.PiYG_r
if color_gradient == 'coolwarm':
cmap=pylab.cm.coolwarm
vmin=x.min()
vmax=x.max()
vmax = max([vmax,abs(vmin)])
if Normalize != False:
vmin = vmax*-1
elif 'Clustering-Zscores-' in dataset_name:
vmin = vmax*-1
    default_window_height = 8.5
default_window_width = 12
if len(column_header)>80:
default_window_width = 14
if len(column_header)>100:
default_window_width = 16
if contrast == None:
scaling_factor = 2.5 #2.5
else:
try: scaling_factor = float(contrast)
except Exception: scaling_factor = 2.5
#print vmin/scaling_factor
norm = matplotlib.colors.Normalize(vmin/scaling_factor, vmax/scaling_factor) ### adjust the max and min to scale these colors by 2.5 (1 scales to the highest change)
    fig = pylab.figure(figsize=(default_window_width,default_window_height)) ### could use m,n to scale here
pylab.rcParams['font.size'] = 7.5
if show_color_bars == False:
color_bar_w = 0.000001 ### Invisible but not gone (otherwise an error persists)
else:
color_bar_w = 0.0125 ### Sufficient size to show
bigSampleDendrogram = True
if bigSampleDendrogram == True and row_method==None and column_method != None and allowAxisCompression == True:
dg2 = 0.30
dg1 = 0.43
else: dg2 = 0.1; dg1 = 0.63
try:
if EliteGeneSets != [''] and EliteGeneSets !=[]:
matrix_horiz_pos = 0.27
elif skipClustering:
if len(row_header)<100:
matrix_horiz_pos = 0.20
else:
matrix_horiz_pos = 0.27
else:
matrix_horiz_pos = 0.14
except Exception:
matrix_horiz_pos = 0.14
## calculate positions for all elements
# ax1, placement of dendrogram 1, on the left of the heatmap
    [ax1_x, ax1_y, ax1_w, ax1_h] = [0.05,0.235,matrix_horiz_pos,dg1] ### The last controls matrix height, second value controls the position of the matrix relative to the bottom of the view [0.05,0.22,0.2,0.6]
width_between_ax1_axr = 0.004
height_between_ax1_axc = 0.004 ### distance between the top color bar axis and the matrix
# axr, placement of row side colorbar
[axr_x, axr_y, axr_w, axr_h] = [0.31,0.1,color_bar_w-0.002,0.6] ### second to last controls the width of the side color bar - 0.015 when showing [0.31,0.1,color_bar_w,0.6]
axr_x = ax1_x + ax1_w + width_between_ax1_axr
axr_y = ax1_y; axr_h = ax1_h
width_between_axr_axm = 0.004
# axc, placement of column side colorbar (3rd value controls the width of the matrix!)
    [axc_x, axc_y, axc_w, axc_h] = [0.4,0.63,0.6,color_bar_w] ### last one controls the height of the top color bar - 0.015 when showing [0.4,0.63,0.5,color_bar_w]
axc_x = axr_x + axr_w + width_between_axr_axm
axc_y = ax1_y + ax1_h + height_between_ax1_axc
height_between_axc_ax2 = 0.004
# axm, placement of heatmap for the data matrix
[axm_x, axm_y, axm_w, axm_h] = [0.4,0.9,2.5,0.5] #[0.4,0.9,2.5,0.5]
axm_x = axr_x + axr_w + width_between_axr_axm
axm_y = ax1_y; axm_h = ax1_h
axm_w = axc_w
# ax2, placement of dendrogram 2, on the top of the heatmap
    [ax2_x, ax2_y, ax2_w, ax2_h] = [0.3,0.72,0.6,dg2] ### last one controls height of the dendrogram [0.3,0.72,0.6,0.135]
ax2_x = axr_x + axr_w + width_between_axr_axm
ax2_y = ax1_y + ax1_h + height_between_ax1_axc + axc_h + height_between_axc_ax2
ax2_w = axc_w
# axcb - placement of the color legend
    [axcb_x, axcb_y, axcb_w, axcb_h] = [0.02,0.938,0.17,0.025] ### Last one controls the height [0.07,0.88,0.18,0.076]
    # axcc - placement of the column colormap legend (distinct map)
    [axcc_x, axcc_y, axcc_w, axcc_h] = [0.02,0.12,0.17,0.025] ### Last one controls the height [0.07,0.88,0.18,0.076]
# Compute and plot top dendrogram
if column_method == 'hopach':
ind2 = numpy.array(Z2['level']) ### from R_interface - hopach root cluster level
elif column_method != None:
start_time = time.time()
#print x;sys.exit()
d2 = dist.pdist(x.T)
#print d2
#import mdistance2
#d2 = mdistance2.mpdist(x.T)
#print d2;sys.exit()
D2 = dist.squareform(d2)
ax2 = fig.add_axes([ax2_x, ax2_y, ax2_w, ax2_h], frame_on=False)
if ExportCorreleationMatrix:
new_matrix=[]
for i in D2:
#string.join(map(inverseDist,i),'\t')
log2_data = map(inverseDist,i)
avg = statistics.avg(log2_data)
log2_norm = map(lambda x: x-avg,log2_data)
new_matrix.append(log2_norm)
x = numpy.array(new_matrix)
row_header = column_header
#sys.exit()
Y2 = fc.linkage(D2, method=column_method, metric=column_metric) ### array-clustering metric - 'average', 'single', 'centroid', 'complete'
#Y2 = sch.fcluster(Y2, 10, criterion = "maxclust")
try: Z2 = sch.dendrogram(Y2)
except Exception:
if column_method == 'average':
column_metric = 'euclidean'
else: column_method = 'average'
Y2 = fc.linkage(D2, method=column_method, metric=column_metric)
Z2 = sch.dendrogram(Y2)
#ind2 = sch.fcluster(Y2,0.6*D2.max(), 'distance') ### get the correlations
#ind2 = sch.fcluster(Y2,0.2*D2.max(), 'maxclust') ### alternative method biased based on number of clusters to obtain (like K-means)
ind2 = sch.fcluster(Y2,0.7*max(Y2[:,2]),'distance') ### This is the default behavior of dendrogram
ax2.set_xticks([]) ### Hides ticks
ax2.set_yticks([])
time_diff = str(round(time.time()-start_time,1))
print 'Column clustering completed in %s seconds' % time_diff
else:
ind2 = ['NA']*len(column_header) ### Used for exporting the flat cluster data
# Compute and plot left dendrogram
if row_method == 'hopach':
ind1 = numpy.array(Z1['level']) ### from R_interface - hopach root cluster level
elif row_method != None:
start_time = time.time()
d1 = dist.pdist(x)
D1 = dist.squareform(d1) # full matrix
        # position = [left(x), bottom(y), width, height]
#print D1;sys.exit()
Y1 = fc.linkage(D1, method=row_method, metric=row_metric) ### gene-clustering metric - 'average', 'single', 'centroid', 'complete'
no_plot=False ### Indicates that we want to show the dendrogram
try:
if runGOElite: no_plot = True
elif len(PriorColumnClusters)>0 and len(PriorRowClusters)>0 and row_method==None and column_method == None:
                no_plot = True ### If trying to instantly view prior results, no dendrogram is displayed, but prior GO-Elite results can still be shown
else:
ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=False) # frame_on may be False - this window conflicts with GO-Elite labels
except Exception:
ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=False) # frame_on may be False
try: Z1 = sch.dendrogram(Y1, orientation='right',no_plot=no_plot) ### This is where plotting occurs
except Exception:
row_method = 'average'
try:
Y1 = fc.linkage(D1, method=row_method, metric=row_metric)
Z1 = sch.dendrogram(Y1, orientation='right',no_plot=no_plot)
except Exception:
row_method = 'ward'
Y1 = fc.linkage(D1, method=row_method, metric=row_metric)
Z1 = sch.dendrogram(Y1, orientation='right',no_plot=no_plot)
#ind1 = sch.fcluster(Y1,0.6*D1.max(),'distance') ### get the correlations
#ind1 = sch.fcluster(Y1,0.2*D1.max(),'maxclust')
ind1 = sch.fcluster(Y1,0.7*max(Y1[:,2]),'distance') ### This is the default behavior of dendrogram
if ExportCorreleationMatrix:
Z1 = sch.dendrogram(Y2, orientation='right')
Y1 = Y2
d1 = d2
D1 = D2
ind1 = ind2
try: ax1.set_xticks([]); ax1.set_yticks([]) ### Hides ticks
except Exception: pass
time_diff = str(round(time.time()-start_time,1))
print 'Row clustering completed in %s seconds' % time_diff
else:
ind1 = ['NA']*len(row_header) ### Used for exporting the flat cluster data
# Plot distance matrix.
axm = fig.add_axes([axm_x, axm_y, axm_w, axm_h]) # axes for the data matrix
xt = x
if column_method != None:
idx2 = Z2['leaves'] ### apply the clustering for the array-dendrograms to the actual matrix data
xt = xt[:,idx2]
#ind2 = ind2[:,idx2] ### reorder the flat cluster to match the order of the leaves the dendrogram
""" Error can occur here if hopach was selected in a prior run but now running NONE """
ind2 = [ind2[i] for i in idx2] ### replaces the above due to numpy specific windows version issue
if row_method != None:
idx1 = Z1['leaves'] ### apply the clustering for the gene-dendrograms to the actual matrix data
prior_xt = xt
xt = xt[idx1,:] # xt is transformed x
#ind1 = ind1[idx1,:] ### reorder the flat cluster to match the order of the leaves the dendrogram
try: ind1 = [ind1[i] for i in idx1] ### replaces the above due to numpy specific windows version issue
except Exception:
if 'MarkerGenes' in dataset_name:
ind1 = ['NA']*len(row_header) ### Used for exporting the flat cluster data
row_method = None
### taken from http://stackoverflow.com/questions/2982929/plotting-results-of-hierarchical-clustering-ontop-of-a-matrix-of-data-in-python/3011894#3011894
im = axm.matshow(xt, aspect='auto', origin='lower', cmap=cmap, norm=norm) ### norm=norm added to scale coloring of expression with zero = white or black
axm.set_xticks([]) ### Hides x-ticks
axm.set_yticks([])
#axm.set_axis_off() ### Hide border
#fix_verts(ax1,1)
#fix_verts(ax2,0)
### Adjust the size of the fonts for genes and arrays based on size and character length
row_fontsize = 5
column_fontsize = 5
column_text_max_len = max(map(lambda x: len(x), column_header)) ### Get the maximum length of a column annotation
if len(row_header)<75:
row_fontsize = 6.5
if len(row_header)<50:
row_fontsize = 8
if len(row_header)<25:
row_fontsize = 11
if len(column_header)<75:
column_fontsize = 6.5
if len(column_header)<50:
column_fontsize = 8
if len(column_header)<25:
column_fontsize = 11
if column_text_max_len < 15:
column_fontsize = 15
elif column_text_max_len > 30:
column_fontsize = 6.5
else:
column_fontsize = 10
try:
if len(justShowTheseIDs)>50:
column_fontsize = 7
elif len(justShowTheseIDs)>0:
column_fontsize = 10
if len(justShowTheseIDs)>0:
additional_symbols=[]
import gene_associations, OBO_import
try:
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
#symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception: gene_to_symbol={}; symbol_to_gene={}
except Exception: pass
# Add text
new_row_header=[]
new_column_header=[]
for i in range(x.shape[0]):
if row_method != None:
new_row_header.append(row_header[idx1[i]])
else:
new_row_header.append(row_header[i])
for i in range(x.shape[1]):
if column_method != None:
new_column_header.append(column_header[idx2[i]])
else: ### When not clustering columns
new_column_header.append(column_header[i])
dataset_name = string.replace(dataset_name,'Clustering-','')### clean up the name if already a clustered file
if '-hierarchical' in dataset_name:
dataset_name = string.split(dataset_name,'-hierarchical')[0]
filename = 'Clustering-%s-hierarchical_%s_%s.pdf' % (dataset_name,column_metric,row_metric)
elite_dir, cdt_file, SystemCode = exportFlatClusterData(root_dir + filename, root_dir, dataset_name, new_row_header,new_column_header,xt,ind1,ind2,vmax,display)
def ViewPNG(png_file_dir):
if os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception: os.system('open "'+png_file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+png_file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+png_file_dir+'"')
try:
try:
temp1=len(justShowTheseIDs)
if 'monocle' in justShowTheseIDs and ('driver' not in justShowTheseIDs and 'guide' not in justShowTheseIDs):
import R_interface
print 'Running Monocle through R (be patient, this can take 20 minutes+)'
R_interface.performMonocleAnalysisFromHeatmap(species,cdt_file[:-3]+'txt',cdt_file[:-3]+'txt')
png_file_dir = root_dir+'/Monocle/monoclePseudotime.png'
#print png_file_dir
ViewPNG(png_file_dir)
except Exception: pass # no justShowTheseIDs
except Exception:
print '...Monocle error:'
print traceback.format_exc()
pass
cluster_elite_terms={}; ge_fontsize=11.5; top_genes=[]; proceed=True
try:
try:
if 'driver' in justShowTheseIDs or 'guide' in justShowTheseIDs: proceed = False
except Exception: pass
if proceed:
try:
cluster_elite_terms,top_genes = remoteGOElite(elite_dir,SystemCode=SystemCode)
if cluster_elite_terms['label-size']>40: ge_fontsize = 9.5
except Exception:
pass
except Exception: pass #print traceback.format_exc()
if len(cluster_elite_terms)<1:
try:
elite_dirs = string.split(elite_dir,'GO-Elite')
            old_elite_dir = elite_dirs[0]+'GO-Elite'+elite_dirs[-1] ### There are nested GO-Elite/GO-Elite directories for the already clustered results
old_elite_dir = string.replace(old_elite_dir,'ICGS/','')
if len(PriorColumnClusters)>0 and len(PriorRowClusters)>0 and skipClustering:
cluster_elite_terms,top_genes = importGOEliteResults(old_elite_dir)
except Exception,e:
#print traceback.format_exc()
pass
try:
if len(justShowTheseIDs)<1 and len(top_genes) > 0 and column_fontsize < 9:
column_fontsize = 10
if len(justShowTheseIDs)<1:
additional_symbols=[]
import gene_associations, OBO_import
try:
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
#symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception: gene_to_symbol={}; symbol_to_gene={}
except Exception: pass
def formatpval(p):
if '-' in p: p1=p[:1]+p[-4:]
else:
p1 = '{number:.{digits}f}'.format(number=float(p), digits=3)
p1=str(p1)
#print traceback.format_exc();sys.exit()
return p1
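    ### --- Hedged note (not part of the original module) ---
    ### formatpval keeps scientific notation compact and rounds plain
    ### decimals to three places, e.g.:
    ###   formatpval('1.234e-05') -> '1e-05'
    ###   formatpval('0.01234')   -> '0.012'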
# Add text
new_row_header=[]
new_column_header=[]
ci=0 ### index of entries in the cluster
last_cluster=1
    interval = int(len(row_header)/35.0)+1 ### for enrichment term labels with over 100 genes
increment=interval-2
if len(row_header)<100: increment = interval-1
label_pos=-0.03*len(column_header)-.5
#print label_pos
try:
if 'top' in justShowTheseIDs: justShowTheseIDs.remove('top')
if 'positive' in justShowTheseIDs: justShowTheseIDs.remove('positive')
if 'amplify' in justShowTheseIDs: justShowTheseIDs.remove('amplify')
if 'IntraCorrelatedOnly' in justShowTheseIDs: justShowTheseIDs.remove('IntraCorrelatedOnly')
if 'GuideOnlyCorrelation' in justShowTheseIDs: justShowTheseIDs.remove('GuideOnlyCorrelation')
except Exception:
pass
for i in range(x.shape[0]):
if len(row_header)<40:
radj = len(row_header)*0.009 ### row offset value to center the vertical position of the row label
elif len(row_header)<70:
radj = len(row_header)*0.007 ### row offset value to center the vertical position of the row label
else:
radj = len(row_header)*0.005
cluster = str(ind1[i])
if cluster == 'NA':
new_index = i
try: cluster = 'cluster-'+string.split(row_header[new_index],':')[0]
except Exception: pass
if cluster != last_cluster:
ci=0
increment=0
#print cluster,i,row_header[idx1[i]]
color = 'black'
if row_method != None:
try:
if row_header[idx1[i]] in justShowTheseIDs:
if len(row_header)>len(justShowTheseIDs):
color = 'red'
else: color = 'black'
except Exception: pass
if len(row_header)<106: ### Don't visualize gene associations when more than 100 rows
axm.text(x.shape[1]-0.5, i-radj, ' '+row_header[idx1[i]],fontsize=row_fontsize, color=color, picker=True)
new_row_header.append(row_header[idx1[i]])
new_index = idx1[i]
else:
try:
if row_header[i] in justShowTheseIDs: color = 'red'
else: color = 'black'
except Exception: pass
if len(row_header)<106: ### Don't visualize gene associations when more than 100 rows
axm.text(x.shape[1]-0.5, i-radj, ' '+row_header[i],fontsize=row_fontsize, color=color, picker=True) ### When not clustering rows
new_row_header.append(row_header[i])
new_index = i ### This is different when clustering rows versus not
if len(row_header)<106:
"""
if cluster in cluster_elite_terms:
try:
term = cluster_elite_terms[cluster][ci][1]
axm.text(-1.5, i-radj, term,horizontalalignment='right',fontsize=row_fontsize)
except Exception: pass
ci+=1
"""
pass
else:
feature_id = row_header[new_index]
if ':' in feature_id:
if 'ENS' != feature_id[:3] or 'G0000' in feature_id:
feature_id = string.split(feature_id,':')[1]
else:
feature_id = string.split(feature_id,':')[0]
try: feature_id = gene_to_symbol[feature_id][0]
except Exception: pass
if (' ' in feature_id and ('ENS' in feature_id or 'G0000' in feature_id)):
feature_id = string.split(feature_id,' ')[1]
try:
if feature_id in justShowTheseIDs: color = 'red'
else: color = 'black'
except Exception: pass
try:
if feature_id in justShowTheseIDs or (len(justShowTheseIDs)<1 and feature_id in top_genes):
axm.text(x.shape[1]-0.5, i-radj, ' '+feature_id,fontsize=column_fontsize, color=color,picker=True) ### When not clustering rows
#axm.text(x.shape[1]-0.5, i-radj, ' '+"-",fontsize=column_fontsize, color=color,picker=True) ### When not clustering rows
elif ' ' in row_header[new_index]:
symbol = string.split(row_header[new_index], ' ')[-1]
if symbol in justShowTheseIDs:
axm.text(x.shape[1]-0.5, i-radj, ' '+row_header[new_index],fontsize=column_fontsize, color=color,picker=True)
#axm.text(x.shape[1]-0.5, i-radj, ' '+"-",fontsize=column_fontsize, color=color,picker=True)
except Exception: pass
if cluster in cluster_elite_terms:
if cluster != last_cluster:
cluster_intialized = False
try:
increment+=1
#print [increment,interval,cluster],cluster_elite_terms[cluster][ci][1];sys.exit()
#if increment == interval or (
#print increment,interval,len(row_header),cluster_intialized
if (increment == interval) or (len(row_header)>200 and increment == (interval-9) and cluster_intialized==False): ### second argument brings the label down
cluster_intialized=True
atypical_cluster = False
                    if ind1[i+9] == 'NA': ### This occurs for custom clusters, such as MarkerFinder (labels rather than cluster numbers)
atypical_cluster = True
cluster9 = 'cluster-'+string.split(row_header[new_index+9],':')[0]
if (len(row_header)>200 and str(cluster9)!=cluster): continue
elif (len(row_header)>200 and str(ind1[i+9])!=cluster): continue ### prevents the last label in a cluster from overlapping with the first in the next cluster
pvalue,original_term = cluster_elite_terms[cluster][ci]
term = original_term
if 'GO:' in term:
term = string.split(term, '(')[0]
if ':WP' in term:
term = string.split(term, ':WP')[0]
pvalue = formatpval(str(pvalue))
term += ' p='+pvalue
if atypical_cluster == False:
term += ' (c'+str(cluster)+')'
try: cluster_elite_terms[term] = cluster_elite_terms[cluster,original_term] ### store the new term name with the associated genes
except Exception: pass
axm.text(label_pos, i-radj, term,horizontalalignment='right',fontsize=ge_fontsize, picker=True, color = 'blue')
increment=0
ci+=1
except Exception,e:
#print traceback.format_exc();sys.exit()
increment=0
last_cluster = cluster
def onpick1(event):
text = event.artist
print('onpick1 text:', text.get_text())
if 'TreeView' in text.get_text():
try: openTreeView(cdt_file)
except Exception: print 'Failed to open TreeView'
elif 'p=' not in text.get_text():
webbrowser.open('http://www.genecards.org/cgi-bin/carddisp.pl?gene='+string.replace(text.get_text(),' ',''))
else:
#"""
import TableViewer
header = ['Associated Genes']
tuple_list = []
for gene in cluster_elite_terms[text.get_text()]:
tuple_list.append([(gene)])
TableViewer.viewTable(text.get_text(),header,tuple_list) #"""
cluster_prefix = 'c'+string.split(text.get_text(),'(c')[1][:-1]+'-'
for geneSet in EliteGeneSets:
if geneSet == 'GeneOntology':
png_file_dir = elite_dir+'/GO-Elite_results/networks/'+cluster_prefix+'GO'+'.png'
elif geneSet == 'WikiPathways':
png_file_dir = elite_dir+'/GO-Elite_results/networks/'+cluster_prefix+'local'+'.png'
elif len(geneSet)>1:
png_file_dir = elite_dir+'/GO-Elite_results/networks/'+cluster_prefix+geneSet+'.png'
#try: UI.GUI(root_dir,'ViewPNG',[],png_file_dir)
#except Exception: print traceback.format_exc()
try:
alt_png_file_dir = elite_dir+'/GO-Elite_results/networks/'+cluster_prefix+eliteGeneSet+'.png'
png_file_dirs = string.split(alt_png_file_dir,'GO-Elite/')
alt_png_file_dir = png_file_dirs[0]+'GO-Elite/'+png_file_dirs[-1]
except Exception: pass
if os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception:
try: os.system('open "'+png_file_dir+'"')
except Exception: os.startfile('"'+alt_png_file_dir+'"')
elif 'darwin' in sys.platform:
try: os.system('open "'+png_file_dir+'"')
except Exception: os.system('open "'+alt_png_file_dir+'"')
elif 'linux' in sys.platform:
try: os.system('xdg-open "'+png_file_dir+'"')
except Exception: os.system('xdg-open "'+alt_png_file_dir+'"')
#print cluster_elite_terms[text.get_text()]
fig.canvas.mpl_connect('pick_event', onpick1)
for i in range(x.shape[1]):
adji = i
### Controls the vertical position of the column (array) labels
if len(row_header)<3:
cadj = len(row_header)*-0.26 ### column offset value
elif len(row_header)<4:
cadj = len(row_header)*-0.23 ### column offset value
elif len(row_header)<6:
cadj = len(row_header)*-0.18 ### column offset value
elif len(row_header)<10:
cadj = len(row_header)*-0.08 ### column offset value
elif len(row_header)<15:
cadj = len(row_header)*-0.04 ### column offset value
elif len(row_header)<20:
cadj = len(row_header)*-0.05 ### column offset value
elif len(row_header)<22:
cadj = len(row_header)*-0.06 ### column offset value
elif len(row_header)<23:
cadj = len(row_header)*-0.08 ### column offset value
elif len(row_header)>200:
cadj = -2
else:
cadj = -0.9
#cadj = -1
if len(column_header)>15:
adji = i-0.1 ### adjust the relative position of the column label horizontally
if len(column_header)>20:
adji = i-0.2 ### adjust the relative position of the column label horizontally
if len(column_header)>25:
adji = i-0.2 ### adjust the relative position of the column label horizontally
if len(column_header)>30:
adji = i-0.25 ### adjust the relative position of the column label horizontally
if len(column_header)>35:
adji = i-0.3 ### adjust the relative position of the column label horizontally
if column_method != None:
axm.text(adji, cadj, ''+column_header[idx2[i]], rotation=270, verticalalignment="top",fontsize=column_fontsize) # rotation could also be degrees
new_column_header.append(column_header[idx2[i]])
else: ### When not clustering columns
axm.text(adji, cadj, ''+column_header[i], rotation=270, verticalalignment="top",fontsize=column_fontsize)
new_column_header.append(column_header[i])
# Plot colside colors
# axc --> axes for column side colorbar
group_name_list=[]
ind1_clust,ind2_clust = ind1,ind2
ind1,ind2,group_name_list,cb_status = updateColorBarData(ind1,ind2,new_column_header,new_row_header,row_method)
if (column_method != None or 'column' in cb_status) and show_color_bars == True:
axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h]) # axes for column side colorbar
cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF', '#CCCCE0','#000066','#FFFF00', '#FF1493'])
#cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF','#FFFF00', '#FF1493'])
if len(unique.unique(ind2))==2: ### cmap_c is too few colors
#cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
cmap_c = matplotlib.colors.ListedColormap(['w', 'k'])
elif len(unique.unique(ind2))==3: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif len(unique.unique(ind2))==4: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C','#FEBC18'])
elif len(unique.unique(ind2))==5: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2))==6: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
#cmap_c = matplotlib.colors.ListedColormap(['w', '#0B9B48', 'w', '#5D82C1','#4CB1E4','#71C065'])
#cmap_c = matplotlib.colors.ListedColormap(['w', 'w', 'k', 'w','w','w'])
elif len(unique.unique(ind2))==7: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
#cmap_c = matplotlib.colors.ListedColormap(['w', 'w', 'w', 'k', 'w','w','w'])
#cmap_c = matplotlib.colors.ListedColormap(['w','w', '#0B9B48', 'w', '#5D82C1','#4CB1E4','#71C065'])
#elif len(unique.unique(ind2))==10: cmap_c = matplotlib.colors.ListedColormap(['w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'k'])
elif len(unique.unique(ind2))==11:
cmap_c = matplotlib.colors.ListedColormap(['#DC2342', 'k', '#0B9B48', '#FDDF5E', '#E0B724', 'w', '#5D82C1', '#F79020', '#4CB1E4', '#983894', '#71C065'])
elif len(unique.unique(ind2))>0: ### cmap_c is too few colors
cmap_c = pylab.cm.gist_rainbow
dc = numpy.array(ind2, dtype=int)
dc.shape = (1,len(ind2))
im_c = axc.matshow(dc, aspect='auto', origin='lower', cmap=cmap_c)
axc.set_xticks([]) ### Hides ticks
if 'hopach' == column_method and len(group_name_list)>0:
axc.set_yticklabels(['','Groups'],fontsize=10)
else:
axc.set_yticks([])
#axc.set_frame_on(False) ### Hide border
if len(group_name_list)>0: ### Add a group color legend key
if 'hopach' == column_method: ### allows us to add the second color bar
axcd = fig.add_axes([ax2_x, ax2_y, ax2_w, color_bar_w]) # dendrogram coordinates with color_bar_w substituted - can use because dendrogram is not used
cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF', '#CCCCE0','#000066','#FFFF00', '#FF1493'])
#cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF','#FFFF00', '#FF1493'])
if len(unique.unique(ind2_clust))==2: ### cmap_c is too few colors
#cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
cmap_c = matplotlib.colors.ListedColormap(['w', 'k'])
elif len(unique.unique(ind2_clust))==3: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif len(unique.unique(ind2_clust))==4: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif len(unique.unique(ind2_clust))==5: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2_clust))==6: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2_clust))==7: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2_clust))>0: ### cmap_c is too few colors
cmap_c = pylab.cm.gist_rainbow
dc = numpy.array(ind2_clust, dtype=int)
dc.shape = (1,len(ind2_clust))
im_cd = axcd.matshow(dc, aspect='auto', origin='lower', cmap=cmap_c)
#axcd.text(-1,-1,'clusters')
axcd.set_yticklabels(['','Clusters'],fontsize=10)
#pylab.yticks(range(1),['HOPACH clusters'])
axcd.set_xticks([]) ### Hides ticks
#axcd.set_yticks([])
axd = fig.add_axes([axcc_x, axcc_y, axcc_w, axcc_h])
group_name_list.sort()
group_colors = map(lambda x: x[0],group_name_list)
group_names = map(lambda x: x[1],group_name_list)
cmap_d = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF', '#CCCCE0','#000066','#FFFF00', '#FF1493'])
#cmap_d = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF','#FFFF00', '#FF1493'])
if len(unique.unique(ind2))==2: ### cmap_c is too few colors
#cmap_d = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
cmap_d = matplotlib.colors.ListedColormap(['w', 'k'])
elif len(unique.unique(ind2))==3: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif len(unique.unique(ind2))==4: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif len(unique.unique(ind2))==5: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2))==6: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
#cmap_d = matplotlib.colors.ListedColormap(['w', '#0B9B48', 'w', '#5D82C1','#4CB1E4','#71C065'])
#cmap_d = matplotlib.colors.ListedColormap(['w', 'w', 'k', 'w', 'w','w','w'])
elif len(unique.unique(ind2))==7: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
#cmap_d = matplotlib.colors.ListedColormap(['w', 'w', 'w', 'k', 'w','w','w'])
#cmap_d = matplotlib.colors.ListedColormap(['w','w', '#0B9B48', 'w', '#5D82C1','#4CB1E4','#71C065'])
#elif len(unique.unique(ind2))==10: cmap_d = matplotlib.colors.ListedColormap(['w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'k'])
elif len(unique.unique(ind2))==11:
#Eryth Gfi1 Gran HSCP-1 HSCP-2 IG2 MDP Meg Mono Multi-Lin Myelo
cmap_d = matplotlib.colors.ListedColormap(['#DC2342', 'k', '#0B9B48', '#FDDF5E', '#E0B724', 'w', '#5D82C1', '#F79020', '#4CB1E4', '#983894', '#71C065'])
elif len(unique.unique(ind2))>0: ### cmap_c is too few colors
cmap_d = pylab.cm.gist_rainbow
dc = numpy.array(group_colors, dtype=int)
dc.shape = (1,len(group_colors))
im_c = axd.matshow(dc, aspect='auto', origin='lower', cmap=cmap_d)
axd.set_yticks([])
#axd.set_xticklabels(group_names, rotation=45, ha='left')
pylab.xticks(range(len(group_names)),group_names,rotation=45,ha='left')
#cmap_c = matplotlib.colors.ListedColormap(map(lambda x: GroupDB[x][-1], new_column_header))
if show_color_bars == False:
axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h]) # axes for column side colorbar
axc.set_frame_on(False)
# Plot rowside colors
# axr --> axes for row side colorbar
if (row_method != None or 'row' in cb_status) and show_color_bars == True:
axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h]) # axes for column side colorbar
try:
dr = numpy.array(ind1, dtype=int)
dr.shape = (len(ind1),1)
#print ind1, len(ind1)
cmap_r = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF', '#FFFF00', '#FF1493'])
if len(unique.unique(ind1))>4: ### cmap_r is too few colors
cmap_r = pylab.cm.gist_rainbow
im_r = axr.matshow(dr, aspect='auto', origin='lower', cmap=cmap_r)
axr.set_xticks([]) ### Hides ticks
axr.set_yticks([])
#axr.set_frame_on(False) ### Hide border
except Exception:
row_method = None
pass ### likely occurs for some reason when row_method should be None
if show_color_bars == False:
axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h]) # axes for column side colorbar
axr.set_frame_on(False)
# Plot color legend
axcb = fig.add_axes([axcb_x, axcb_y, axcb_w, axcb_h], frame_on=False) # axes for colorbar
cb = matplotlib.colorbar.ColorbarBase(axcb, cmap=cmap, norm=norm, orientation='horizontal')
#axcb.set_title("colorkey",fontsize=14)
if 'LineageCorrelations' in dataset_name:
cb.set_label("Lineage Correlation Z Scores",fontsize=11)
elif 'Heatmap' in root_dir:
cb.set_label("GO-Elite Z Scores",fontsize=11)
else:
cb.set_label("Differential Expression (log2)",fontsize=10)
### Add filename label to the heatmap
if len(dataset_name)>30:fontsize = 10
else: fontsize = 12.5
fig.text(0.015, 0.970, dataset_name, fontsize = fontsize)
### Render and save the graphic
pylab.savefig(root_dir + filename)
#print 'Exporting:',filename
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename, dpi=100) #,dpi=200
includeBackground=False
try:
if 'TkAgg' != matplotlib.rcParams['backend']:
includeBackground = False
except Exception: pass
if includeBackground:
fig.text(0.020, 0.070, 'Open heatmap in TreeView (click here)', fontsize = 11.5, picker=True,color = 'red', backgroundcolor='white')
else:
fig.text(0.020, 0.070, 'Open heatmap in TreeView (click here)', fontsize = 11.5, picker=True,color = 'red')
if 'Outlier' in dataset_name and 'Removed' not in dataset_name:
        graphic_link.append(['Hierarchical Clustering - Outlier Genes',root_dir+filename])
elif 'Relative' in dataset_name:
graphic_link.append(['Hierarchical Clustering - Significant Genes (Relative comparisons)',root_dir+filename])
elif 'LineageCorrelations' in filename:
graphic_link.append(['Hierarchical Clustering - Lineage Correlations',root_dir+filename])
elif 'MarkerGenes' in filename:
graphic_link.append(['Hierarchical Clustering - MarkerFinder',root_dir+filename])
elif 'AltExonConfirmed' in filename:
graphic_link.append(['Hierarchical Clustering - AltExonConfirmed',root_dir+filename])
elif 'AltExon' in filename:
graphic_link.append(['Hierarchical Clustering - AltExon',root_dir+filename])
else:
graphic_link.append(['Hierarchical Clustering - Significant Genes',root_dir+filename])
if display:
proceed=True
try:
if 'driver' in justShowTheseIDs or 'guide' in justShowTheseIDs:
proceed = False
except Exception: pass
if proceed:
print 'Exporting:',filename
try: pylab.show()
except Exception: None ### when run in headless mode
fig.clf()
#fig.close() causes segfault
#pylab.close() causes segfault
def openTreeView(filename):
import subprocess
fn = filepath("AltDatabase/TreeView/TreeView.jar")
retcode = subprocess.Popen(['java', "-Xmx500m", '-jar', fn, "-r", filename])
def remoteGOElite(elite_dir,SystemCode = None):
mod = 'Ensembl'
if SystemCode == 'Ae':
mod = 'AltExon'
pathway_permutations = 'FisherExactTest'
filter_method = 'z-score'
z_threshold = 1.96
p_val_threshold = 0.05
change_threshold = 0
if runGOElite:
resources_to_analyze = EliteGeneSets
if 'all' in resources_to_analyze:
resources_to_analyze = 'all'
returnPathways = 'no'
root = None
import GO_Elite
reload(GO_Elite)
input_files = dir_list = unique.read_directory(elite_dir) ### Are there any files to analyze?
if len(input_files)>0 and resources_to_analyze !=['']:
print '\nBeginning to run GO-Elite analysis on all results'
file_dirs = elite_dir,None,elite_dir
enrichmentAnalysisType = 'ORA'
#enrichmentAnalysisType = 'URA'
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,enrichmentAnalysisType,root
try: GO_Elite.remoteAnalysis(variables,'non-UI Heatmap')
            except Exception: print 'GO-Elite failed for:',elite_dir
try: UI.openDirectory(elite_dir+'/GO-Elite_results')
except Exception: None
cluster_elite_terms,top_genes = importGOEliteResults(elite_dir)
return cluster_elite_terms,top_genes
else:
return {},[]
else:
return {},[]
def importGOEliteResults(elite_dir):
global eliteGeneSet
pruned_results = elite_dir+'/GO-Elite_results/CompleteResults/ORA_pruned/pruned-results_z-score_elite.txt' ### This is the exception (not moved)
if os.path.isfile(pruned_results) == False:
pruned_results = elite_dir+'/GO-Elite_results/pruned-results_z-score_elite.txt'
firstLine=True
cluster_elite_terms={}
all_term_length=[0]
for line in open(pruned_results,'rU').xreadlines():
data = line.rstrip()
values = string.split(data,'\t')
if firstLine:
firstLine=False
try: symbol_index = values.index('gene symbols')
except Exception: symbol_index = None
else:
try: symbol_index = values.index('gene symbols')
except Exception: pass
try:
eliteGeneSet = string.split(values[0][1:],'-')[1][:-4]
try: cluster = str(int(float(string.split(values[0][1:],'-')[0])))
except Exception:
cluster = string.join(string.split(values[0],'-')[:-1],'-')
term = values[2]
all_term_length.append(len(term))
pval = float(values[9])
try: cluster_elite_terms[cluster].append([pval,term])
except Exception: cluster_elite_terms[cluster] = [[pval,term]]
if symbol_index!=None:
symbols = string.split(values[symbol_index],'|')
cluster_elite_terms[cluster,term] = symbols
except Exception,e: pass
for cluster in cluster_elite_terms:
cluster_elite_terms[cluster].sort()
cluster_elite_terms['label-size'] = max(all_term_length)
top_genes = []; count=0
ranked_genes = elite_dir+'/GO-Elite_results/CompleteResults/ORA_pruned/gene_associations/pruned-gene-ranking.txt'
for line in open(ranked_genes,'rU').xreadlines():
data = line.rstrip()
values = string.split(data,'\t')
count+=1
if len(values)>2:
if values[2]!='Symbol':
try: top_genes.append((int(values[4]),values[2]))
except Exception: pass
top_genes.sort(); top_genes.reverse()
top_genes = map(lambda x: x[1],top_genes[:21])
return cluster_elite_terms,top_genes
def mergeRotateAroundPointPage(page, page2, rotation, tx, ty):
from pyPdf import PdfFileWriter, PdfFileReader
translation = [[1, 0, 0],
[0, 1, 0],
[-tx,-ty,1]]
rotation = math.radians(rotation)
rotating = [[math.cos(rotation), math.sin(rotation),0],
[-math.sin(rotation),math.cos(rotation), 0],
[0, 0, 1]]
rtranslation = [[1, 0, 0],
[0, 1, 0],
[tx,ty,1]]
ctm = numpy.dot(translation, rotating)
ctm = numpy.dot(ctm, rtranslation)
return page.mergeTransformedPage(page2, [ctm[0][0], ctm[0][1],
ctm[1][0], ctm[1][1],
ctm[2][0], ctm[2][1]])
def mergePDFs2(pdf1,pdf2,outPdf):
from pyPdf import PdfFileWriter, PdfFileReader
input1 = PdfFileReader(file(pdf1, "rb"))
page1 = input1.getPage(0)
input2 = PdfFileReader(file(pdf2, "rb"))
page2 = input2.getPage(0)
page3 = mergeRotateAroundPointPage(page1, page2,
page1.get('/Rotate') or 0,
page2.mediaBox.getWidth()/2, page2.mediaBox.getWidth()/2)
output = PdfFileWriter()
output.addPage(page3)
outputStream = file(outPdf, "wb")
output.write(outputStream)
outputStream.close()
def mergePDFs(pdf1,pdf2,outPdf):
# http://stackoverflow.com/questions/6041244/how-to-merge-two-landscape-pdf-pages-using-pypdf
from pyPdf import PdfFileWriter, PdfFileReader
input1 = PdfFileReader(file(pdf1, "rb"))
page1 = input1.getPage(0)
page1.mediaBox.upperRight = (page1.mediaBox.getUpperRight_x(), page1.mediaBox.getUpperRight_y())
input2 = PdfFileReader(file(pdf2, "rb"))
page2 = input2.getPage(0)
    page2.mediaBox.lowerLeft = (page2.mediaBox.getLowerLeft_x(), page2.mediaBox.getLowerLeft_y())
# Merge
page2.mergePage(page1)
# Output
output = PdfFileWriter()
output.addPage(page1)
outputStream = file(outPdf, "wb")
output.write(outputStream)
outputStream.close()
"""
def merge_horizontal(out_filename, left_filename, right_filename):
#Merge the first page of two PDFs side-to-side
import pyPdf
# open the PDF files to be merged
with open(left_filename) as left_file, open(right_filename) as right_file, open(out_filename, 'w') as output_file:
left_pdf = pyPdf.PdfFileReader(left_file)
right_pdf = pyPdf.PdfFileReader(right_file)
output = pyPdf.PdfFileWriter()
# get the first page from each pdf
left_page = left_pdf.pages[0]
right_page = right_pdf.pages[0]
# start a new blank page with a size that can fit the merged pages side by side
page = output.addBlankPage(
width=left_page.mediaBox.getWidth() + right_page.mediaBox.getWidth(),
height=max(left_page.mediaBox.getHeight(), right_page.mediaBox.getHeight()),
)
# draw the pages on that new page
page.mergeTranslatedPage(left_page, 0, 0)
page.mergeTranslatedPage(right_page, left_page.mediaBox.getWidth(), 0)
# write to file
output.write(output_file)
"""
def inverseDist(value):
if value == 0: value = 1
return math.log(value,2)
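### --- Hedged note (not part of the original module) ---
### Despite its name, inverseDist is a log2 transform that maps a zero
### distance to log2(1) = 0 rather than negative infinity:
###   inverseDist(0) -> 0.0
###   inverseDist(8) -> 3.0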
def getGOEliteExportDir(root_dir,dataset_name):
if 'AltResults' in root_dir:
root_dir = string.split(root_dir,'AltResults')[0]
if 'ExpressionInput' in root_dir:
root_dir = string.split(root_dir,'ExpressionInput')[0]
if 'ExpressionOutput' in root_dir:
root_dir = string.split(root_dir,'ExpressionOutput')[0]
if 'DataPlots' in root_dir:
root_dir = string.replace(root_dir,'DataPlots','GO-Elite')
elite_dir = root_dir
else:
elite_dir = root_dir+'/GO-Elite'
try: os.mkdir(elite_dir)
except Exception: pass
return elite_dir+'/clustering/'+dataset_name
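### --- Hedged usage sketch (not part of the original module) ---
### DataPlots output is redirected to a sibling GO-Elite directory; any other
### location gets a GO-Elite subdirectory. The paths below are illustrative;
### note that calling this also creates the directory on disk.
def _demoGetGOEliteExportDir():
    return getGOEliteExportDir('/results/DataPlots/', 'Clustering-example')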
def systemCodeCheck(IDs):
import gene_associations
id_type_db={}
for id in IDs:
id_type = gene_associations.predictIDSourceSimple(id)
try: id_type_db[id_type]+=1
except Exception: id_type_db[id_type]=1
id_type_count=[]
for i in id_type_db:
id_type_count.append((id_type_db[i],i))
id_type_count.sort()
id_type = id_type_count[-1][-1]
return id_type
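### --- Hedged usage sketch (not part of the original module) ---
### systemCodeCheck majority-votes the predicted source of each identifier
### (via gene_associations.predictIDSourceSimple); the IDs below are
### illustrative and the returned system code depends on that module.
def _demoSystemCodeCheck():
    return systemCodeCheck(['ENSG00000139618', 'ENSG00000141510', 'TP53'])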
def exportFlatClusterData(filename, root_dir, dataset_name, new_row_header,new_column_header,xt,ind1,ind2,vmax,display):
""" Export the clustered results as a text file, only indicating the flat-clusters rather than the tree """
filename = string.replace(filename,'.pdf','.txt')
export_text = export.ExportFile(filename)
column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\t')+'\n' ### format column-names for export
export_text.write(column_header)
column_clusters = string.join(['column_clusters-flat','']+ map(str, ind2),'\t')+'\n' ### format column-flat-clusters for export
export_text.write(column_clusters)
### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match
#new_row_header = new_row_header[::-1]
#xt = xt[::-1]
try: elite_dir = getGOEliteExportDir(root_dir,dataset_name)
except Exception: elite_dir = None
elite_columns = string.join(['InputID','SystemCode'])
try: sy = systemCodeCheck(new_row_header)
except Exception: sy = None
### Export each row in the clustered data matrix xt
i=0
cluster_db={}
export_lines = []
for row in xt:
try:
id = new_row_header[i]
original_id = str(id)
if sy == '$En:Sy':
cluster = 'cluster-'+string.split(id,':')[0]
elif sy == 'S' and ':' in id:
cluster = 'cluster-'+string.split(id,':')[0]
elif sy == 'Sy' and ':' in id:
cluster = 'cluster-'+string.split(id,':')[0]
else:
cluster = 'c'+str(ind1[i])
except Exception:
pass
try:
if 'MarkerGenes' in originalFilename:
cluster = 'cluster-'+string.split(id,':')[0]
id = string.split(id,':')[1]
if ' ' in id:
id = string.split(id,' ')[0]
if 'G000' in id: sy = 'En'
else: sy = 'Sy'
except Exception: pass
try: cluster_db[cluster].append(id)
except Exception: cluster_db[cluster] = [id]
export_lines.append(string.join([original_id,str(ind1[i])]+map(str, row),'\t')+'\n')
i+=1
### Reverse the order of the file
export_lines.reverse()
for line in export_lines:
export_text.write(line)
export_text.close()
### Export GO-Elite input files
allGenes={}
for cluster in cluster_db:
export_elite = export.ExportFile(elite_dir+'/'+cluster+'.txt')
if sy==None:
export_elite.write('ID\n')
else:
export_elite.write('ID\tSystemCode\n')
for id in cluster_db[cluster]:
try:
i1,i2 = string.split(id,' ')
if i1==i2: id = i1
except Exception: pass
if sy == '$En:Sy':
id = string.split(id,':')[1]
ids = string.split(id,' ')
if 'ENS' in ids[0] or 'G0000' in ids[0]: id = ids[0]
else: id = ids[-1]
sc = 'En'
elif sy == 'Sy' and ':' in id:
id = string.split(id,':')[1]
ids = string.split(id,' ')
sc = 'Sy'
elif sy == 'En:Sy':
id = string.split(id,' ')[0]
sc = 'En'
elif sy == 'Ae':
l = string.split(id,':')
if len(l)==2:
id = string.split(id,':')[0] ### Use the Ensembl
if len(l) == 3:
id = string.split(id,':')[1] ### Use the Ensembl
sc = 'En'
if ' ' in id:
ids = string.split(id,' ')
if 'ENS' in ids[-1] or 'G0000' in ids[-1]: id = ids[-1]
else: id = ids[0]
elif sy == 'En' and '&' in id:
for i in string.split(id,'&'):
if 'G0000' in i: id = i; sc = 'En'; break
elif sy == 'Sy' and 'EFN' in id:
sc = 'En'
else:
sc = sy
if sy == 'S':
if ':' in id:
id = string.split(id,':')[-1]
sc = 'Ae'
if '&' in id:
sc = 'Ae'
try: export_elite.write(id+'\t'+sc+'\n')
except Exception: export_elite.write(id+'\n') ### if no System Code known
allGenes[id]=[]
export_elite.close()
try:
if storeGeneSetName != None:
if len(storeGeneSetName)>0 and ('driver' not in justShowTheseIDs and 'guide' not in justShowTheseIDs):
exportCustomGeneSet(storeGeneSetName,species,allGenes)
print 'Exported geneset to "StoredGeneSets"'
except Exception: pass
### Export as CDT file
filename = string.replace(filename,'.txt','.cdt')
if display:
try: exportJTV(filename, new_column_header, new_row_header,vmax=vmax)
except Exception: pass
export_cdt = export.ExportFile(filename)
column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\t')+'\n' ### format column-names for export
export_cdt.write(column_header)
eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\t')+'\n' ### format column-flat-clusters for export
export_cdt.write(eweight)
### Export each row in the clustered data matrix xt
i=0; cdt_lines=[]
for row in xt:
cdt_lines.append(string.join([new_row_header[i]]*2+['1']+map(str, row),'\t')+'\n')
i+=1
### Reverse the order of the file
cdt_lines.reverse()
for line in cdt_lines:
export_cdt.write(line)
export_cdt.close()
return elite_dir, filename, sc
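### Usage sketch (all argument values hypothetical; relies on the export module
### and module-level globals set by runHCexplicit, e.g. originalFilename):
#
# elite_dir, cdt_file, sc = exportFlatClusterData(
#     '/data/DataPlots/Clustering-myset-hierarchical.pdf', '/data/', 'myset',
#     new_row_header, new_column_header, xt, ind1, ind2, vmax=2.0, display=False)
#
### Writes a flat-cluster .txt table, per-cluster GO-Elite input files and a
### .cdt file for Java TreeView; sc is the inferred SystemCode.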
def exportJTV(cdt_dir, column_header, row_header,vmax=None):
### This is a config file for TreeView
filename = string.replace(cdt_dir,'.cdt','.jtv')
export_jtv = export.ExportFile(filename)
cscale = '3'
if len(column_header)>100:
cscale = '1.5'
if len(column_header)>200:
cscale = '1.1'
if len(column_header)>300:
cscale = '0.6'
if len(column_header)>400:
cscale = '0.3'
hscale = '5'
if len(row_header)< 50:
hscale = '10'
if len(row_header)>100:
hscale = '3'
if len(row_header)>500:
hscale = '1'
if len(row_header)>1000:
hscale = '0.5'
contrast = str(float(vmax)/4)[:4] ### base the contrast on the heatmap vmax variable
"""
config = '<DocumentConfig><UrlExtractor/><ArrayUrlExtractor/><MainView><ColorExtractor>'
config+= '<ColorSet down="#00FFFF"/></ColorExtractor><ArrayDrawer/><GlobalXMap>'
config+= '<FixedMap type="Fixed" scale="'+cscale+'"/><FillMap type="Fill"/><NullMap type="Null"/>'
config+= '</GlobalXMap><GlobalYMap><FixedMap type="Fixed" scale="'+hscale+'"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></GlobalYMap><ZoomXMap><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></ZoomXMap><ZoomYMap><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></ZoomYMap><TextView><TextView><GeneSummary/></TextView><TextView>'
config+= '<GeneSummary/></TextView><TextView><GeneSummary/></TextView></TextView><ArrayNameView>'
config+= '<ArraySummary included="0"/></ArrayNameView><AtrSummary/><GtrSummary/></MainView></DocumentConfig>'
export_jtv.write(config)
"""
config = '<DocumentConfig><UrlExtractor/><ArrayUrlExtractor/><MainView><ColorExtractor>'
config+= '<ColorSet down="#00FFFF"/></ColorExtractor><ArrayDrawer/><GlobalXMap>'
config+= '<FixedMap type="Fixed" scale="'+cscale+'"/><FillMap type="Fill"/><NullMap type="Null"/>'
config+= '</GlobalXMap><GlobalYMap><FixedMap type="Fixed" scale="'+hscale+'"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></GlobalYMap><ZoomXMap><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></ZoomXMap><ZoomYMap><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></ZoomYMap><TextView><TextView><GeneSummary/></TextView><TextView>'
config+= '<GeneSummary/></TextView><TextView><GeneSummary/></TextView></TextView><ArrayNameView>'
config+= '<ArraySummary included="0"/></ArrayNameView><AtrSummary/><GtrSummary/></MainView><Views>'
config+= '<View type="Dendrogram" dock="1"><ColorExtractor contrast="'+contrast+'"><ColorSet up="#FFFF00" down="#00CCFF"/>'
config+= '</ColorExtractor><ArrayDrawer/><GlobalXMap current="Fill"><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></GlobalXMap><GlobalYMap current="Fill"><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></GlobalYMap><ZoomXMap><FixedMap type="Fixed"/><FillMap type="Fill"/><NullMap type="Null"/>'
config+= '</ZoomXMap><ZoomYMap current="Fixed"><FixedMap type="Fixed"/><FillMap type="Fill"/><NullMap type="Null"/></ZoomYMap>'
config+= '<TextView><TextView><GeneSummary/></TextView><TextView><GeneSummary/></TextView><TextView><GeneSummary/></TextView>'
config+= '</TextView><ArrayNameView><ArraySummary included="0"/></ArrayNameView><AtrSummary/><GtrSummary/></View></Views></DocumentConfig>'
export_jtv.write(config)
### How to create custom colors - http://matplotlib.sourceforge.net/examples/pylab_examples/custom_cmap.html
def updateColorBarData(ind1,ind2,column_header,row_header,row_method):
""" Replace the top-level cluster information with group assignments for color bar coloring (if group data present)"""
cb_status = 'original'
group_number_list=[]
group_name_list=[]
try: ### Error if GroupDB not recognized as global
if column_header[0] in GroupDB: ### Thus group assignments exist for column headers
cb_status = 'column'
for header in column_header:
group,color,color_num = GroupDB[header]
group_number_list.append(color_num) ### will replace ind2
if (color_num,group) not in group_name_list:
group_name_list.append((color_num,group))
ind2 = group_number_list
if row_header[0] in GroupDB and row_method == None: ### Thus group assignments exist for row headers
group_number_list=[]
if cb_status == 'column': cb_status = 'column-row'
else: cb_status = 'row'
for header in row_header:
group,color,color_num = GroupDB[header]
group_number_list.append(color_num) ### will replace ind2
#group_number_list.reverse()
ind1 = group_number_list
except Exception: None
return ind1,ind2,group_name_list,cb_status
def ConvertFromHex(color1,color2,color3):
    ### Convert '0x'-prefixed hex color strings (e.g. '0xFF0000') to RGB tuples
    c1tuple = tuple(ord(c) for c in color1.lstrip('0x').decode('hex'))
    c2tuple = tuple(ord(c) for c in color2.lstrip('0x').decode('hex'))
    c3tuple = tuple(ord(c) for c in color3.lstrip('0x').decode('hex'))
    return c1tuple, c2tuple, c3tuple
def RedBlackSkyBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.9),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def RedBlackBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def RedBlackGreen():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def YellowBlackBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.8),
(0.5, 0.1, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
### yellow is created by adding y = 1 to RedBlackSkyBlue green last tuple
### modulate between blue and cyan using the last y var in the first green tuple
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def BlackYellowBlue():
cdict = {'red': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 0.8),
(0.5, 0.1, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0))
}
### yellow is created by adding y = 1 to RedBlackSkyBlue green last tuple
### modulate between blue and cyan using the last y var in the first green tuple
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
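### Minimal sketch showing how the colormap builders above are consumed
### (data hypothetical; assumes numpy, pylab and matplotlib.colors as mc are
### imported at module level, as the rest of this file does):
def _demo_custom_colormap():
    data = numpy.random.rand(8,8)
    pylab.imshow(data, cmap=RedBlackGreen(), interpolation='nearest')
    pylab.colorbar(); pylab.show()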
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def filepath(filename):
fn = unique.filepath(filename)
return fn
def importData(filename,Normalize=False,reverseOrder=True,geneFilter=None,zscore=False):
global priorColumnClusters
global priorRowClusters
getRowClusters=False
start_time = time.time()
fn = filepath(filename)
matrix=[]
original_matrix=[]
row_header=[]
x=0; inputMax=0; inputMin=100
filename = string.replace(filename,'\\','/')
dataset_name = string.split(filename,'/')[-1][:-4]
if '.cdt' in filename: start = 3
else: start = 1
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if '.cdt' in filename: t = [t[0]]+t[3:]
            if t[1] == 'row_clusters-flat':
                t = [t[0]]+t[2:]
### color samples by annotated groups if an expression file
if ('exp.' in filename or 'filteredExp.' in filename) and ':' not in data:
filename = string.replace(filename,'-steady-state.txt','.txt')
try:
import ExpressionBuilder
sample_group_db = ExpressionBuilder.simplerGroupImport(filename)
new_headers = []
for v in t:
if v in sample_group_db:
v = sample_group_db[v]+':'+v
new_headers.append(v)
t = new_headers
except Exception:
#print traceback.format_exc()
pass
group_db, column_header = assignGroupColors(t[1:])
x=1
elif 'column_clusters-flat' in t:
try:
prior = map(lambda x: int(float(x)),t[2:])
#priorColumnClusters = dict(zip(column_header, prior))
priorColumnClusters = prior
except Exception:
pass
start = 2
getRowClusters = True
priorRowClusters=[]
elif 'EWEIGHT' in t: pass
else:
gene = t[0]
if geneFilter==None:
proceed = True
elif gene in geneFilter:
proceed = True
else:
proceed = False
if proceed:
nullsPresent = False
#if ' ' not in t and '' not in t: ### Occurs for rows with missing data
try: s = map(float,t[start:])
except Exception:
nullsPresent=True
s=[]
for value in t[start:]:
try: s.append(float(value))
except Exception: s.append(0.000101)
#s = numpy.ma.masked_values(s, 0.000101)
original_matrix.append(s)
if max(s)>inputMax: inputMax = max(s)
if min(s)<inputMin: inputMin = min(s)
#if (abs(max(s)-min(s)))>2:
if Normalize!=False:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
if Normalize=='row mean':
#avg = min(s)
avg = numpy.mean(s)
                        else: avg = numpy.median(s)
if nullsPresent:
s=[] ### Needs to be done to zero out the values
for value in t[start:]:
try: s.append(float(value)-avg)
except Exception: s.append(0.000101)
#s = numpy.ma.masked_values(s, 0.000101)
else:
s = map(lambda x: x-avg,s) ### normalize to the mean
if ' ' in gene:
try:
g1,g2 = string.split(gene,' ')
if g1 == g2: gene = g1
except Exception: pass
if getRowClusters:
try:
#priorRowClusters[gene]=int(float(t[1]))
priorRowClusters.append(int(float(t[1])))
except Exception: pass
if zscore:
### convert to z-scores for normalization prior to PCA
avg = numpy.mean(s)
std = numpy.std(s)
if std ==0:
std = 0.1
try: s = map(lambda x: (x-avg)/std,s)
except Exception: pass
if geneFilter==None:
matrix.append(s)
row_header.append(gene)
else:
if gene in geneFilter:
matrix.append(s)
row_header.append(gene)
x+=1
if inputMax>100: ### Thus, not log values
print 'Converting values to log2...'
matrix=[]
k=0
if inputMin==0: increment = 1#0.01
else: increment = 1
for s in original_matrix:
if 'counts.' in filename:
s = map(lambda x: math.log(x+1,2),s)
else:
try: s = map(lambda x: math.log(x+increment,2),s)
except Exception:
print filename
print Normalize
                    print row_header[k], min(s),max(s); kill ### 'kill' is undefined; deliberately raises a NameError to halt
if Normalize!=False:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
if Normalize=='row mean':
avg = numpy.average(s)
                    else: avg = numpy.median(s)
s = map(lambda x: x-avg,s) ### normalize to the mean
matrix.append(s)
k+=1
del original_matrix
if zscore: print 'Converting values to normalized z-scores...'
#reverseOrder = True ### Cluster order is background (this is a temporary workaround)
if reverseOrder == True:
matrix.reverse(); row_header.reverse()
time_diff = str(round(time.time()-start_time,1))
try:
print '%d rows and %d columns imported for %s in %s seconds...' % (len(matrix),len(column_header),dataset_name,time_diff)
except Exception:
        print 'No data in input file.'; force_error ### 'force_error' is undefined; deliberately raises a NameError to halt
### Add groups for column pre-clustered samples if there
    group_db2, row_header2 = assignGroupColors(list(row_header)) ### assignGroupColors sorts its input in place, so pass a copy to preserve row_header order
#if '.cdt' in filename: matrix.reverse(); row_header.reverse()
for i in group_db2:
if i not in group_db: group_db[i] = group_db2[i]
return matrix, column_header, row_header, dataset_name, group_db
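### Usage sketch for importData (path hypothetical): values above 100 are
### auto log2-transformed; Normalize optionally centers each row; zscore
### standardizes rows prior to PCA.
#
# matrix, column_header, row_header, dataset_name, group_db = importData(
#     '/data/ExpressionInput/exp.myset.txt', Normalize='row mean', zscore=False)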
def importSIF(filename):
fn = filepath(filename)
edges=[]
x=0
if '/' in filename:
dataset_name = string.split(filename,'/')[-1][:-4]
else:
dataset_name = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
parent,type,child = string.split(data,'\t')
if 'AltAnalyze' in dataset_name:
### This is the order for proper directed interactions in the AltAnalyze-interaction viewer
edges.append([parent,child,type])
else:
if '(' in parent: ### for TF-target annotations
parent = string.split(parent,'(')[0]
if ':' in child:
child = string.split(child,':')[1]
if 'TF' in dataset_name or 'UserSuppliedAssociations' in dataset_name or 'WGRV' in dataset_name:
edges.append([parent,child,type]) ### Do this to indicate that the TF is regulating the target
else:
edges.append([child,parent,type])
edges = unique.unique(edges)
return edges
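### Usage sketch for importSIF (file hypothetical): each line is
### 'parent<tab>interaction-type<tab>child'; edge direction depends on the
### dataset name as handled above.
#
# edges = importSIF('/data/networks/TF-targets.sif')
# for source, target, interaction_type in edges: pass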
def assignGroupColors(t):
""" Assign a unique color to each group. Optionally used for cluster display. """
column_header=[]; group_number_db={}
groupNamesPresent=False # Some samples may have missing group names which will result in a clustering error
for i in t:
if ':' in i: groupNamesPresent = True
for i in t:
repls = {'.2txt' : '', '.2bed' : '', '.2tab' : ''}
i=reduce(lambda a, kv: a.replace(*kv), repls.iteritems(), i)
if ':' in i:
group,j = string.split(i,':')[:2]
group_number_db[group]=[]
elif groupNamesPresent:
group_number_db['UNK']=[]
i = 'UNK:'+i
column_header.append(i)
#import random
k = 0
group_db={}; color_db={}
color_list = ['r', 'b', 'y', 'g', 'w', 'k', 'm']
if len(group_number_db)>3:
color_list = []
cm = pylab.cm.get_cmap('gist_rainbow') #gist_ncar # binary
for i in range(len(group_number_db)):
color_list.append(cm(1.*i/len(group_number_db))) # color will now be an RGBA tuple
#color_list=[]
#color_template = [1,1,1,0,0,0,0.5,0.5,0.5,0.25,0.25,0.25,0.75,0.75,0.75]
t.sort() ### Ensure that all clusters have the same order of groups
for i in column_header:
repls = {'.2txt' : '', '.2bed' : '', '.2tab' : ''}
i=reduce(lambda a, kv: a.replace(*kv), repls.iteritems(), i)
if ':' in i:
group,j = string.split(i,':')[:2]
try: color,ko = color_db[group]
except Exception:
try: color_db[group] = color_list[k],k
except Exception:
### If not listed in the standard color set add a new random color
rgb = tuple(scipy.rand(3)) ### random color
#rgb = tuple(random.sample(color_template,3)) ### custom alternative method
color_list.append(rgb)
color_db[group] = color_list[k], k
color,ko = color_db[group]
k+=1
group_db[i] = group, color, ko
#column_header.append(i)
return group_db, column_header
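### Minimal sketch of assignGroupColors (sample names hypothetical): headers
### with a 'group:sample' prefix get one color per group.
def _demo_assignGroupColors():
    group_db, headers = assignGroupColors(['wt:s1','wt:s2','ko:s3'])
    for h in headers:
        print h, group_db[h] ### (group, color, color_index)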
def verifyFile(filename):
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
def AppendOrWrite(export_path):
export_path = filepath(export_path)
status = verifyFile(export_path)
if status == 'not found':
export_data = export.ExportFile(export_path) ### Write this new file
else:
export_data = open(export_path,'a') ### Appends to existing file
return export_path, export_data, status
def exportCustomGeneSet(geneSetName,species,allGenes):
for gene in allGenes:break
if 'ENS' not in gene:
try:
import gene_associations; import OBO_import
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception: symbol_to_gene={}
if species != None:
export_path, export_data, status = AppendOrWrite('AltDatabase/goelite/'+species+'/gene-mapp/Ensembl-StoredGeneSets.txt')
stored_lines=[]
for line in open(export_path,'rU').xreadlines(): stored_lines.append(line)
if status == 'not found':
export_data.write('GeneID\tEmpty\tGeneSetName\n')
for gene in allGenes:
if ' ' in gene:
a,b=string.split(gene,' ')
if 'ENS' in a: gene = a
else: gene = b
if 'ENS' not in gene and gene in symbol_to_gene:
gene = symbol_to_gene[gene][0]
line = gene+'\t\t'+geneSetName+'\n'
if line not in stored_lines:
export_data.write(line)
export_data.close()
else:
print 'Could not store since no species name provided.'
def writetSNEScores(scores,outputdir):
export_obj = export.ExportFile(outputdir)
for matrix_row in scores:
matrix_row = map(str,matrix_row)
export_obj.write(string.join(matrix_row,'\t')+'\n')
export_obj.close()
def importtSNEScores(inputdir):
scores=[]
### Imports tSNE scores to allow for different visualizations of the same scatter plot
for line in open(inputdir,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
t=map(float,t)
scores.append(t)
return scores
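### Round-trip sketch for the two t-SNE score helpers above (output path
### hypothetical; relies on the export module used throughout this file):
def _demo_tsne_score_roundtrip():
    scores = [[0.5,-1.2],[3.1,0.0]]
    writetSNEScores(scores,'/tmp/demo-tSNE_scores.txt')
    assert importtSNEScores('/tmp/demo-tSNE_scores.txt') == scores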
def tSNE(matrix, column_header,dataset_name,group_db,display=True,showLabels=False,
row_header=None,colorByGene=None,species=None,reimportModelScores=True):
try: prior_clusters = priorColumnClusters
except Exception: prior_clusters=[]
try:
if len(prior_clusters)>0 and len(group_db)==0:
newColumnHeader=[]
i=0
for sample_name in column_header:
newColumnHeader.append(str(prior_clusters[i])+':'+sample_name)
i+=1
group_db, column_header = assignGroupColors(newColumnHeader)
except Exception,e:
#print e
group_db={}
if reimportModelScores:
print 'Re-importing t-SNE model scores rather than calculating from scratch',
try: scores = importtSNEScores(root_dir+dataset_name+'-tSNE_scores.txt'); print '...import finished'
except Exception:
reimportModelScores=False; print '...import failed'
if reimportModelScores==False:
from sklearn.manifold import TSNE
X=matrix.T
#model = TSNE(n_components=2, random_state=0,init='pca',early_exaggeration=4.0,perplexity=20)
model = TSNE(n_components=2)
#model = TSNE(n_components=2, random_state=0, n_iter=10000, early_exaggeration=10)
scores=model.fit_transform(X)
### Export the results for optional re-import later
writetSNEScores(scores,root_dir+dataset_name+'-tSNE_scores.txt')
#pylab.scatter(scores[:,0], scores[:,1], 20, labels);
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.xlabel('TSNE-X')
pylab.ylabel('TSNE-Y')
axes = getAxesTransposed(scores) ### adds buffer space to the end of each axis and creates room for a legend
pylab.axis(axes)
marker_size = 15
if len(column_header)>20:
marker_size = 12
if len(column_header)>40:
marker_size = 10
if len(column_header)>150:
marker_size = 7
if len(column_header)>500:
marker_size = 5
if len(column_header)>1000:
marker_size = 4
if len(column_header)>2000:
marker_size = 3
### Color By Gene
if colorByGene != None and len(matrix)==0:
print 'Gene %s not found in the imported dataset... Coloring by groups.' % colorByGene
if colorByGene != None and len(matrix)>0:
gene_translation_db={}
matrix = numpy.array(matrix)
min_val = matrix.min() ### min val
if ' ' in colorByGene:
genes = string.split(colorByGene,' ')
else:
genes = [colorByGene]
genePresent=False
numberGenesPresent=[]
for gene in genes:
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
### Translate symbol to Ensembl
if len(numberGenesPresent)==0:
try:
import gene_associations; import OBO_import
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
for symbol in genes:
if symbol in symbol_to_gene:
gene = symbol_to_gene[symbol][0]
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
gene_translation_db[symbol]=gene
except Exception: pass
numberGenesPresent = len(numberGenesPresent)
if numberGenesPresent==1:
cm = pylab.cm.get_cmap('Reds')
else:
if numberGenesPresent==2:
cm = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
elif numberGenesPresent==3:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif numberGenesPresent==4:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif numberGenesPresent==5:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif numberGenesPresent==6:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif numberGenesPresent==7:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
else:
cm = pylab.cm.get_cmap('gist_rainbow')
if genePresent:
dataset_name+='-'+colorByGene
group_db={}
bestGeneAssociated={}
k=0
for gene in genes:
try:
try: i = row_header.index(gene)
except Exception: i = row_header.index(gene_translation_db[gene])
values = map(float,matrix[i])
min_val = min(values)
bin_size = (max(values)-min_val)/8
max_val = max(values)
ranges = []
iz=min_val
while iz < (max(values)-bin_size/100):
r = iz,iz+bin_size
if len(ranges)==7:
r = iz,max_val
ranges.append(r)
iz+=bin_size
color_db = {}
for i in range(len(ranges)):
if i==0:
color = '#C0C0C0'
else:
if numberGenesPresent==1:
### use a single color gradient
color = cm(1.*i/len(ranges))
#color = cm(1.*(i+1)/len(ranges))
else:
if i>2:
color = cm(k)
else:
color = '#C0C0C0'
color_db[ranges[i]] = color
i=0
for val in values:
sample = column_header[i]
for (l,u) in color_db:
range_index = ranges.index((l,u)) ### what is the ranking of this range
if val>=l and val<=u:
color = color_db[(l,u)]
color_label = [gene+'-range: '+str(l)[:4]+'-'+str(u)[:4],color,'']
group_db[sample] = color_label
try: bestGeneAssociated[sample].append([range_index,val,color_label])
except Exception: bestGeneAssociated[sample] = [[range_index,val,color_label]]
i+=1
#print min(values),min_val,bin_size,max_val
if len(genes)>1:
### Collapse and rank multiple gene results
for sample in bestGeneAssociated:
bestGeneAssociated[sample].sort()
color_label = bestGeneAssociated[sample][-1][-1]
if numberGenesPresent>1:
index = bestGeneAssociated[sample][-1][0]
if index > 2:
gene = string.split(color_label[0],'-')[0]
else:
gene = 'Null'
color_label[0] = gene
group_db[sample] = color_label
except Exception:
print [gene], 'not found in rows...'
#print traceback.format_exc()
k+=1
else:
print [colorByGene], 'not found in rows...'
pylab.title('t-SNE - '+dataset_name)
group_names={}
i=0
for sample_name in column_header: #scores[0]
### Add the text labels for each
try:
### Get group name and color information
group_name,color,k = group_db[sample_name]
if group_name not in group_names:
label = group_name ### Only add once for each group
else: label = None
group_names[group_name] = color
except Exception:
color = 'r'; label=None
ax.plot(scores[i][0],scores[i][1],color=color,marker='o',markersize=marker_size,label=label,markeredgewidth=0,picker=True)
#except Exception: print i, len(scores[pcB]);kill
if showLabels:
try: sample_name = ' '+string.split(sample_name,':')[1]
except Exception: pass
ax.text(scores[i][0],scores[i][1],sample_name,fontsize=11)
i+=1
group_count = []
for i in group_db:
if group_db[i][0] not in group_count:
group_count.append(group_db[i][0])
#print len(group_count)
Lfontsize = 8
if len(group_count)>20:
Lfontsize = 10
if len(group_count)>30:
Lfontsize = 8
if len(group_count)>40:
Lfontsize = 6
if len(group_count)>50:
Lfontsize = 5
i=0
box = ax.get_position()
if len(group_count) > 0: ### Make number larger to get the legend in the plot -- BUT, the axis buffer above has been disabled
        # Shrink current axis by 20%
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
try: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize = Lfontsize) ### move the legend over to the right of the plot
except Exception: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
else:
ax.set_position([box.x0, box.y0, box.width, box.height])
pylab.legend(loc="upper left", prop={'size': 10})
filename = 'Clustering-%s-t-SNE.pdf' % dataset_name
try: pylab.savefig(root_dir + filename)
except Exception: None ### Rare error
#print 'Exporting:',filename
filename = filename[:-3]+'png'
try: pylab.savefig(root_dir + filename) #dpi=200
except Exception: None ### Rare error
    graphic_link.append(['t-SNE',root_dir+filename])
if display:
print 'Exporting:',filename
try:
pylab.show()
except Exception:
pass### when run in headless mode
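### Usage sketch for tSNE (arguments hypothetical; requires the root_dir and
### graphic_link globals set by runHCexplicit):
#
# tSNE(matrix, column_header, dataset_name, group_db, display=False,
#      row_header=row_header, colorByGene='SOX2', species='Hs',
#      reimportModelScores=False)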
def excludeHighlyCorrelatedHits(x,row_header):
### For methylation data or other data with redundant signatures, remove these and only report the first one
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides import warnings
D1 = numpy.corrcoef(x)
i=0
exclude={}
gene_correlations={}
include = []
for score_ls in D1:
k=0
for v in score_ls:
if str(v)!='nan':
                if v>1.00 and k!=i: ### strictly >1.0 only catches floating-point overshoot of perfectly correlated rows
#print row_header[i], row_header[k], v
if row_header[i] not in exclude:
exclude[row_header[k]]=[]
#if k not in exclude: include.append(row_header[k])
k+=1
#if i not in exclude: include.append(row_header[i])
i+=1
#print len(exclude),len(row_header);sys.exit()
return exclude
def PrincipalComponentAnalysis(matrix, column_header, row_header, dataset_name,
group_db, display=False, showLabels=True, algorithm='SVD', geneSetName=None,
species=None, pcA=1,pcB=2, colorByGene=None):
print "Performing Principal Component Analysis..."
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
try: prior_clusters = priorColumnClusters
except Exception: prior_clusters=[]
try:
if len(prior_clusters)>0 and len(group_db)==0:
newColumnHeader=[]
i=0
for sample_name in column_header:
newColumnHeader.append(str(prior_clusters[i])+':'+sample_name)
i+=1
group_db, column_header = assignGroupColors(newColumnHeader)
except Exception,e:
#print e
group_db={}
pcA-=1
pcB-=1
""" Based in part on code from:
http://glowingpython.blogspot.com/2011/07/principal-component-analysis-with-numpy.html
    Performs principal component analysis (PCA) on the n-by-p data matrix A.
Rows of A correspond to observations, columns to variables.
Returns :
coeff :
is a p-by-p matrix, each column containing coefficients
for one principal component.
score :
the principal component scores; that is, the representation
of A in the principal component space. Rows of SCORE
correspond to observations, columns to components.
latent :
a vector containing the eigenvalues
of the covariance matrix of A.
"""
# computing eigenvalues and eigenvectors of covariance matrix
if algorithm == 'SVD': use_svd = True
else: use_svd = False
#M = (matrix-mean(matrix.T,axis=1)).T # subtract the mean (along columns)
Mdif = matrix/matrix.std()
Mdif = Mdif.T
u, s, vt = svd(Mdif, 0)
fracs = s**2/np.sum(s**2)
entropy = -sum(fracs*np.log(fracs))/np.log(np.min(vt.shape))
label1 = 'PC%i (%2.1f%%)' %(pcA+1, fracs[0]*100)
label2 = 'PC%i (%2.1f%%)' %(pcB+1, fracs[1]*100)
#http://docs.scipy.org/doc/scipy/reference/sparse.html
#scipy.sparse.linalg.svds - sparse svd
#idx = numpy.argsort(vt[0,:])
#print idx;sys.exit() # Use this as your cell order or use a density analysis to get groups
#### FROM LARSSON ########
#100 most correlated Genes with PC1
#print vt
PCsToInclude = 4
correlated_db={}
allGenes={}
new_matrix = []
new_headers = []
added_indexes=[]
x = 0
#100 most correlated Genes with PC1
print 'exporting PCA loading genes to:',root_dir+'/PCA/correlated.txt'
exportData = export.ExportFile(root_dir+'/PCA/correlated.txt')
matrix = zip(*matrix) ### transpose this back to normal
try:
while x<PCsToInclude:
idx = numpy.argsort(u[:,x])
correlated = map(lambda i: row_header[i],idx[:300])
anticorrelated = map(lambda i: row_header[i],idx[-300:])
correlated_db[x] = correlated,anticorrelated
### Create a new filtered matrix of loading gene indexes
fidx = list(idx[:300])+list(idx[-300:])
for i in fidx:
if i not in added_indexes:
added_indexes.append(i)
new_headers.append(row_header[i])
new_matrix.append(matrix[i])
x+=1
#redundant_genes = excludeHighlyCorrelatedHits(numpy.array(new_matrix),new_headers)
redundant_genes = []
for x in correlated_db:
correlated,anticorrelated = correlated_db[x]
count=0
for gene in correlated:
if gene not in redundant_genes and count<100:
exportData.write(gene+'\tcorrelated-PC'+str(x+1)+'\n'); allGenes[gene]=[]
count+=1
count=0
for gene in anticorrelated:
if gene not in redundant_genes and count<100:
exportData.write(gene+'\tanticorrelated-PC'+str(x+1)+'\n'); allGenes[gene]=[]
count+=1
exportData.close()
if geneSetName != None:
if len(geneSetName)>0:
exportCustomGeneSet(geneSetName,species,allGenes)
print 'Exported geneset to "StoredGeneSets"'
except Exception:
pass
###########################
#if len(row_header)>20000:
#print '....Using eigenvectors of the real symmetric square matrix for efficiency...'
#[latent,coeff] = scipy.sparse.linalg.eigsh(cov(M))
#scores=mlab.PCA(scores)
    if use_svd == False:
        ### non-SVD path: relies on the mean-centered matrix M (see the
        ### commented definition above); M must be restored before using this branch
        [latent,coeff] = linalg.eig(cov(M))
        scores = dot(coeff.T,M) # projection of the data in the new space
else:
### transform u into the same structure as the original scores from linalg.eig coeff
scores = vt
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.xlabel(label1)
pylab.ylabel(label2)
axes = getAxes(scores) ### adds buffer space to the end of each axis and creates room for a legend
pylab.axis(axes)
marker_size = 15
if len(column_header)>20:
marker_size = 12
if len(column_header)>40:
marker_size = 10
if len(column_header)>150:
marker_size = 7
if len(column_header)>500:
marker_size = 5
if len(column_header)>1000:
marker_size = 4
if len(column_header)>2000:
marker_size = 3
#marker_size = 9
#samples = list(column_header)
### Color By Gene
if colorByGene != None:
gene_translation_db={}
matrix = numpy.array(matrix)
min_val = matrix.min() ### min val
if ' ' in colorByGene:
genes = string.split(colorByGene,' ')
else:
genes = [colorByGene]
genePresent=False
numberGenesPresent=[]
for gene in genes:
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
### Translate symbol to Ensembl
if len(numberGenesPresent)==0:
try:
import gene_associations; import OBO_import
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
for symbol in genes:
if symbol in symbol_to_gene:
gene = symbol_to_gene[symbol][0]
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
gene_translation_db[symbol]=gene
except Exception: pass
numberGenesPresent = len(numberGenesPresent)
if numberGenesPresent==1:
cm = pylab.cm.get_cmap('Reds')
else:
if numberGenesPresent==2:
cm = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
elif numberGenesPresent==3:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif numberGenesPresent==4:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif numberGenesPresent==5:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif numberGenesPresent==6:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif numberGenesPresent==7:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
else:
cm = pylab.cm.get_cmap('gist_rainbow')
if genePresent:
dataset_name+='-'+colorByGene
group_db={}
bestGeneAssociated={}
k=0
for gene in genes:
try:
try: i = row_header.index(gene)
except Exception: i = row_header.index(gene_translation_db[gene])
values = map(float,matrix[i])
min_val = min(values)
bin_size = (max(values)-min_val)/8
max_val = max(values)
ranges = []
iz=min_val
while iz < (max(values)-bin_size/100):
r = iz,iz+bin_size
if len(ranges)==7:
r = iz,max_val
ranges.append(r)
iz+=bin_size
color_db = {}
for i in range(len(ranges)):
if i==0:
color = '#C0C0C0'
else:
if numberGenesPresent==1:
### use a single color gradient
color = cm(1.*i/len(ranges))
#color = cm(1.*(i+1)/len(ranges))
else:
if i>2:
color = cm(k)
else:
color = '#C0C0C0'
color_db[ranges[i]] = color
i=0
for val in values:
sample = column_header[i]
for (l,u) in color_db:
range_index = ranges.index((l,u)) ### what is the ranking of this range
if val>=l and val<=u:
color = color_db[(l,u)]
color_label = [gene+'-range: '+str(l)[:4]+'-'+str(u)[:4],color,'']
group_db[sample] = color_label
try: bestGeneAssociated[sample].append([range_index,val,color_label])
except Exception: bestGeneAssociated[sample] = [[range_index,val,color_label]]
i+=1
#print min(values),min_val,bin_size,max_val
if len(genes)>1:
### Collapse and rank multiple gene results
for sample in bestGeneAssociated:
bestGeneAssociated[sample].sort()
color_label = bestGeneAssociated[sample][-1][-1]
if numberGenesPresent>1:
index = bestGeneAssociated[sample][-1][0]
if index > 2:
gene = string.split(color_label[0],'-')[0]
else:
gene = 'Null'
color_label[0] = gene
group_db[sample] = color_label
except Exception:
print [gene], 'not found in rows...'
#print traceback.format_exc()
k+=1
else:
print [colorByGene], 'not found in rows...'
pylab.title('Principal Component Analysis - '+dataset_name)
group_names={}
i=0
for sample_name in column_header: #scores[0]
### Add the text labels for each
try:
### Get group name and color information
group_name,color,k = group_db[sample_name]
if group_name not in group_names:
label = group_name ### Only add once for each group
else: label = None
group_names[group_name] = color
except Exception:
color = 'r'; label=None
        try: ax.plot(scores[pcA][i],scores[pcB][i],color=color,marker='o',markersize=marker_size,label=label,markeredgewidth=0,picker=True)
        except Exception, e: print e; print i, len(scores[pcB]); kill ### 'kill' is undefined; deliberately raises a NameError to halt
if showLabels:
try: sample_name = ' '+string.split(sample_name,':')[1]
except Exception: pass
ax.text(scores[pcA][i],scores[pcB][i],sample_name,fontsize=11)
i+=1
group_count = []
for i in group_db:
if group_db[i][0] not in group_count:
group_count.append(group_db[i][0])
#print len(group_count)
Lfontsize = 8
if len(group_count)>20:
Lfontsize = 10
if len(group_count)>30:
Lfontsize = 8
if len(group_count)>40:
Lfontsize = 6
if len(group_count)>50:
Lfontsize = 5
i=0
#group_count = group_count*10 ### force the legend box out of the PCA core plot
box = ax.get_position()
if len(group_count) > 0:
        # Shrink current axis by 20%
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
try: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize = Lfontsize) ### move the legend over to the right of the plot
except Exception: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
else:
ax.set_position([box.x0, box.y0, box.width, box.height])
pylab.legend(loc="upper left", prop={'size': 10})
filename = 'Clustering-%s-PCA.pdf' % dataset_name
try: pylab.savefig(root_dir + filename)
except Exception: None ### Rare error
#print 'Exporting:',filename
filename = filename[:-3]+'png'
try: pylab.savefig(root_dir + filename) #dpi=200
except Exception: None ### Rare error
graphic_link.append(['Principal Component Analysis',root_dir+filename])
if display:
print 'Exporting:',filename
try:
pylab.show()
except Exception:
pass### when run in headless mode
fig.clf()
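### Usage sketch (arguments hypothetical): matrix must be a numpy array, since
### matrix.std() and matrix.T are used above, and root_dir/graphic_link must
### already be set by runHCexplicit.
#
# PrincipalComponentAnalysis(numpy.array(matrix), column_header, row_header,
#     dataset_name, group_db, display=False, showLabels=False,
#     algorithm='SVD', pcA=1, pcB=2)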
def ica(filename):
showLabels=True
X, column_header, row_header, dataset_name, group_db = importData(filename)
    X = map(numpy.array, zip(*X)) ### transpose the matrix and convert each row to a numpy array
    column_header, row_header = row_header, column_header ### headers swap to match the transposed matrix
ica = FastICA()
scores = ica.fit(X).transform(X) # Estimate the sources
scores /= scores.std(axis=0)
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.xlabel('ICA-X')
pylab.ylabel('ICA-Y')
pylab.title('ICA - '+dataset_name)
axes = getAxes(scores) ### adds buffer space to the end of each axis and creates room for a legend
pylab.axis(axes)
marker_size = 15
if len(column_header)>20:
marker_size = 12
if len(column_header)>40:
marker_size = 10
if len(column_header)>150:
marker_size = 7
if len(column_header)>500:
marker_size = 5
if len(column_header)>1000:
marker_size = 4
if len(column_header)>2000:
marker_size = 3
group_names={}
i=0
for sample_name in row_header: #scores[0]
### Add the text labels for each
try:
### Get group name and color information
group_name,color,k = group_db[sample_name]
if group_name not in group_names:
label = group_name ### Only add once for each group
else: label = None
group_names[group_name] = color
except Exception:
color = 'r'; label=None
ax.plot(scores[0][i],scores[1][i],color=color,marker='o',markersize=marker_size,label=label)
if showLabels:
ax.text(scores[0][i],scores[1][i],sample_name,fontsize=8)
i+=1
pylab.title('ICA recovered signals')
pylab.show()
def plot_samples(S, axis_list=None):
pylab.scatter(S[:, 0], S[:, 1], s=20, marker='o', linewidths=0, zorder=10,
color='red', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
pylab.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
pylab.quiver(0, 0, x_axis, y_axis, zorder=11, width=2, scale=6,
color=color)
pylab.xlabel('x')
pylab.ylabel('y')
def PCA3D(matrix, column_header, row_header, dataset_name, group_db,
display=False, showLabels=True, algorithm='SVD',geneSetName=None,
species=None,colorByGene=None):
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
fig = pylab.figure()
ax = fig.add_subplot(111, projection='3d')
start = time.time()
#M = (matrix-mean(matrix.T,axis=1)).T # subtract the mean (along columns)
try: prior_clusters = priorColumnClusters
except Exception: prior_clusters=[]
try:
if len(prior_clusters)>0 and len(group_db)==0:
newColumnHeader=[]
i=0
for sample_name in column_header:
newColumnHeader.append(str(prior_clusters[i])+':'+sample_name)
i+=1
group_db, column_header = assignGroupColors(newColumnHeader)
except Exception,e:
#print e
group_db={}
if algorithm == 'SVD': use_svd = True
else: use_svd = False
Mdif = matrix/matrix.std()
Mdif = Mdif.T
u, s, vt = svd(Mdif, 0)
fracs = s**2/np.sum(s**2)
entropy = -sum(fracs*np.log(fracs))/np.log(np.min(vt.shape))
label1 = 'PC%i (%2.1f%%)' %(0+1, fracs[0]*100)
label2 = 'PC%i (%2.1f%%)' %(1+1, fracs[1]*100)
label3 = 'PC%i (%2.1f%%)' %(2+1, fracs[2]*100)
PCsToInclude = 4
correlated_db={}
allGenes={}
new_matrix = []
new_headers = []
added_indexes=[]
x = 0
#100 most correlated Genes with PC1
print 'exporting PCA loading genes to:',root_dir+'/PCA/correlated.txt'
exportData = export.ExportFile(root_dir+'/PCA/correlated.txt')
matrix = zip(*matrix) ### transpose this back to normal
try:
while x<PCsToInclude:
idx = numpy.argsort(u[:,x])
correlated = map(lambda i: row_header[i],idx[:300])
anticorrelated = map(lambda i: row_header[i],idx[-300:])
correlated_db[x] = correlated,anticorrelated
### Create a new filtered matrix of loading gene indexes
fidx = list(idx[:300])+list(idx[-300:])
for i in fidx:
if i not in added_indexes:
added_indexes.append(i)
new_headers.append(row_header[i])
new_matrix.append(matrix[i])
x+=1
#redundant_genes = excludeHighlyCorrelatedHits(numpy.array(new_matrix),new_headers)
redundant_genes = []
for x in correlated_db:
correlated,anticorrelated = correlated_db[x]
count=0
for gene in correlated:
if gene not in redundant_genes and count<100:
exportData.write(gene+'\tcorrelated-PC'+str(x+1)+'\n'); allGenes[gene]=[]
count+=1
count=0
for gene in anticorrelated:
if gene not in redundant_genes and count<100:
exportData.write(gene+'\tanticorrelated-PC'+str(x+1)+'\n'); allGenes[gene]=[]
count+=1
exportData.close()
if geneSetName != None:
if len(geneSetName)>0:
exportCustomGeneSet(geneSetName,species,allGenes)
print 'Exported geneset to "StoredGeneSets"'
except ZeroDivisionError:
pass
#numpy.Mdiff.toFile(root_dir+'/PCA/correlated.txt','\t')
    if use_svd == False:
        ### non-SVD path: relies on the mean-centered matrix M (see the
        ### commented definition above); M must be restored before using this branch
        [latent,coeff] = linalg.eig(cov(M))
        scores = dot(coeff.T,M) # projection of the data in the new space
else:
### transform u into the same structure as the original scores from linalg.eig coeff
scores = vt
end = time.time()
print 'PCA completed in', end-start, 'seconds.'
### Hide the axis number labels
#ax.w_xaxis.set_ticklabels([])
#ax.w_yaxis.set_ticklabels([])
#ax.w_zaxis.set_ticklabels([])
#"""
#ax.set_xticks([]) ### Hides ticks
#ax.set_yticks([])
#ax.set_zticks([])
ax.set_xlabel(label1)
ax.set_ylabel(label2)
ax.set_zlabel(label3)
#"""
#pylab.title('Principal Component Analysis\n'+dataset_name)
"""
pylab.figure()
pylab.xlabel('Principal Component 1')
pylab.ylabel('Principal Component 2')
"""
axes = getAxes(scores,PlotType='3D') ### adds buffer space to the end of each axis and creates room for a legend
pylab.axis(axes)
Lfontsize = 8
group_count = []
for i in group_db:
if group_db[i][0] not in group_count:
group_count.append(group_db[i][0])
### Color By Gene
if colorByGene != None:
gene_translation_db={}
matrix = numpy.array(matrix)
min_val = matrix.min() ### min val
if ' ' in colorByGene:
genes = string.split(colorByGene,' ')
else:
genes = [colorByGene]
genePresent=False
numberGenesPresent=[]
for gene in genes:
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
### Translate symbol to Ensembl
if len(numberGenesPresent)==0:
try:
import gene_associations; import OBO_import
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
for symbol in genes:
if symbol in symbol_to_gene:
gene = symbol_to_gene[symbol][0]
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
gene_translation_db[symbol]=gene
except Exception: pass
numberGenesPresent = len(numberGenesPresent)
if numberGenesPresent==1:
cm = pylab.cm.get_cmap('Reds')
else:
if numberGenesPresent==2:
cm = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
elif numberGenesPresent==3:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif numberGenesPresent==4:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif numberGenesPresent==5:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif numberGenesPresent==6:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif numberGenesPresent==7:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
else:
cm = pylab.cm.get_cmap('gist_rainbow')
if genePresent:
dataset_name+='-'+colorByGene
group_db={}
bestGeneAssociated={}
k=0
for gene in genes:
try:
try: i = row_header.index(gene)
except Exception: i = row_header.index(gene_translation_db[gene])
values = map(float,matrix[i])
min_val = min(values)
bin_size = (max(values)-min_val)/8
max_val = max(values)
ranges = []
iz=min_val
while iz < (max(values)-bin_size/100):
r = iz,iz+bin_size
if len(ranges)==7:
r = iz,max_val
ranges.append(r)
iz+=bin_size
color_db = {}
for i in range(len(ranges)):
if i==0:
color = '#C0C0C0'
else:
if numberGenesPresent==1:
### use a single color gradient
color = cm(1.*i/len(ranges))
#color = cm(1.*(i+1)/len(ranges))
else:
if i>2:
color = cm(k)
else:
color = '#C0C0C0'
color_db[ranges[i]] = color
i=0
for val in values:
sample = column_header[i]
for (l,u) in color_db:
range_index = ranges.index((l,u)) ### what is the ranking of this range
if val>=l and val<=u:
color = color_db[(l,u)]
color_label = [gene+'-range: '+str(l)[:4]+'-'+str(u)[:4],color,'']
group_db[sample] = color_label
try: bestGeneAssociated[sample].append([range_index,val,color_label])
except Exception: bestGeneAssociated[sample] = [[range_index,val,color_label]]
i+=1
#print min(values),min_val,bin_size,max_val
if len(genes)>1:
### Collapse and rank multiple gene results
for sample in bestGeneAssociated:
bestGeneAssociated[sample].sort()
color_label = bestGeneAssociated[sample][-1][-1]
if numberGenesPresent>1:
index = bestGeneAssociated[sample][-1][0]
if index > 2:
gene = string.split(color_label[0],'-')[0]
else:
gene = 'Null'
color_label[0] = gene
group_db[sample] = color_label
except Exception:
print [gene], 'not found in rows...'
#print traceback.format_exc()
k+=1
else:
print [colorByGene], 'not found in rows...'
#print len(group_count)
if len(group_count)>20:
Lfontsize = 10
if len(group_count)>30:
Lfontsize = 8
if len(group_count)>40:
Lfontsize = 6
if len(group_count)>50:
Lfontsize = 5
if len(scores[0])>150:
markersize = 7
else:
markersize = 10
i=0
group_names={}
for x in scores[0]:
### Add the text labels for each
sample_name = column_header[i]
try:
### Get group name and color information
group_name,color, k = group_db[sample_name]
if group_name not in group_names:
label = group_name ### Only add once for each group
else: label = None
group_names[group_name] = color, k
except Exception:
color = 'r'; label=None
ax.plot([scores[0][i]],[scores[1][i]],[scores[2][i]],color=color,marker='o',markersize=markersize,label=label,markeredgewidth=0,picker=True) #markeredgecolor=color
if showLabels:
#try: sample_name = ' '+string.split(sample_name,':')[1]
#except Exception: pass
ax.text(scores[0][i],scores[1][i],scores[2][i], ' '+sample_name,fontsize=9)
i+=1
    # Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
#pylab.legend(loc="upper left", prop={'size': 10})
try: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize = Lfontsize) ### move the legend over to the right of the plot
except Exception: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
filename = 'Clustering-%s-3D-PCA.pdf' % dataset_name
pylab.savefig(root_dir + filename)
#print 'Exporting:',filename
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename) #dpi=200
graphic_link.append(['Principal Component Analysis',root_dir+filename])
if display:
print 'Exporting:',filename
try: pylab.show()
except Exception: None ### when run in headless mode
fig.clf()
def getAxes1(scores,PlotType=None):
    """ Adjust these axes to account for (A) legend size (left hand upper corner)
    and (B) long sample name extending to the right
    (earlier variant of getAxes below; its 3D padding matches the 2D case)
    """
try:
x_range = max(scores[0])-min(scores[0])
y_range = max(scores[1])-min(scores[1])
if PlotType == '3D':
x_axis_min = min(scores[0])-(x_range/10)
x_axis_max = max(scores[0])+(x_range/10)
y_axis_min = min(scores[1])-(y_range/10)
y_axis_max = max(scores[1])+(y_range/10)
else:
x_axis_min = min(scores[0])-(x_range/10)
x_axis_max = max(scores[0])+(x_range/10)
y_axis_min = min(scores[1])-(y_range/10)
y_axis_max = max(scores[1])+(y_range/10)
except KeyError:
None
return [x_axis_min, x_axis_max, y_axis_min, y_axis_max]
def getAxes(scores,PlotType=None):
""" Adjust these axes to account for (A) legend size (left hand upper corner)
and (B) long sample name extending to the right
"""
try:
x_range = max(scores[0])-min(scores[0])
y_range = max(scores[1])-min(scores[1])
if PlotType == '3D':
x_axis_min = min(scores[0])-(x_range/1.5)
x_axis_max = max(scores[0])+(x_range/1.5)
y_axis_min = min(scores[1])-(y_range/5)
y_axis_max = max(scores[1])+(y_range/5)
else:
x_axis_min = min(scores[0])-(x_range/10)
x_axis_max = max(scores[0])+(x_range/10)
y_axis_min = min(scores[1])-(y_range/10)
y_axis_max = max(scores[1])+(y_range/10)
except KeyError:
None
return [x_axis_min, x_axis_max, y_axis_min, y_axis_max]
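### Minimal sketch of getAxes (scores hypothetical, component-major:
### scores[0] holds the x values, scores[1] the y values):
def _demo_getAxes():
    scores = [[-2.0,0.0,4.0],[1.0,3.0,5.0]]
    print getAxes(scores) ### -> [-2.6, 4.6, 0.6, 5.4], 10% padding per side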
def getAxesTransposed(scores):
""" Adjust these axes to account for (A) legend size (left hand upper corner)
and (B) long sample name extending to the right
"""
scores = map(numpy.array, zip(*scores))
try:
x_range = max(scores[0])-min(scores[0])
y_range = max(scores[1])-min(scores[1])
x_axis_min = min(scores[0])-int((float(x_range)/7))
x_axis_max = max(scores[0])+int((float(x_range)/7))
        y_axis_min = min(scores[1])-int(float(y_range)/7)
        y_axis_max = max(scores[1])+int(float(y_range)/7)
except KeyError:
None
return [x_axis_min, x_axis_max, y_axis_min, y_axis_max]
def Kmeans(features, column_header, row_header):
    #http://www.janeriksolem.net/2009/04/clustering-using-scipys-k-means.html
    ### generate two hypothetical normally distributed demo classes; note this
    ### overrides the features argument so the example below is self-contained
    class1 = numpy.array(numpy.random.standard_normal((100,2))) + numpy.array([5,5])
    class2 = 1.5 * numpy.array(numpy.random.standard_normal((100,2)))
    features = numpy.vstack((class1,class2))
centroids,variance = scipy.cluster.vq.kmeans(features,2)
code,distance = scipy.cluster.vq.vq(features,centroids)
"""
    This generates two normally distributed classes in two dimensions. To try and cluster the points, run k-means with k=2.
    The variance is returned, but we don't really need it since the SciPy implementation computes several runs (default is 20)
    and selects the one with the smallest variance for us. You can then check where each data point is assigned using the
    vector quantization function in the SciPy package. By checking the value of code we can see if there are any incorrect
    assignments. To visualize, we can plot the points and the final centroids.
"""
pylab.plot([p[0] for p in class1],[p[1] for p in class1],'*')
pylab.plot([p[0] for p in class2],[p[1] for p in class2],'r*')
pylab.plot([p[0] for p in centroids],[p[1] for p in centroids],'go')
pylab.show()
"""
def displaySimpleNetworkX():
import networkx as nx
print 'Graphing output with NetworkX'
    gr = nx.Graph(rotate=90,bgcolor='white') ### commands for networkx and pygraphviz are the same or similar
edges = importSIF('Config/TissueFateMap.sif')
### Add nodes and edges
for (node1,node2,type) in edges:
gr.add_edge(node1,node2)
draw_networkx_edges
#gr['Myometrium']['color']='red'
# Draw as PNG
nx.draw_shell(gr) #wopi, gvcolor, wc, ccomps, tred, sccmap, fdp, circo, neato, acyclic, nop, gvpr, dot, sfdp. - fdp
pylab.savefig('LineageNetwork.png')
def displaySimpleNetwork(sif_filename,fold_db,pathway_name):
import pygraphviz as pgv
#print 'Graphing output with PygraphViz'
gr = pgv.AGraph(bgcolor='white',directed=True) ### Graph creation and setting of attributes - directed indicates arrows should be added
#gr = pgv.AGraph(rotate='90',bgcolor='lightgray')
### Set graph attributes
gr.node_attr['style']='filled'
gr.graph_attr['label']='%s Network' % pathway_name
edges = importSIF(sif_filename)
if len(edges) > 700:
print sif_filename, 'too large to visualize...'
else:
### Add nodes and edges
for (node1,node2,type) in edges:
nodes = (node1,node2)
gr.add_edge(nodes)
child, parent = nodes
edge = gr.get_edge(nodes[0],nodes[1])
if 'TF' in pathway_name or 'WGRV' in pathway_name:
node = child ### This is the regulating TF
else:
node = parent ### This is the pathway
n=gr.get_node(node)
### http://www.graphviz.org/doc/info/attrs.html
n.attr['penwidth'] = 4
n.attr['fillcolor']= '#FFFF00' ### yellow
n.attr['shape']='rectangle'
#n.attr['weight']='yellow'
#edge.attr['arrowhead'] = 'diamond' ### set the arrow type
id_color_db = WikiPathways_webservice.getHexadecimalColorRanges(fold_db,'Genes')
for gene_symbol in id_color_db:
color_code = id_color_db[gene_symbol]
try:
n=gr.get_node(gene_symbol)
n.attr['fillcolor']= '#'+string.upper(color_code) #'#FF0000'
#n.attr['rotate']=90
except Exception: None
# Draw as PNG
#gr.layout(prog='dot') #fdp (spring embedded), sfdp (OK layout), neato (compressed), circo (lots of empty space), dot (hierarchical - linear)
gr.layout(prog='neato')
output_filename = '%s.png' % sif_filename[:-4]
#print output_filename
gr.draw(output_filename)
"""
def findParentDir(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1 ### get just the parent directory
return filename[:x]
def findFilename(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1 ### get just the parent directory
return filename[x:]
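### Minimal sketch of the two path helpers above (paths hypothetical); both
### normalize separators before splitting on the last '/':
def _demo_path_helpers():
    print findParentDir('C:\\data\\results\\exp.txt') ### -> 'C:/data/results/'
    print findFilename('/data/results/exp.txt') ### -> 'exp.txt'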
def runHierarchicalClustering(matrix, row_header, column_header, dataset_name,
row_method, row_metric, column_method, column_metric,
color_gradient, display=False, contrast=None,
allowAxisCompression=True,Normalize=True):
""" Running with cosine or other distance metrics can often produce negative Z scores
during clustering, so adjustments to the clustering may be required.
=== Options Include ===
row_method = 'average'
column_method = 'single'
row_metric = 'cosine'
column_metric = 'euclidean'
color_gradient = 'red_white_blue'
color_gradient = 'red_black_sky'
color_gradient = 'red_black_blue'
color_gradient = 'red_black_green'
color_gradient = 'yellow_black_blue'
    color_gradient = 'coolwarm'
color_gradient = 'seismic'
color_gradient = 'green_white_purple'
"""
try:
if allowLargeClusters: maxSize = 20000
else: maxSize = 7000
except Exception: maxSize = 7000
try:
PriorColumnClusters=priorColumnClusters
PriorRowClusters=priorRowClusters
except Exception:
PriorColumnClusters=None
PriorRowClusters=None
run = False
print 'max allowed cluster size:',maxSize
if len(matrix)>0 and (len(matrix)<maxSize or row_method == None):
#if len(matrix)>5000: row_metric = 'euclidean'
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
try:
### Default for display is False, when set to True, Pylab will render the image
heatmap(numpy.array(matrix), row_header, column_header, row_method, column_method,
row_metric, column_metric, color_gradient, dataset_name, display=display,
contrast=contrast,allowAxisCompression=allowAxisCompression,Normalize=Normalize,
PriorColumnClusters=PriorColumnClusters,PriorRowClusters=PriorRowClusters)
run = True
except Exception:
print traceback.format_exc()
try:
pylab.clf()
pylab.close() ### May result in TK associated errors later on
import gc
gc.collect()
except Exception: None
if len(matrix)<10000:
                    print 'Error using %s ... trying cosine instead' % row_metric
                    row_metric = 'cosine'; row_method = 'average' ### cityblock
else:
print 'Error with hierarchical clustering... only clustering arrays'
row_method = None ### Skip gene clustering
try:
heatmap(numpy.array(matrix), row_header, column_header, row_method, column_method,
row_metric, column_metric, color_gradient, dataset_name, display=display,
contrast=contrast,allowAxisCompression=allowAxisCompression,Normalize=Normalize,
PriorColumnClusters=PriorColumnClusters,PriorRowClusters=PriorRowClusters)
run = True
except Exception:
print traceback.format_exc()
                    print 'Unable to generate cluster due to dataset incompatibility.'
elif len(matrix)==0:
print_out = 'SKIPPING HIERARCHICAL CLUSTERING!!! - Your dataset file has no associated rows.'
print print_out
else:
print_out = 'SKIPPING HIERARCHICAL CLUSTERING!!! - Your dataset file is over the recommended size limit for clustering ('+str(maxSize)+' rows). Please cluster later using "Additional Analyses"'
print print_out
try:
pylab.clf()
pylab.close() ### May result in TK associated errors later on
import gc
gc.collect()
except Exception: None
return run
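### Usage sketch for runHierarchicalClustering (argument values hypothetical;
### heatmap() and globals such as priorColumnClusters are defined elsewhere
### in this module):
#
# run = runHierarchicalClustering(matrix, row_header, column_header, 'myset',
#     'average', 'cosine', 'single', 'euclidean', 'yellow_black_blue',
#     display=False)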
def debugTKBug():
return None
def runHCexplicit(filename, graphics, row_method, row_metric, column_method, column_metric, color_gradient,
extra_params, display=True, contrast=None, Normalize=False, JustShowTheseIDs=[],compressAxis=True):
""" Explicit method for hieararchical clustering with defaults defined by the user (see below function) """
#print [filename, graphics, row_method, row_metric, column_method, column_metric, color_gradient, contrast, Normalize]
global root_dir
global inputFilename
global originalFilename
global graphic_link
global allowLargeClusters
global GroupDB
global justShowTheseIDs
global targetGeneIDs
global normalize
global rho_cutoff
global species
global runGOElite
global EliteGeneSets
global storeGeneSetName
EliteGeneSets=[]
targetGene=[]
filterByPathways=False
runGOElite = False
justShowTheseIDs = JustShowTheseIDs
allowLargeClusters = True
if compressAxis:
allowAxisCompression = True
else:
allowAxisCompression = False
graphic_link=graphics ### Store all locations of pngs
inputFilename = filename ### Used when calling R
filterIDs = False
normalize = Normalize
try:
### Specific additional optional parameters for filtering
transpose = extra_params.Transpose()
try:
rho_cutoff = extra_params.RhoCutoff()
print 'Setting correlation cutoff to a rho of',rho_cutoff
except Exception:
rho_cutoff = 0.5 ### Always done if no rho, but only used if getGeneCorrelations == True
#print 'Setting correlation cutoff to a rho of',rho_cutoff
PathwayFilter = extra_params.PathwaySelect()
GeneSet = extra_params.GeneSet()
OntologyID = extra_params.OntologyID()
Normalize = extra_params.Normalize()
normalize = Normalize
filterIDs = True
species = extra_params.Species()
platform = extra_params.Platform()
vendor = extra_params.Vendor()
newInput = findParentDir(inputFilename)+'/GeneSetClustering/'+findFilename(inputFilename)
targetGene = extra_params.GeneSelection() ### Select a gene or ID to get the top correlating genes
getGeneCorrelations = extra_params.GetGeneCorrelations() ### Select a gene or ID to get the top correlating genes
filterByPathways = extra_params.FilterByPathways()
PathwayFilter, filterByPathways = verifyPathwayName(PathwayFilter,GeneSet,OntologyID,filterByPathways)
justShowTheseIDs_var = extra_params.JustShowTheseIDs()
if len(justShowTheseIDs_var)>0:
justShowTheseIDs = justShowTheseIDs_var
elif len(targetGene)>0:
targetGene = string.replace(targetGene,'\n',' ')
targetGene = string.replace(targetGene,'\r',' ')
justShowTheseIDs = string.split(targetGene,' ')
try:
EliteGeneSets = extra_params.ClusterGOElite()
if EliteGeneSets != ['']: runGOElite = True
except Exception:
#print traceback.format_exc()
pass
try:
storeGeneSetName = extra_params.StoreGeneSetName()
except Exception:
storeGeneSetName = ''
except Exception,e:
#print traceback.format_exc();sys.exit()
transpose = extra_params
root_dir = findParentDir(filename)
if 'ExpressionOutput/Clustering' in root_dir:
root_dir = string.replace(root_dir,'ExpressionOutput/Clustering','DataPlots')
elif 'ExpressionOutput' in root_dir:
root_dir = string.replace(root_dir,'ExpressionOutput','DataPlots') ### Applies to clustering of LineageProfiler results
root_dir = string.replace(root_dir,'/Clustering','') ### Applies to clustering of MarkerFinder results
else:
root_dir += '/DataPlots/'
try: os.mkdir(root_dir) ### May need to create this directory
except Exception: None
if row_method == 'hopach': reverseOrder = False
else: reverseOrder = True
#"""
matrix, column_header, row_header, dataset_name, group_db = importData(filename,Normalize=Normalize,reverseOrder=reverseOrder)
GroupDB = group_db
inputFilename = string.replace(inputFilename,'.cdt','.txt')
originalFilename = inputFilename
try:
        if len(priorColumnClusters)>0 and len(priorRowClusters)>0 and row_method==None and column_method == None:
try: justShowTheseIDs = importPriorDrivers(inputFilename)
except Exception: pass #justShowTheseIDs=[]
except Exception:
#print traceback.format_exc()
pass
#print len(matrix),;print len(column_header),;print len(row_header)
if filterIDs:
transpose_update = True ### Since you can filterByPathways and getGeneCorrelations, only transpose once
if filterByPathways: ### Restrict analyses to only a single pathway/gene-set/ontology term
if isinstance(PathwayFilter, tuple) or isinstance(PathwayFilter, list):
FileName = string.join(list(PathwayFilter),' ')
FileName = string.replace(FileName,':','-')
else: FileName = PathwayFilter
if len(FileName)>40:
FileName = FileName[:40]
try: inputFilename = string.replace(newInput,'.txt','_'+FileName+'.txt') ### update the pathway reference for HOPACH
except Exception: inputFilename = string.replace(newInput,'.txt','_GeneSets.txt')
vars = filterByPathway(matrix,row_header,column_header,species,platform,vendor,GeneSet,PathwayFilter,OntologyID,transpose)
try: dataset_name += '-'+FileName
except Exception: dataset_name += '-GeneSets'
transpose_update = False
if 'amplify' in targetGene:
targetGene = string.join(vars[1],' ')+' amplify '+targetGene ### amplify the gene sets, but need the original matrix and headers (not the filtered)
else: matrix,row_header,column_header = vars
try:
            alt_targetGene = string.replace(targetGene,'amplify','')
alt_targetGene = string.replace(alt_targetGene,'driver','')
alt_targetGene = string.replace(alt_targetGene,'guide','')
alt_targetGene = string.replace(alt_targetGene,'top','')
alt_targetGene = string.replace(alt_targetGene,'positive','')
alt_targetGene = string.replace(alt_targetGene,'excludeCellCycle','')
alt_targetGene = string.replace(alt_targetGene,'monocle','')
alt_targetGene = string.replace(alt_targetGene,'GuideOnlyCorrelation','')
alt_targetGene = string.replace(alt_targetGene,' ','')
except Exception:
alt_targetGene = ''
        if getGeneCorrelations and targetGene != 'driver' and targetGene != 'GuideOnlyCorrelation' and \
                targetGene != 'guide' and targetGene != 'excludeCellCycle' and \
                targetGene != 'top' and targetGene != 'monocle' and \
                targetGene != 'positive' and len(alt_targetGene)>0: ### Restrict analyses to only genes that correlate with the target gene of interest
allowAxisCompression = False
if transpose and transpose_update == False: transpose_update = False ### If filterByPathways selected
elif transpose and transpose_update: transpose_update = True ### If filterByPathways not selected
else: transpose_update = False ### If transpose == False
if '\r' in targetGene or '\n' in targetGene:
targetGene = string.replace(targetGene, '\r',' ')
targetGene = string.replace(targetGene, '\n',' ')
if len(targetGene)>15:
inputFilename = string.replace(newInput,'.txt','-'+targetGene[:50]+'.txt') ### update the pathway reference for HOPACH
dataset_name += '-'+targetGene[:50]
else:
inputFilename = string.replace(newInput,'.txt','-'+targetGene+'.txt') ### update the pathway reference for HOPACH
dataset_name += '-'+targetGene
inputFilename = root_dir+'/'+string.replace(findFilename(inputFilename),'|',' ')
inputFilename = root_dir+'/'+string.replace(findFilename(inputFilename),':',' ') ### need to be careful of C://
dataset_name = string.replace(dataset_name,'|',' ')
dataset_name = string.replace(dataset_name,':',' ')
try:
matrix,row_header,column_header,row_method = getAllCorrelatedGenes(matrix,row_header,column_header,species,platform,vendor,targetGene,row_method,transpose_update)
except Exception:
print traceback.format_exc()
            print targetGene, 'not found in input expression file. Exiting. \n\n'
            sys.exit() ### abort: the requested target gene is absent from the input file
targetGeneIDs = targetGene
exportTargetGeneList(targetGene,inputFilename)
else:
if transpose: ### Transpose the data matrix
print 'Transposing the data matrix'
            matrix = map(numpy.array, zip(*matrix)) ### transpose: zip(*) yields tuples, converted to numpy arrays
column_header, row_header = row_header, column_header
#print len(matrix),;print len(column_header),;print len(row_header)
if len(column_header)>1000 or len(row_header)>1000:
print 'Performing hierarchical clustering (please be patient)...'
runHierarchicalClustering(matrix, row_header, column_header, dataset_name, row_method, row_metric,
column_method, column_metric, color_gradient, display=display,contrast=contrast,
allowAxisCompression=allowAxisCompression, Normalize=Normalize)
#"""
#graphic_link = [root_dir+'Clustering-exp.myeloid-steady-state-amplify positive Mki67 Clec4a2 Gria3 Ifitm6 Gfi1b -hierarchical_cosine_cosine.txt']
if 'driver' in targetGene or 'guide' in targetGene:
import RNASeq
input_file = graphic_link[-1][-1][:-4]+'.txt'
if 'excludeCellCycle' in targetGene: excludeCellCycle = True
else: excludeCellCycle = False
print 'excludeCellCycle',excludeCellCycle
targetGene = RNASeq.remoteGetDriverGenes(species,platform,input_file,excludeCellCycle=excludeCellCycle,ColumnMethod=column_method)
extra_params.setGeneSelection(targetGene) ### force correlation to these
extra_params.setGeneSet('None Selected') ### silence this
graphic_link= runHCexplicit(filename, graphic_link, row_method, row_metric, column_method, column_metric, color_gradient,
extra_params, display=display, contrast=contrast, Normalize=Normalize, JustShowTheseIDs=JustShowTheseIDs,compressAxis=compressAxis)
return graphic_link
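### Hedged usage sketch for runHCexplicit (not a documented API contract): when no
### UIData-style object is available, the except clause above treats extra_params as
### the transpose flag, so a bare boolean can be passed. The file path below is
### hypothetical and the wrapper name is ours:
def exampleRunHCexplicit():
    graphic_links=[]
    graphic_links = runHCexplicit('/path/to/exp.dataset.txt', graphic_links, 'average',
        'cosine', 'average', 'cosine', 'red_black_sky', False, display=False)
    return graphic_links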
def importPriorDrivers(inputFilename):
filename = string.replace(inputFilename,'Clustering-','')
filename = string.split(filename,'-hierarchical')[0]+'-targetGenes.txt'
    genes = open(filename, "rU").readlines()
    genes = map(lambda x: cleanUpLine(x),genes)
return genes
def exportTargetGeneList(targetGene,inputFilename):
exclude=['positive','top','driver', 'guide', 'amplify','GuideOnlyCorrelation']
exportFile = inputFilename[:-4]+'-targetGenes.txt'
eo = export.ExportFile(root_dir+findFilename(exportFile))
targetGenes = string.split(targetGene,' ')
for gene in targetGenes:
if gene not in exclude:
try: eo.write(gene+'\n')
except Exception: print 'Error export out gene (bad ascii):', [gene]
eo.close()
def debugPylab():
pylab.figure()
pylab.close()
pylab.figure()
def verifyPathwayName(PathwayFilter,GeneSet,OntologyID,filterByPathways):
import gene_associations
### If the user supplied an Ontology ID rather than a Ontology term name, lookup the term name and return this as the PathwayFilter
if len(OntologyID)>0:
PathwayFilter = gene_associations.lookupOntologyID(GeneSet,OntologyID,type='ID')
filterByPathways = True
return PathwayFilter, filterByPathways
def filterByPathway(matrix,row_header,column_header,species,platform,vendor,GeneSet,PathwayFilter,OntologyID,transpose):
### Filter all the matrix and header entries for IDs in the selected pathway
import gene_associations
import OBO_import
exportData = export.ExportFile(inputFilename)
matrix2=[]; row_header2=[]
if 'Ontology' in GeneSet: directory = 'nested'
else: directory = 'gene-mapp'
print "GeneSet(s) to analyze:",PathwayFilter
if isinstance(PathwayFilter, tuple) or isinstance(PathwayFilter, list): ### see if it is one or more pathways
associated_IDs={}
for p in PathwayFilter:
associated = gene_associations.simpleGenePathwayImport(species,GeneSet,p,OntologyID,directory)
for i in associated:associated_IDs[i]=[]
else:
associated_IDs = gene_associations.simpleGenePathwayImport(species,GeneSet,PathwayFilter,OntologyID,directory)
gene_annotations = gene_associations.importGeneData(species,'Ensembl')
vendor = string.replace(vendor,'other:','') ### For other IDs
try: array_to_ens = gene_associations.filterGeneToUID(species,'Ensembl',vendor,associated_IDs)
except Exception: array_to_ens={}
if platform == "3'array":
### IDs thus won't be Ensembl - need to translate
try:
#ens_to_array = gene_associations.getGeneToUidNoExon(species,'Ensembl-'+vendor); print vendor, 'IDs imported...'
array_to_ens = gene_associations.filterGeneToUID(species,'Ensembl',vendor,associated_IDs)
except Exception:
pass
#print platform, vendor, 'not found!!! Exiting method'; badExit
#array_to_ens = gene_associations.swapKeyValues(ens_to_array)
try:
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception:
pass
i=0
original_rows={} ### Don't add the same original ID twice if it associates with different Ensembl IDs
for row_id in row_header:
original_id = row_id; symbol = row_id
        if 'SampleLogFolds' in inputFilename or 'RelativeLogFolds' in inputFilename or 'AltConfirmed' in inputFilename or 'MarkerGenes' in inputFilename or 'blah' not in inputFilename: ### the final clause makes this always evaluate True
try: row_id,symbol = string.split(row_id,' ')[:2] ### standard ID convention is ID space symbol
except Exception:
try: symbol = gene_to_symbol[row_id][0]
except Exception: None
if len(symbol)==0: symbol = row_id
if ':' in row_id:
try:
cluster,row_id = string.split(row_id,':')
updated_row_id = cluster+':'+symbol
except Exception:
pass
else:
updated_row_id = symbol
try: original_id = updated_row_id
except Exception: pass
if platform == "3'array":
try:
try: row_ids = array_to_ens[row_id]
except Exception: row_ids = symbol_to_gene[symbol]
except Exception:
row_ids = [row_id]
else:
try:
try: row_ids = array_to_ens[row_id]
except Exception: row_ids = symbol_to_gene[symbol]
except Exception:
row_ids = [row_id]
for row_id in row_ids:
if row_id in associated_IDs:
if 'SampleLogFolds' in inputFilename or 'RelativeLogFolds' in inputFilename:
if original_id != symbol:
row_id = original_id+' '+symbol
else: row_id = symbol
else:
try: row_id = gene_annotations[row_id].Symbol()
except Exception: None ### If non-Ensembl data
if original_id not in original_rows: ### Don't add the same ID twice if associated with mult. Ensembls
matrix2.append(matrix[i])
#row_header2.append(row_id)
row_header2.append(original_id)
original_rows[original_id]=None
i+=1
if transpose:
        matrix2 = map(numpy.array, zip(*matrix2)) ### transpose: zip(*) yields tuples, converted to numpy arrays
column_header, row_header2 = row_header2, column_header
exportData.write(string.join(['UID']+column_header,'\t')+'\n') ### title row export
i=0
for row_id in row_header2:
exportData.write(string.join([row_id]+map(str,matrix2[i]),'\t')+'\n') ### export values
i+=1
print len(row_header2), 'filtered IDs'
exportData.close()
return matrix2,row_header2,column_header
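### The row IDs handled above follow an 'ID space symbol' convention (e.g.
### "ENSG00000148584 A1CF"), optionally prefixed by a cluster ("c1:ID symbol").
### A minimal stand-alone parsing sketch of that convention (helper name is ours):
def exampleSplitIDSymbol(row_id):
    parts = string.split(row_id,' ')
    if len(parts)>1: return parts[0], parts[1] ### (primary ID, gene symbol)
    return row_id, row_id ### no symbol present - fall back to the ID itself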
def getAllCorrelatedGenes(matrix,row_header,column_header,species,platform,vendor,targetGene,row_method,transpose):
### Filter all the matrix and header entries for IDs in the selected targetGene
resort_by_ID_name=False
if resort_by_ID_name:
index=0; new_row_header=[]; new_matrix=[]; temp_row_header = []
for name in row_header: temp_row_header.append((name,index)); index+=1
temp_row_header.sort()
for (name,index) in temp_row_header:
new_row_header.append(name)
new_matrix.append(matrix[index])
matrix = new_matrix
row_header = new_row_header
exportData = export.ExportFile(inputFilename)
try:
import gene_associations
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
#import OBO_import; symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception:
print 'No Ensembl-Symbol database available for',species
if platform == "3'array":
### IDs thus won't be Ensembl - need to translate
try:
if ':' in vendor:
vendor = string.split(vendor,':')[1]
#ens_to_array = gene_associations.getGeneToUidNoExon(species,'Ensembl-'+vendor); print vendor, 'IDs imported...'
array_to_ens = gene_associations.filterGeneToUID(species,'Ensembl',vendor,{})
except Exception,e:
array_to_ens={}
for uid in array_to_ens:
for gid in array_to_ens[uid]:
if gid in gene_to_symbol:
symbol = gene_to_symbol[gid][0]
try: gene_to_symbol[uid].append(symbol)
except Exception: gene_to_symbol[uid] = [symbol]
matrix2=[]
row_header2=[]
matrix_db={} ### Used to optionally sort according to the original order
multipleGenes = False
i=0
### If multiple genes entered, just display these
targetGenes=[targetGene]
if ' ' in targetGene or ',' in targetGene or '|' in targetGene or '\n' in targetGene or '\r' in targetGene:
multipleGenes = True
if ' ' in targetGene: delim = ' '
if ',' in targetGene: delim = ','
if '|' in targetGene and 'alt_junction' not in originalFilename: delim = '|'
if '\n' in targetGene: delim = '\n'
if '\r' in targetGene: delim = '\r'
targetGenes = string.split(targetGene,delim)
if row_method != None: targetGenes.sort()
for row_id in row_header:
original_rowid = row_id
symbol=row_id
if ':' in row_id:
a,b = string.split(row_id,':')[:2]
if 'ENS' in a or len(a)==17:
try:
row_id = a
symbol = gene_to_symbol[row_id][0]
except Exception: symbol =''
elif 'ENS' not in b and len(a)!=17:
row_id = b
elif 'ENS' in b:
symbol = original_rowid
row_id = a
try: row_id,symbol = string.split(row_id,' ')[:2] ### standard ID convention is ID space symbol
except Exception:
try: symbol = gene_to_symbol[row_id][0]
except Exception:
if 'ENS' not in original_rowid:
row_id, symbol = row_id, row_id
if 'ENS' not in original_rowid and len(original_rowid)!=17:
if original_rowid != symbol:
symbol = original_rowid+' '+symbol
for gene in targetGenes:
if string.lower(gene) == string.lower(row_id) or string.lower(gene) == string.lower(symbol) or string.lower(original_rowid)==string.lower(gene):
matrix2.append(matrix[i]) ### Values for the row
row_header2.append(symbol)
matrix_db[symbol]=matrix[i]
i+=1
i=0
#for gene in targetGenes:
# if gene not in matrix_db: print gene
else:
i=0
original_rows={} ### Don't add the same original ID twice if it associates with different Ensembl IDs
for row_id in row_header:
original_id = row_id
symbol = 'NA'
            if 'SampleLogFolds' in inputFilename or 'RelativeLogFolds' in inputFilename or 'blah' not in inputFilename: ### the final clause makes this always evaluate True
try: row_id,symbol = string.split(row_id,' ')[:2] ### standard ID convention is ID space symbol
except Exception:
try: symbol = gene_to_symbol[row_id][0]
except Exception:
row_id, symbol = row_id, row_id
original_id = row_id
if row_id == targetGene or symbol == targetGene:
targetGeneValues = matrix[i] ### Values for the row
break
i+=1
i=0
if multipleGenes==False: limit = 50
else: limit = 140 # lower limit is 132
print 'limit:',limit
if multipleGenes==False or 'amplify' in targetGene or 'correlated' in targetGene:
row_header3=[] ### Convert to symbol if possible
if multipleGenes==False:
targetGeneValue_array = [targetGeneValues]
else:
targetGeneValue_array = matrix2
if len(row_header2)>4 and len(row_header)<20000:
            print 'Performing all pairwise correlations...',
corr_matrix = numpyCorrelationMatrixGene(matrix,row_header,row_header2,gene_to_symbol)
print 'complete'
matrix2=[]; original_headers=row_header2; row_header2 = []
matrix2_alt=[]; row_header2_alt=[]
### If one gene entered, display the most positive and negative correlated
import markerFinder; k=0
for targetGeneValues in targetGeneValue_array:
correlated=[]
anticorrelated=[]
try: targetGeneID = original_headers[k]
except Exception: targetGeneID=''
try:
rho_results = list(corr_matrix[targetGeneID])
except Exception:
#print traceback.format_exc()
rho_results = markerFinder.simpleScipyPearson(matrix,targetGeneValues)
correlated_symbols={}
#print targetGeneID, rho_results[:130][-1];sys.exit()
for (rho,ind) in rho_results[:limit]: ### Get the top-50 correlated plus the gene of interest
proceed = True
                if 'top' in targetGene:
                    if rho_results[4][0]<rho_cutoff: proceed = False ### require at least the 5th-ranked gene to pass the rho cutoff
if rho>rho_cutoff and proceed: #and rho_results[3][0]>rho_cutoff:# ensures only clustered genes considered
rh = row_header[ind]
#if gene_to_symbol[rh][0] in targetGenes:correlated.append(gene_to_symbol[rh][0])
#correlated.append(gene_to_symbol[rh][0])
if len(row_header2)<100 or multipleGenes:
rh = row_header[ind]
#print rh, rho # Ly6c1, S100a8
if matrix[ind] not in matrix2:
if 'correlated' in targetGene:
if rho!=1:
matrix2.append(matrix[ind])
row_header2.append(rh)
if targetGeneValues not in matrix2: ### gene ID systems can be different between source and query
matrix2.append(targetGeneValues)
row_header2.append(targetGeneID)
try:correlated_symbols[gene_to_symbol[rh][0]]=ind
except Exception: correlated_symbols[rh]=ind
#print targetGeneValues, targetGene;sys.exit()
else:
matrix2.append(matrix[ind])
row_header2.append(rh)
try: correlated_symbols[gene_to_symbol[rh][0]]=ind
except Exception: correlated_symbols[rh]=ind
#if rho!=1: print gene_to_symbol[rh][0],'pos',targetGeneID
#sys.exit()
rho_results.reverse()
for (rho,ind) in rho_results[:limit]: ### Get the top-50 anti-correlated plus the gene of interest
if rho<-1*rho_cutoff and 'positive' not in targetGene:
rh = row_header[ind]
#if gene_to_symbol[rh][0] in targetGenes:anticorrelated.append(gene_to_symbol[rh][0])
#anticorrelated.append(gene_to_symbol[rh][0])
if len(row_header2)<100 or multipleGenes:
rh = row_header[ind]
if matrix[ind] not in matrix2:
if 'correlated' in targetGene:
if rho!=1:
matrix2.append(matrix[ind])
row_header2.append(rh)
if targetGeneValues not in matrix2:
matrix2.append(targetGeneValues)
row_header2.append(targetGeneID)
try: correlated_symbols[gene_to_symbol[rh][0]]=ind
except Exception: correlated_symbols[rh]=ind
#print targetGeneValues, targetGene;sys.exit()
else:
matrix2.append(matrix[ind])
row_header2.append(rh)
try: correlated_symbols[gene_to_symbol[rh][0]]=ind
except Exception: correlated_symbols[rh]=ind
#if rho!=1: print gene_to_symbol[rh][0],'neg',targetGeneID
try:
### print overlapping input genes that are correlated
if len(correlated_symbols)>0:
potentially_redundant=[]
for i in targetGenes:
if i in correlated_symbols:
if i != targetGeneID: potentially_redundant.append((i,correlated_symbols[i]))
if len(potentially_redundant)>0:
### These are intra-correlated genes based on the original filtered query
#print targetGeneID, potentially_redundant
for (rh,ind) in potentially_redundant:
matrix2_alt.append(matrix[ind])
row_header2_alt.append(rh)
rho_results.reverse()
#print targetGeneID, correlated_symbols, rho_results[:5]
except Exception:
pass
k+=1
#print targetGeneID+'\t'+str(len(correlated))+'\t'+str(len(anticorrelated))
#sys.exit()
if 'IntraCorrelatedOnly' in targetGene:
matrix2 = matrix2_alt
row_header2 = row_header2_alt
for r in row_header2:
try:
row_header3.append(gene_to_symbol[r][0])
except Exception: row_header3.append(r)
row_header2 = row_header3
#print len(row_header2),len(row_header3),len(matrix2);sys.exit()
matrix2.reverse() ### Display from top-to-bottom rather than bottom-to-top (this is how the clusters are currently ordered in the heatmap)
row_header2.reverse()
if 'amplify' not in targetGene:
row_method = None ### don't cluster the rows (row_method)
if 'amplify' not in targetGene and 'correlated' not in targetGene:
        ### reorder according to the original gene order
matrix_temp=[]
header_temp=[]
#print targetGenes
for symbol in targetGenes:
if symbol in matrix_db:
matrix_temp.append(matrix_db[symbol]); header_temp.append(symbol)
#print len(header_temp), len(matrix_db)
if len(header_temp) >= len(matrix_db): ### Hence it worked and all IDs are the same type
matrix2 = matrix_temp
row_header2 = header_temp
if transpose:
        matrix2 = map(numpy.array, zip(*matrix2)) ### transpose: zip(*) yields tuples, converted to numpy arrays
column_header, row_header2 = row_header2, column_header
exclude=[]
#exclude = excludeHighlyCorrelatedHits(numpy.array(matrix2),row_header2)
exportData.write(string.join(['UID']+column_header,'\t')+'\n') ### title row export
i=0
for row_id in row_header2:
if ':' in row_id:
a,b = string.split(row_id,':')[:2]
if 'ENS' in a:
try: row_id=string.replace(row_id,a,gene_to_symbol[a][0])
except Exception,e: pass
row_header2[i] = row_id
elif 'ENS' in row_id and ' ' in row_id:
row_id = string.split(row_id, ' ')[1]
row_header2[i] = row_id
elif ' ' in row_id:
try: a,b = string.split(row_id, ' ')
except Exception: a = 1; b=2
if a==b:
row_id = a
if row_id not in exclude:
exportData.write(string.join([row_id]+map(str,matrix2[i]),'\t')+'\n') ### export values
i+=1
print len(row_header2), 'top-correlated IDs'
exportData.close()
return matrix2,row_header2,column_header,row_method
def numpyCorrelationMatrixGeneStore(x,rows,genes,gene_to_symbol):
### Decided not to use since it would require writing out the whole correlation matrix which is huge (1+GB) and time-intensive to import
start = time.time()
output_file = string.replace(originalFilename,'.txt','.corrmatrix')
status = verifyFile(output_file)
gene_correlations={}
if status == 'found':
def splitInt(x):
rho,ind = string.split(x,'|')
return (float(rho),int(float(ind)))
for line in open(output_file,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
scores = map(lambda x: splitInt(x), t[1:])
gene_correlations[t[0]] = scores
else:
eo=export.ExportFile(output_file)
#D1 = numpy.ma.corrcoef(x)
D1 = numpy.corrcoef(x)
i=0
for score_ls in D1:
scores = []
try: symbol = gene_to_symbol[rows[i]][0]
except Exception: symbol = '$'
if rows[i] in genes or symbol in genes:
k=0
for v in score_ls:
if str(v)!='nan':
scores.append((v,k))
k+=1
scores.sort()
scores.reverse()
if len(symbol)==1: symbol = rows[i]
gene_correlations[symbol] = scores
export_values = [symbol]
for (v,k) in scores: ### re-import next time to save time
export_values.append(str(v)[:5]+'|'+str(k))
eo.write(string.join(export_values,'\t')+'\n')
i+=1
eo.close()
print len(gene_correlations)
    print time.time() - start, 'seconds'
return gene_correlations
def numpyCorrelationMatrixGene(x,rows,genes,gene_to_symbol):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides import warnings
#D1 = numpy.ma.corrcoef(x)
D1 = numpy.corrcoef(x)
i=0
gene_correlations={}
for score_ls in D1:
scores = []
try: symbol = gene_to_symbol[rows[i]][0]
except Exception: symbol = '$'
if rows[i] in genes or symbol in genes:
k=0
for v in score_ls:
if str(v)!='nan':
scores.append((v,k))
k+=1
scores.sort()
scores.reverse()
if len(symbol)==1: symbol = rows[i]
gene_correlations[symbol] = scores
i+=1
return gene_correlations
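### A self-contained sketch of the ranking performed above: numpy.corrcoef returns the
### full row-by-row Pearson matrix, and one row of that matrix ranks all genes against
### a single target gene (toy data below; the function name is ours):
def exampleCorrcoefRanking():
    import numpy
    data = numpy.array([[1.,2.,3.,4.],[2.,4.,6.,8.],[4.,3.,2.,1.]])
    rhos = numpy.corrcoef(data)[0] ### correlations of row 0 against every row
    ranked = [(rhos[k],k) for k in range(len(rhos))]
    ranked.sort(); ranked.reverse() ### highest rho first, as in the function above
    return ranked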
def runHCOnly(filename,graphics,Normalize=False):
""" Simple method for hieararchical clustering with defaults defined by the function rather than the user (see above function) """
global root_dir
global graphic_link
global inputFilename
global GroupDB
global allowLargeClusters
global runGOElite
global EliteGeneSets
runGOElite = False
EliteGeneSets=[]
allowLargeClusters = False
    ###############
    global originalFilename
    global justShowTheseIDs
    global targetGeneIDs
    global normalize
    global species
    global storeGeneSetName
    targetGene=[]
    filterByPathways=False
    ###############
graphic_link=graphics ### Store all locations of pngs
inputFilename = filename ### Used when calling R
root_dir = findParentDir(filename)
if 'ExpressionOutput/Clustering' in root_dir:
root_dir = string.replace(root_dir,'ExpressionOutput/Clustering','DataPlots')
elif 'ExpressionOutput' in root_dir:
root_dir = string.replace(root_dir,'ExpressionOutput','DataPlots') ### Applies to clustering of LineageProfiler results
else:
root_dir += '/DataPlots/'
try: os.mkdir(root_dir) ### May need to create this directory
except Exception: None
row_method = 'average'
column_method = 'weighted'
row_metric = 'cosine'
column_metric = 'cosine'
if 'Lineage' in filename or 'Elite' in filename:
color_gradient = 'red_white_blue'
else:
color_gradient = 'yellow_black_blue'
color_gradient = 'red_black_sky'
matrix, column_header, row_header, dataset_name, group_db = importData(filename,Normalize=Normalize)
GroupDB = group_db
runHierarchicalClustering(matrix, row_header, column_header, dataset_name,
row_method, row_metric, column_method, column_metric, color_gradient, display=False, Normalize=Normalize)
return graphic_link
def runPCAonly(filename,graphics,transpose,showLabels=True,plotType='3D',display=True,
algorithm='SVD',geneSetName=None, species=None, zscore=True, colorByGene=None,
reimportModelScores=True):
global root_dir
global graphic_link
graphic_link=graphics ### Store all locations of pngs
root_dir = findParentDir(filename)
root_dir = string.replace(root_dir,'ExpressionOutput/Clustering','DataPlots')
root_dir = string.replace(root_dir,'ExpressionInput','DataPlots')
if 'DataPlots' not in root_dir:
root_dir += '/DataPlots/'
try: os.mkdir(root_dir) ### May need to create this directory
except Exception: None
### Transpose matrix and build PCA
geneFilter=None
if algorithm == 't-SNE' and reimportModelScores:
dataset_name = string.split(filename,'/')[-1][:-4]
try:
### if the scores are present, we only need to import the genes of interest (save time importing large matrices)
            importtSNEScores(root_dir+dataset_name+'-tSNE_scores.txt') ### probe: raises if the stored scores file is absent
            if colorByGene==None:
geneFilter = [''] ### It won't import the matrix, basically
elif ' ' in colorByGene:
geneFilter = string.split(colorByGene,' ')
else:
geneFilter = [colorByGene]
except Exception:
geneFilter = [''] ### It won't import the matrix, basically
matrix, column_header, row_header, dataset_name, group_db = importData(filename,zscore=zscore,geneFilter=geneFilter)
if transpose == False: ### We normally transpose the data, so if True, we don't transpose (I know, it's confusing)
        matrix = map(numpy.array, zip(*matrix)) ### transpose: zip(*) yields tuples, converted to numpy arrays
column_header, row_header = row_header, column_header
    if (len(column_header)>1000 or len(row_header)>1000) and algorithm != 't-SNE':
print 'Performing Principal Component Analysis (please be patient)...'
#PrincipalComponentAnalysis(numpy.array(matrix), row_header, column_header, dataset_name, group_db, display=True)
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
if algorithm == 't-SNE':
            matrix = map(numpy.array, zip(*matrix)) ### transpose: zip(*) yields tuples, converted to numpy arrays
column_header, row_header = row_header, column_header
tSNE(numpy.array(matrix),column_header,dataset_name,group_db,display=display,
showLabels=showLabels,row_header=row_header,colorByGene=colorByGene,species=species,
reimportModelScores=reimportModelScores)
elif plotType == '3D':
try: PCA3D(numpy.array(matrix), row_header, column_header, dataset_name, group_db,
display=display, showLabels=showLabels, algorithm=algorithm, geneSetName=geneSetName,
species=species, colorByGene=colorByGene)
except Exception:
print traceback.format_exc()
PrincipalComponentAnalysis(numpy.array(matrix), row_header, column_header,
dataset_name, group_db, display=display, showLabels=showLabels, algorithm=algorithm,
geneSetName=geneSetName, species=species, colorByGene=colorByGene)
else:
PrincipalComponentAnalysis(numpy.array(matrix), row_header, column_header, dataset_name,
group_db, display=display, showLabels=showLabels, algorithm=algorithm,
geneSetName=geneSetName, species=species, colorByGene=colorByGene)
return graphic_link
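### Hedged sketch of SVD-based PCA consistent with the algorithm='SVD' option above
### (the real PCA3D/PrincipalComponentAnalysis implementations live elsewhere in this
### module; this is a toy illustration only and the function name is ours):
def examplePCAviaSVD():
    import numpy
    M = numpy.random.rand(10,4)
    M = M - M.mean(axis=0) ### center each column before decomposition
    U,s,Vt = numpy.linalg.svd(M, full_matrices=False)
    scores = U*s ### sample scores; columns are ordered by explained variance
    return scores[:,:2] ### coordinates along the first two principal components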
def outputClusters(filenames,graphics,Normalize=False,Species=None,platform=None,vendor=None):
""" Peforms PCA and Hiearchical clustering on exported log-folds from AltAnalyze """
global root_dir
global graphic_link
global inputFilename
global GroupDB
global allowLargeClusters
global EliteGeneSets
EliteGeneSets=[]
global runGOElite
runGOElite = False
allowLargeClusters=False
graphic_link=graphics ### Store all locations of pngs
filename = filenames[0] ### This is the file to cluster with "significant" gene changes
inputFilename = filename ### Used when calling R
root_dir = findParentDir(filename)
root_dir = string.replace(root_dir,'ExpressionOutput/Clustering','DataPlots')
### Transpose matrix and build PCA
original = importData(filename,Normalize=Normalize)
matrix, column_header, row_header, dataset_name, group_db = original
    matrix = map(numpy.array, zip(*matrix)) ### transpose: zip(*) yields tuples, converted to numpy arrays
column_header, row_header = row_header, column_header
if len(row_header)<700000 and len(column_header)<700000 and len(column_header)>2:
PrincipalComponentAnalysis(numpy.array(matrix), row_header, column_header, dataset_name, group_db)
else:
        print 'SKIPPING PCA!!! - Your dataset is outside the supported size range for PCA (needs >2 columns and <700000 rows). Please cluster later using "Additional Analyses".'
row_method = 'average'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'red_white_blue'
color_gradient = 'red_black_sky'
global species
species = Species
if 'LineageCorrelations' not in filename and 'Zscores' not in filename:
EliteGeneSets=['GeneOntology']
runGOElite = True
### Generate Significant Gene HeatMap
matrix, column_header, row_header, dataset_name, group_db = original
GroupDB = group_db
runHierarchicalClustering(matrix, row_header, column_header, dataset_name, row_method, row_metric, column_method, column_metric, color_gradient, Normalize=Normalize)
### Generate Outlier and other Significant Gene HeatMap
for filename in filenames[1:]:
inputFilename = filename
matrix, column_header, row_header, dataset_name, group_db = importData(filename,Normalize=Normalize)
GroupDB = group_db
try:
runHierarchicalClustering(matrix, row_header, column_header, dataset_name, row_method, row_metric, column_method, column_metric, color_gradient, Normalize=Normalize)
except Exception: print 'Could not cluster',inputFilename,', file not found'
return graphic_link
def importEliteGeneAssociations(gene_filename):
fn = filepath(gene_filename)
x=0; fold_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if data[0]=='#': x=0
elif x==0: x=1
else:
geneid=t[0];symbol=t[1]
fold = 0
try:
                if '|' in t[6]:
                    fold = float(string.split(t[6],'|')[0]) ### Sometimes there are multiple folds for a gene (multiple probesets)
except Exception:
None
try: fold=float(t[6])
except Exception: None
fold_db[symbol] = fold
return fold_db
def importPathwayLevelFolds(filename):
fn = filepath(filename)
    x=0
    zscore_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if len(data)==0: x=0
elif x==0:
z_score_indexes = []; i=0
z_headers = []
for header in t:
if 'z_score.' in header:
z_score_indexes.append(i)
header = string.split(header,'z_score.')[1] ### Get rid of z_score.
if 'AS.' in header:
header = string.split(header,'.p')[0] ### Remove statistics details
header = 'AS.'+string.join(string.split(header,'_')[2:],'_') ### species and array type notation
else:
header = string.join(string.split(header,'-')[:-2],'-')
if '-fold' in header:
header = string.join(string.split(header,'-')[:-1],'-')
z_headers.append(header)
i+=1
headers = string.join(['Gene-Set Name']+z_headers,'\t')+'\n'
x=1
else:
term_name=t[1];geneset_type=t[2]
zscores = map(lambda x: t[x], z_score_indexes)
            max_z = max(map(float, zscores)) ### used to rank terms so only the top-scoring ones are exported below
line = string.join([term_name]+zscores,'\t')+'\n'
try: zscore_db[geneset_type].append((max_z,line))
except Exception: zscore_db[geneset_type] = [(max_z,line)]
exported_files = []
for geneset_type in zscore_db:
### Create an input file for hierarchical clustering in a child directory (Heatmaps)
clusterinput_filename = findParentDir(filename)+'/Heatmaps/Clustering-Zscores-'+geneset_type+'.txt'
exported_files.append(clusterinput_filename)
export_text = export.ExportFile(clusterinput_filename)
export_text.write(headers) ### Header is the same for each file
zscore_db[geneset_type].sort()
zscore_db[geneset_type].reverse()
i=0 ### count the entries written
for (max_z,line) in zscore_db[geneset_type]:
if i<60:
export_text.write(line) ### Write z-score values and row names
i+=1
export_text.close()
return exported_files
def importOverlappingEliteScores(filename):
fn = filepath(filename)
x=0
zscore_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if len(data)==0: x=0
elif x==0:
z_score_indexes = []; i=0
z_headers = []
for header in t:
if 'z_score.' in header:
z_score_indexes.append(i)
header = string.split(header,'z_score.')[1] ### Get rid of z_score.
if 'AS.' in header:
header = string.split(header,'.p')[0] ### Remove statistics details
header = 'AS.'+string.join(string.split(header,'_')[2:],'_') ### species and array type notation
else:
header = string.join(string.split(header,'-')[:-2],'-')
if '-fold' in header:
header = string.join(string.split(header,'-')[:-1],'-')
z_headers.append(header)
i+=1
headers = string.join(['Gene-Set Name']+z_headers,'\t')+'\n'
x=1
else:
term_name=t[1];geneset_type=t[2]
zscores = map(lambda x: t[x], z_score_indexes)
            max_z = max(map(float, zscores)) ### used to rank terms so only the top-scoring ones are exported below
line = string.join([term_name]+zscores,'\t')+'\n'
try: zscore_db[geneset_type].append((max_z,line))
except Exception: zscore_db[geneset_type] = [(max_z,line)]
exported_files = []
for geneset_type in zscore_db:
### Create an input file for hierarchical clustering in a child directory (Heatmaps)
clusterinput_filename = findParentDir(filename)+'/Heatmaps/Clustering-Zscores-'+geneset_type+'.txt'
exported_files.append(clusterinput_filename)
export_text = export.ExportFile(clusterinput_filename)
export_text.write(headers) ### Header is the same for each file
zscore_db[geneset_type].sort()
zscore_db[geneset_type].reverse()
i=0 ### count the entries written
for (max_z,line) in zscore_db[geneset_type]:
if i<60:
export_text.write(line) ### Write z-score values and row names
i+=1
export_text.close()
return exported_files
def buildGraphFromSIF(mod,species,sif_filename,ora_input_dir):
""" Imports a SIF and corresponding gene-association file to get fold changes for standardized gene-symbols """
global SpeciesCode; SpeciesCode = species
mod = 'Ensembl'
if sif_filename == None:
### Used for testing only
sif_filename = '/Users/nsalomonis/Desktop/dataAnalysis/collaborations/WholeGenomeRVista/Alex-Figure/GO-Elite_results/CompleteResults/ORA_pruned/up-2f_p05-WGRV.sif'
ora_input_dir = '/Users/nsalomonis/Desktop/dataAnalysis/collaborations/WholeGenomeRVista/Alex-Figure/up-stringent/up-2f_p05.txt'
#sif_filename = 'C:/Users/Nathan Salomonis/Desktop/Endothelial_Kidney/GO-Elite/GO-Elite_results/CompleteResults/ORA_pruned/GE.b_vs_a-fold2.0_rawp0.05-local.sif'
#ora_input_dir = 'C:/Users/Nathan Salomonis/Desktop/Endothelial_Kidney/GO-Elite/input/GE.b_vs_a-fold2.0_rawp0.05.txt'
gene_filename = string.replace(sif_filename,'.sif','_%s-gene-associations.txt') % mod
gene_filename = string.replace(gene_filename,'ORA_pruned','ORA_pruned/gene_associations')
pathway_name = string.split(sif_filename,'/')[-1][:-4]
output_filename = None
try: fold_db = importEliteGeneAssociations(gene_filename)
except Exception: fold_db={}
if ora_input_dir != None:
### This is an optional accessory function that adds fold changes from genes that are NOT in the GO-Elite pruned results (TFs regulating these genes)
try: fold_db = importDataSimple(ora_input_dir,species,fold_db,mod)
except Exception: None
try:
        ### Alternative approaches depending on the availability of GraphViz
#displaySimpleNetXGraph(sif_filename,fold_db,pathway_name)
output_filename = iGraphSimple(sif_filename,fold_db,pathway_name)
except Exception:
print traceback.format_exc()
try: displaySimpleNetwork(sif_filename,fold_db,pathway_name)
except Exception: None ### GraphViz problem
return output_filename
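### SIF files list one interaction per line as 'node1 <tab> interactionType <tab> node2'.
### importSIF (called above) is defined elsewhere in this module; the sketch below shows
### the assumed (node1, node2, type) tuple ordering consumed by formatiGraphEdges
### (the parser name is ours):
def exampleParseSIF(sif_filename):
    edges=[]
    for line in open(sif_filename,'rU').xreadlines():
        t = string.split(cleanUpLine(line),'\t')
        if len(t)>2: edges.append((t[0],t[2],t[1])) ### (node1, node2, interaction type)
    return edges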
def iGraphSimple(sif_filename,fold_db,pathway_name):
""" Build a network export using iGraph and Cairo """
edges = importSIF(sif_filename)
id_color_db = WikiPathways_webservice.getHexadecimalColorRanges(fold_db,'Genes')
output_filename = iGraphDraw(edges,pathway_name,filePath=sif_filename,display=True,graph_layout='spring',colorDB=id_color_db)
return output_filename
def iGraphDraw(edges, pathway_name, labels=None, graph_layout='shell', display=False,
node_size=700, node_color='yellow', node_alpha=0.5, node_text_size=7,
edge_color='black', edge_alpha=0.5, edge_thickness=2, edges_pos=.3,
text_font='sans-serif',filePath='test',colorDB=None):
### Here node = vertex
output_filename=None
if len(edges) > 700 and 'AltAnalyze' not in pathway_name:
print findFilename(filePath), 'too large to visualize...'
elif len(edges) > 3000:
print findFilename(filePath), 'too large to visualize...'
else:
arrow_scaler = 1 ### To scale the arrow
        if len(edges)>40: arrow_scaler = .9
vars = formatiGraphEdges(edges,pathway_name,colorDB,arrow_scaler)
vertices,iGraph_edges,vertice_db,label_list,shape_list,vertex_size, color_list, vertex_label_colors, arrow_width, edge_colors = vars
if vertices>0:
import igraph
gr = igraph.Graph(vertices, directed=True)
canvas_scaler = 0.8 ### To scale the canvas size (bounding box)
if vertices<15: canvas_scaler = 0.5
elif vertices<25: canvas_scaler = .70
elif vertices>35:
canvas_scaler += len(iGraph_edges)/400.00
filePath,canvas_scaler = correctedFilePath(filePath,canvas_scaler) ### adjust for GO-Elite
#print vertices, len(iGraph_edges), pathway_name, canvas_scaler
canvas_size = (600*canvas_scaler,600*canvas_scaler)
gr.add_edges(iGraph_edges)
gr.vs["label"] = label_list
gr.vs["shape"] = shape_list
gr.vs["size"] = vertex_size
gr.vs["label_dist"] = [1.3]*vertices
gr.vs["label_size"] = [12]*vertices
gr.vs["color"]=color_list
gr.vs["label_color"]=vertex_label_colors
gr.es["color"] = edge_colors
gr.es["arrow_size"]=arrow_width
output_filename = '%s.pdf' % filePath[:-4]
            output_filename = output_filename.encode('ascii','ignore') ### removes the damned unicode u preceding the filename
layout = "kk"
visual_style = {}
#visual_style["layout"] = layout #The default is auto, which selects a layout algorithm automatically based on the size and connectedness of the graph
visual_style["margin"] = 50 ### white-space around the network (see vertex size)
visual_style["bbox"] = canvas_size
igraph.plot(gr,output_filename, **visual_style)
output_filename = '%s.png' % filePath[:-4]
            output_filename = output_filename.encode('ascii','ignore') ### removes the damned unicode u preceding the filename
if vertices <15: gr,visual_style = increasePlotSize(gr,visual_style)
igraph.plot(gr,output_filename, **visual_style)
#surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
return output_filename
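### Minimal python-igraph rendering sketch mirroring the calls above (requires the
### cairo backend for igraph.plot; the output path is hypothetical and the function
### name is ours):
def exampleIGraphPlot():
    import igraph
    gr = igraph.Graph(3, directed=True) ### three vertices, indexed 0-2
    gr.add_edges([(0,1),(1,2)])
    gr.vs["label"] = ['a','b','c']
    igraph.plot(gr, '/tmp/example-network.pdf', bbox=(300,300), margin=50)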
def correctedFilePath(filePath,canvas_scaler):
""" Move this file to it's own network directory for GO-Elite """
if 'ORA_pruned' in filePath:
filePath = string.replace(filePath,'CompleteResults/ORA_pruned','networks')
try: os.mkdir(findParentDir(filePath))
except Exception: pass
canvas_scaler = canvas_scaler*1.3 ### These graphs tend to be more dense and difficult to read
return filePath,canvas_scaler
def increasePlotSize(gr,visual_style):
### To display the plot better, need to manually increase the size of everything
factor = 2
object_list = ["size","label_size"]
for i in object_list:
new=[]
for k in gr.vs[i]:
new.append(k*factor)
gr.vs[i] = new
    new=[]
    for i in gr.es["arrow_size"]:
        new.append(i*factor)
    gr.es["arrow_size"] = new ### write the scaled arrow sizes back to the graph
    new=[]
for i in visual_style["bbox"]:
new.append(i*factor)
visual_style["bbox"] = new
visual_style["margin"]=visual_style["margin"]*factor
return gr,visual_style
def getHMDBDataSimple():
### Determine which IDs are metabolites
program_type,database_dir = unique.whatProgramIsThis()
filename = database_dir+'/'+SpeciesCode+'/gene/HMDB.txt'
symbol_hmdb_db={}
x=0
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1
else:
t = string.split(data,'\t')
hmdb_id = t[0]; symbol = t[1]; ProteinNames = t[-1]
symbol_hmdb_db[symbol]=hmdb_id
return symbol_hmdb_db
def formatiGraphEdges(edges,pathway_name,colorDB,arrow_scaler):
    ### iGraph requires a defined vertex count, with edges expressed as index pairs into those vertices
edge_db={}
edges2=[]
vertice_db={}
shape_list=[] ### node shape in order
    label_list=[] ### Names of each vertex (aka node)
vertex_size=[]
color_list=[]
vertex_label_colors=[]
arrow_width=[] ### Indicates the presence or absence of an arrow
edge_colors=[]
k=0
try: symbol_hmdb_db = getHMDBDataSimple()
except Exception: symbol_hmdb_db={}
for (node1,node2,type) in edges:
edge_color = 'grey'
        ### Assign nodes to a numeric vertex ID
if 'TF' in pathway_name or 'WGRV' in pathway_name:
pathway = node1 ### This is the regulating TF
else:
pathway = node2 ### This is the pathway
if 'drugInteraction' == type: edge_color = "purple"
elif 'TBar' == type: edge_color = 'blue'
elif 'microRNAInteraction' == type: edge_color = '#53A26D'
elif 'transcription' in type: edge_color = '#FF7D7D'
if 'AltAnalyze' in pathway_name: default_node_color = 'grey'
else: default_node_color = "yellow"
if node1 in vertice_db: v1=vertice_db[node1]
else: #### Left hand node
### Only time the vertex is added to the below attribute lists
v1=k; label_list.append(node1)
rs = 1 ### relative size
            if ('TF' in pathway_name or 'WGRV' in pathway_name) and 'AltAnalyze' not in pathway_name:
shape_list.append('rectangle')
vertex_size.append(15)
vertex_label_colors.append('blue')
else:
if 'drugInteraction' == type:
rs = 0.75
shape_list.append('rectangle')
vertex_label_colors.append('purple')
default_node_color = "purple"
elif 'Metabolic' == type and node1 in symbol_hmdb_db:
shape_list.append('triangle-up')
vertex_label_colors.append('blue') #dark green
default_node_color = 'grey' #'#008000'
elif 'microRNAInteraction' == type:
rs = 0.75
shape_list.append('triangle-up')
vertex_label_colors.append('#008000') #dark green
default_node_color = 'grey' #'#008000'
else:
shape_list.append('circle')
vertex_label_colors.append('black')
vertex_size.append(10*rs)
vertice_db[node1]=v1; k+=1
try:
color = '#'+string.upper(colorDB[node1])
color_list.append(color) ### Hex color
except Exception:
color_list.append(default_node_color)
if node2 in vertice_db: v2=vertice_db[node2]
else: #### Right hand node
### Only time the vertex is added to the below attribute lists
v2=k; label_list.append(node2)
if 'TF' in pathway_name or 'WGRV' in pathway_name:
shape_list.append('circle')
vertex_size.append(10)
vertex_label_colors.append('black')
default_node_color = "grey"
elif 'AltAnalyze' not in pathway_name:
shape_list.append('rectangle')
vertex_size.append(15)
vertex_label_colors.append('blue')
default_node_color = "grey"
elif 'Metabolic' == type and node2 in symbol_hmdb_db:
shape_list.append('triangle-up')
vertex_label_colors.append('blue') #dark green
default_node_color = 'grey' #'#008000'
else:
shape_list.append('circle')
vertex_size.append(10)
vertex_label_colors.append('black')
default_node_color = "grey"
vertice_db[node2]=v2; k+=1
try:
color = '#'+string.upper(colorDB[node2])
color_list.append(color) ### Hex color
except Exception: color_list.append(default_node_color)
edges2.append((v1,v2))
if type == 'physical': arrow_width.append(0)
else: arrow_width.append(arrow_scaler)
try: edge_db[v1].append(v2)
except Exception: edge_db[v1]=[v2]
try: edge_db[v2].append(v1)
except Exception: edge_db[v2]=[v1]
edge_colors.append(edge_color)
vertices = len(edge_db) ### This is the number of nodes
edge_db = eliminate_redundant_dict_values(edge_db)
vertice_db2={} ### Invert
for node in vertice_db:
vertice_db2[vertice_db[node]] = node
#print len(edges2), len(edge_colors)
print vertices, 'and', len(edges2),'edges in the iGraph network.'
return vertices,edges2,vertice_db2, label_list, shape_list, vertex_size, color_list, vertex_label_colors, arrow_width, edge_colors
def eliminate_redundant_dict_values(database):
db1={}
    for key in database: vals = unique.unique(database[key]); vals.sort(); db1[key] = vals
return db1
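### Behavior sketch for the helper above, assuming unique.unique deduplicates a list:
### eliminate_redundant_dict_values({'a':[3,1,3,2]}) yields {'a': [1, 2, 3]}.
### (The function name below is ours.)
def exampleEliminateRedundant():
    db = {'a':[3,1,3,2]}
    return dict([(key, sorted(set(db[key]))) for key in db]) ### {'a': [1, 2, 3]}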
def importDataSimple(filename,species,fold_db,mod):
""" Imports an input ID file and converts those IDs to gene symbols for analysis with folds """
import GO_Elite
import OBO_import
import gene_associations
fn = filepath(filename)
x=0
metabolite_codes = ['Ck','Ca','Ce','Ch','Cp']
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if data[0]=='#': x=0
elif x==0: x=1
else:
if x == 1:
system_code = t[1]
if system_code in metabolite_codes:
mod = 'HMDB'
system_codes,source_types,mod_types = GO_Elite.getSourceData()
try: source_data = system_codes[system_code]
except Exception:
source_data = None
if 'ENS' in t[0]: source_data = system_codes['En']
else: ### Assume the file is composed of gene symbols
source_data = system_codes['Sy']
if source_data == mod:
source_is_mod = True
elif source_data==None:
None ### Skip this
else:
source_is_mod = False
mod_source = mod+'-'+source_data+'.txt'
gene_to_source_id = gene_associations.getGeneToUid(species,('hide',mod_source))
source_to_gene = OBO_import.swapKeyValues(gene_to_source_id)
try: gene_to_symbol = gene_associations.getGeneToUid(species,('hide',mod+'-Symbol'))
except Exception: gene_to_symbol={}
try: met_to_symbol = gene_associations.importGeneData(species,'HMDB',simpleImport=True)
except Exception: met_to_symbol={}
for i in met_to_symbol: gene_to_symbol[i] = met_to_symbol[i] ### Add metabolite names
x+=1
if source_is_mod == True:
if t[0] in gene_to_symbol:
symbol = gene_to_symbol[t[0]][0]
try: fold_db[symbol] = float(t[2])
except Exception: fold_db[symbol] = 0
else:
fold_db[t[0]] = 0 ### If not found (wrong ID with the wrong system) still try to color the ID in the network as yellow
elif t[0] in source_to_gene:
mod_ids = source_to_gene[t[0]]
try: mod_ids+=source_to_gene[t[2]] ###If the file is a SIF
except Exception:
try: mod_ids+=source_to_gene[t[1]] ###If the file is a SIF
except Exception: None
for mod_id in mod_ids:
if mod_id in gene_to_symbol:
symbol = gene_to_symbol[mod_id][0]
try: fold_db[symbol] = float(t[2]) ### If multiple Ensembl IDs in dataset, only record the last associated fold change
except Exception: fold_db[symbol] = 0
else: fold_db[t[0]] = 0
return fold_db
def clusterPathwayZscores(filename):
""" Imports a overlapping-results file and exports an input file for hierarchical clustering and clusters """
### This method is not fully written or in use yet - not sure if needed
if filename == None:
### Only used for testing
filename = '/Users/nsalomonis/Desktop/dataAnalysis/r4_Bruneau_TopHat/GO-Elite/TF-enrichment2/GO-Elite_results/overlapping-results_z-score_elite.txt'
exported_files = importOverlappingEliteScores(filename)
graphic_links=[]
for file in exported_files:
try: graphic_links = runHCOnly(file,graphic_links)
except Exception,e:
#print e
            print 'Unable to generate cluster due to dataset incompatibility.'
print 'Clustering of overlapping-results_z-score complete (see "GO-Elite_results/Heatmaps" directory)'
def clusterPathwayMeanFolds():
""" Imports the pruned-results file and exports an input file for hierarchical clustering and clusters """
filename = '/Users/nsalomonis/Desktop/User Diagnostics/Mm_spinal_cord_injury/GO-Elite/GO-Elite_results/pruned-results_z-score_elite.txt'
exported_files = importPathwayLevelFolds(filename)
def VennDiagram():
f = pylab.figure()
ax = f.gca()
rad = 1.4
c1 = Circle((-1,0),rad, alpha=.2, fc ='red',label='red')
c2 = Circle((1,0),rad, alpha=.2, fc ='blue',label='blue')
c3 = Circle((0,1),rad, alpha=.2, fc ='green',label='g')
#pylab.plot(c1,color='green',marker='o',markersize=7,label='blue')
#ax.add_patch(c1)
ax.add_patch(c2)
ax.add_patch(c3)
ax.set_xlim(-3,3)
ax.set_ylim(-3,3)
pylab.show()
def plotHistogram(filename):
matrix, column_header, row_header, dataset_name, group_db = importData(filename)
transpose=True
if transpose: ### Transpose the data matrix
print 'Transposing the data matrix'
        matrix = map(numpy.array, zip(*matrix)) ### transpose: zip(*) yields tuples, converted to numpy arrays
column_header, row_header = row_header, column_header
pylab.figure()
for i in matrix:
pylab.hist(i, 200, normed=0, histtype='step', cumulative=-1)
#pylab.hist(matrix, 50, cumulative=-1)
pylab.show()
def multipleSubPlots(filename,uids,SubPlotType='column'):
#uids = [uids[-1]]+uids[:-1]
str_uids = string.join(uids,'_')
matrix, column_header, row_header, dataset_name, group_db = importData(filename,geneFilter=uids)
fig = pylab.figure()
def ReplaceZeros(val,min_val):
if val == 0:
return min_val
else: return val
### Order the graphs based on the original gene order
new_row_header=[]
matrix2 = []
for uid in uids:
if uid in row_header:
ind = row_header.index(uid)
new_row_header.append(uid)
try: update_exp_vals = map(lambda x: ReplaceZeros(x,0.0001),matrix[ind])
except Exception: print uid, len(matrix[ind]);sys.exit()
matrix2.append(update_exp_vals)
matrix = numpy.array(matrix2)
row_header = new_row_header
#print row_header
color_list = ['r', 'b', 'y', 'g', 'w', 'k', 'm']
groups=[]
for sample in column_header:
group = group_db[sample][0]
if group not in groups:
groups.append(group)
fontsize=10
if len(groups)>0:
color_list = []
if len(groups)==9:
cm = matplotlib.colors.ListedColormap(['#80C241', '#118943', '#6FC8BB', '#ED1D30', '#F26E21','#8051A0', '#4684C5', '#FBD019','#3A52A4'])
elif len(groups)==3:
cm = matplotlib.colors.ListedColormap(['#4684C4','#FAD01C','#7D7D7F'])
elif len(groups)==5:
cm = matplotlib.colors.ListedColormap(['#41449B','#6182C1','#9DDAEA','#42AED0','#7F7F7F'])
else:
cm = pylab.cm.get_cmap('gist_rainbow') #gist_ncar
for i in range(len(groups)):
color_list.append(cm(1.*i/len(groups))) # color will now be an RGBA tuple
for i in range(len(matrix)):
        ax = pylab.subplot(5,1,1+i) ### fixed 5-row layout - assumes at most five genes are plotted
OY = matrix[i]
pylab.xlim(0,len(OY))
pylab.subplots_adjust(right=0.85)
ind = np.arange(len(OY))
if SubPlotType=='column':
index=-1
for v in OY:
index+=1
group = group_db[column_header[index]][0]
pylab.bar(index, v,edgecolor='black',linewidth=0,color=color_list[groups.index(group)])
width = .35
#print i ,row_header[i]
if SubPlotType=='plot':
            pylab.plot(ind,OY) ### plot sample index vs. expression values
ax.text(matrix.shape[1]-0.5, i, ' '+row_header[i],fontsize=16)
fig.autofmt_xdate()
pylab.subplots_adjust(hspace = .001)
temp = tic.MaxNLocator(3)
ax.yaxis.set_major_locator(temp)
ax.set_xticks([])
#ax.title.set_visible(False)
#pylab.xticks(ind + width / 2, column_header)
#ax.set_xticklabels(column_header)
#ax.xaxis.set_ticks([-1]+range(len(OY)+1))
#xtickNames = pylab.setp(pylab.gca(), xticklabels=['']+column_header)
#pylab.setp(xtickNames, rotation=90, fontsize=10)
#pylab.show()
pylab.savefig(filename[:-4]+'-'+str_uids+'.pdf')
def simpleTranspose(filename):
fn = filepath(filename)
matrix = []
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,' ')
matrix.append(t)
    matrix = map(numpy.array, zip(*matrix)) ### transpose: zip(*) yields tuples, converted to numpy arrays
filename = filename[:-4]+'-transposed.txt'
ea = export.ExportFile(filename)
for i in matrix:
ea.write(string.join(i,'\t')+'\n')
ea.close()
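### The zip(*rows) idiom used throughout this module transposes a list of rows; a
### two-line illustration with toy data (the function name is ours):
def exampleZipTranspose():
    rows = [[1,2,3],[4,5,6]]
    return map(list, zip(*rows)) ### [[1, 4], [2, 5], [3, 6]]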
def CorrdinateToBed(filename):
fn = filepath(filename)
matrix = []
translation={}
multiExon={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
data = string.replace(data,' ','')
t = string.split(data,'\t')
if '.gtf' in filename:
if 'chr' not in t[0]: chr = 'chr'+t[0]
else: chr = t[0]
start = t[3]; end = t[4]; strand = t[6]; annotation = t[8]
annotation = string.replace(annotation,'gene_id','')
annotation = string.replace(annotation,'transcript_id','')
annotation = string.replace(annotation,'gene_name','')
geneIDs = string.split(annotation,';')
geneID = geneIDs[0]; symbol = geneIDs[3]
        else:
            ### Non-GTF input: geneID and symbol are assumed to have been set by a prior GTF-formatted line
            chr = t[4]; strand = t[5]; start = t[6]; end = t[7]
        #if 'ENS' not in annotation:
        t = [chr,start,end,geneID,'0',strand]
#matrix.append(t)
translation[geneID] = symbol
try: multiExon[geneID]+=1
except Exception: multiExon[geneID]=1
filename = filename[:-4]+'-new.bed'
ea = export.ExportFile(filename)
for i in translation:
#ea.write(string.join(i,'\t')+'\n')
ea.write(i+'\t'+translation[i]+'\t'+str(multiExon[i])+'\n')
ea.close()
def SimpleCorrdinateToBed(filename):
fn = filepath(filename)
matrix = []
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
data = string.replace(data,' ','')
t = string.split(data,'\t')
        if '.bed' in filename:
            print t;sys.exit() ### debug guard: this function expects coordinate text files, not .bed input
chr = t[4]; strand = t[5]; start = t[6]; end = t[7]
if 'ENS' in t[0]:
t = [chr,start,end,t[0],'0',strand]
matrix.append(t)
filename = filename[:-4]+'-new.bed'
ea = export.ExportFile(filename)
for i in matrix:
ea.write(string.join(i,'\t')+'\n')
ea.close()
def simpleIntegrityCheck(filename):
fn = filepath(filename)
matrix = []
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
data = string.replace(data,' ','')
t = string.split(data,'\t')
matrix.append(t)
filename = filename[:-4]+'-new.bed'
ea = export.ExportFile(filename)
for i in matrix:
ea.write(string.join(i,'\t')+'\n')
ea.close()
def BedFileCheck(filename):
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'-new.bed'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
firstRow = False
else:
#if len(t) != 12: print len(t);sys.exit()
ea.write(string.join(t,'\t')+'\n')
ea.close()
def simpleFilter(filename):
fn = filepath(filename)
filename = filename[:-4]+'-new.txt'
ea = export.ExportFile(filename)
matrix = []
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,',')
uid = t[0]
#if '=chr' in t[0]:
if 1==2:
a,b = string.split(t[0],'=')
b = string.replace(b,'_',':')
uid = a+ '='+b
matrix.append(t)
ea.write(string.join([uid]+t[1:],'\t')+'\n')
ea.close()
def test(filename):
    """ Scratch utility: export (symbol, UniProt ID) pairs parsed from the last column of a table """
    symbols2={}
    firstLine=True
    fn = filepath(filename)
    ea = export.ExportFile(filename[:-4]+'-symbols.txt') ### output path assumed; the export object was otherwise undefined
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if firstLine:
            firstLine=False
            header = t
            i=0; start=None; alt_start=None
            for h in header:
                if h == 'WikiPathways': start=i
                if h == 'Select Protein Classes': alt_start=i
                i+=1
            if start == None: start = alt_start
        else:
            uniprot = t[0]
            symbols = string.replace(t[-1],';;',';')
            symbols = string.split(symbols,';')
            for s in symbols:
                if len(s)>0:
                    symbols2[string.upper(s),uniprot]=[]
    for (s,u) in symbols2:
        ea.write(string.join([s,u],'\t')+'\n')
    ea.close()
def coincentIncedenceTest(exp_file,TFs):
fn = filepath(TFs)
tfs={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
tfs[data]=[]
comparisons={}
for tf1 in tfs:
for tf2 in tfs:
if tf1!=tf2:
temp = [tf1,tf2]
temp.sort()
comparisons[tuple(temp)]=[]
gene_data={}
firstLine=True
fn = filepath(exp_file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if firstLine:
firstLine=False
header = string.split(data,'\t')[1:]
else:
t = string.split(data,'\t')
gene = t[0]
values = map(float,t[1:])
gene_data[gene] = values
filename = TFs[:-4]+'-all-coincident-4z.txt'
ea = export.ExportFile(filename)
comparison_db={}
for comparison in comparisons:
vals1 = gene_data[comparison[0]]
vals2 = gene_data[comparison[1]]
i=0
coincident=[]
for v1 in vals1:
v2 = vals2[i]
#print v1,v2
if v1>1 and v2>1:
coincident.append(i)
i+=1
i=0
population_db={}; coincident_db={}
for h in header:
population=string.split(h,':')[0]
if i in coincident:
try: coincident_db[population]+=1
except Exception: coincident_db[population]=1
try: population_db[population]+=1
except Exception: population_db[population]=1
i+=1
import mappfinder
final_population_percent=[]
for population in population_db:
d = population_db[population]
try: c = coincident_db[population]
except Exception: c = 0
N = float(len(header)) ### num all samples examined
            R = float(len(coincident)) ### num all coincident samples for the TFs
n = float(d) ### num all samples in cluster
r = float(c) ### num all coincident samples in cluster
try: z = mappfinder.Zscore(r,n,N,R)
except Exception: z=0
#if 'Gfi1b' in comparison and 'Gata1' in comparison: print N, R, n, r, z
final_population_percent.append([population,str(c),str(d),str(float(c)/float(d)),str(z)])
comparison_db[comparison]=final_population_percent
filtered_comparison_db={}
top_scoring_population={}
for comparison in comparison_db:
max_group=[]
for population_stat in comparison_db[comparison]:
z = float(population_stat[-1])
c = float(population_stat[1])
population = population_stat[0]
max_group.append([z,population])
max_group.sort()
z = max_group[-1][0]
pop = max_group[-1][1]
        if z>(1.96)*2 and c>3: ### note: c is the count from the last population examined above, not necessarily the top-scoring population
filtered_comparison_db[comparison]=comparison_db[comparison]
top_scoring_population[comparison] = pop,z
firstLine = True
for comparison in filtered_comparison_db:
comparison_alt = string.join(list(comparison),'|')
all_percents=[]
for line in filtered_comparison_db[comparison]:
all_percents.append(line[3])
if firstLine:
all_headers=[]
for line in filtered_comparison_db[comparison]:
all_headers.append(line[0])
ea.write(string.join(['gene-pair']+all_headers+['Top Population','Top Z'],'\t')+'\n')
firstLine=False
pop,z = top_scoring_population[comparison]
ea.write(string.join([comparison_alt]+all_percents+[pop,str(z)],'\t')+'\n')
ea.close()
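### Hedged side note (not part of AltAnalyze): the N/R/n/r statistic computed above is
### the standard MAPPFinder over-representation z-score. A minimal standalone version,
### assuming that formula is what mappfinder.Zscore implements:
def example_zscore(r, n, N, R):
    import math
    r, n, N, R = map(float, (r, n, N, R))
    expected = n * (R / N) ### expected number of coincident samples in the cluster
    variance = n * (R / N) * (1 - R / N) * (1 - (n - 1) / (N - 1))
    return (r - expected) / math.sqrt(variance)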
def getlastexon(filename):
filename2 = filename[:-4]+'-last-exon.txt'
ea = export.ExportFile(filename2)
firstLine=True
fn = filepath(filename)
last_gene = 'null'; last_exon=''
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine=False
else:
gene = t[2]
if gene != last_gene:
if ':E' in last_exon:
                    gene_id,exon = string.split(last_exon,':E') ### fixed: original called string.split(':E') without the string to split
block,region = string.split(exon,'.')
try: ea.write(last_exon+'\n')
except: pass
last_gene = gene
last_exon = t[0]
ea.close()
def replaceWithBinary(filename):
filename2 = filename[:-4]+'-binary.txt'
ea = export.ExportFile(filename2)
firstLine=True
fn = filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
ea.write(line)
firstLine=False
else:
try: values = map(float,t[1:])
except Exception: print t[1:];sys.exit()
values2=[]
for v in values:
if v == 0: values2.append('0')
else: values2.append('1')
ea.write(string.join([t[0]]+values2,'\t')+'\n')
ea.close()
def geneMethylationOutput(filename):
filename2 = filename[:-4]+'-binary.txt'
ea = export.ExportFile(filename2)
firstLine=True
fn = filepath(filename)
db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
values = (t[20],t[3]+'-methylation')
db[values]=[]
for value in db:
ea.write(string.join(list(value),'\t')+'\n')
ea.close()
def coincidentIncedence(filename,genes):
exportPairs=False
gene_data=[]
firstLine=True
fn = filepath(filename)
if exportPairs:
filename = filename[:-4]+'_'+genes[0]+'-'+genes[1]+'.txt'
ea = export.ExportFile(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if firstLine:
firstLine=False
header = string.split(data,'\t')[1:]
else:
t = string.split(data,'\t')
gene = t[0]
if gene in genes:
values = map(float,t[1:])
gene_data.append(values)
vals1 = gene_data[0]
vals2 = gene_data[1]
i=0
coincident=[]
for v1 in vals1:
v2 = vals2[i]
#print v1,v2
if v1>1 and v2>1:
coincident.append(i)
i+=1
i=0
population_db={}; coincident_db={}
for h in header:
population=string.split(h,':')[0]
if i in coincident:
try: coincident_db[population]+=1
except Exception: coincident_db[population]=1
try: population_db[population]+=1
except Exception: population_db[population]=1
i+=1
import mappfinder
final_population_percent=[]
for population in population_db:
d = population_db[population]
try: c = coincident_db[population]
except Exception: c = 0
N = float(len(header)) ### num all samples examined
        R = float(len(coincident)) ### num all coincident samples for the TFs
n = d ### num all samples in cluster
r = c ### num all coincident samples in cluster
        try: z = mappfinder.Zscore(r,n,N,R) ### capitalized to match the call used in coincentIncedenceTest above
except Exception: z = 0
final_population_percent.append([population,str(c),str(d),str(float(c)/float(d)),str(z)])
if exportPairs:
for line in final_population_percent:
ea.write(string.join(line,'\t')+'\n')
ea.close()
else:
return final_population_percent
def extractFeatures(countinp,IGH_gene_file):
import export
ExonsPresent=False
igh_genes=[]
firstLine = True
for line in open(IGH_gene_file,'rU').xreadlines():
if firstLine: firstLine=False
else:
data = cleanUpLine(line)
gene = string.split(data,'\t')[0]
igh_genes.append(gene)
if 'counts.' in countinp:
feature_file = string.replace(countinp,'counts.','IGH.')
fe = export.ExportFile(feature_file)
firstLine = True
for line in open(countinp,'rU').xreadlines():
if firstLine:
fe.write(line)
firstLine=False
else:
feature_info = string.split(line,'\t')[0]
gene = string.split(feature_info,':')[0]
if gene in igh_genes:
fe.write(line)
fe.close()
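### Hedged note (format inferred from the parsing above; the example ID is hypothetical):
### rows in the counts file are keyed by 'GENE:feature' identifiers such as
### 'ENSG00000211899:E1.1-E2.1', so splitting on ':' recovers the gene for the IGH filter.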
def filterForJunctions(countinp):
import export
ExonsPresent=False
igh_genes=[]
firstLine = True
count = 0
if 'counts.' in countinp:
feature_file = countinp[:-4]+'-output.txt'
fe = export.ExportFile(feature_file)
firstLine = True
for line in open(countinp,'rU').xreadlines():
if firstLine:
fe.write(line)
firstLine=False
else:
feature_info = string.split(line,'\t')[0]
junction = string.split(feature_info,'=')[0]
if '-' in junction:
fe.write(line)
count+=1
fe.close()
print count
def countIntronsExons(filename):
import export
exon_db={}
intron_db={}
firstLine = True
last_transcript=None
for line in open(filename,'rU').xreadlines():
if firstLine:
firstLine=False
else:
line = line.rstrip()
t = string.split(line,'\t')
transcript = t[-1]
chr = t[1]
strand = t[2]
start = t[3]
end = t[4]
exon_db[chr,start,end]=[]
if transcript==last_transcript:
if strand == '1':
intron_db[chr,last_end,start]=[]
else:
intron_db[chr,last_start,end]=[]
last_end = end
last_start = start
last_transcript = transcript
print len(exon_db)+1, len(intron_db)+1
def importGeneList(gene_list_file):
genesets=[]
genes=[]
for line in open(gene_list_file,'rU').xreadlines():
gene = line.rstrip()
genes.append(gene)
if len(genes)==5:
genesets.append(genes)
genes=[]
if len(genes)>0 and len(genes)<6:
genes+=(5-len(genes))*[gene]
genesets.append(genes)
return genesets
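### Hedged illustration (hypothetical file contents, not part of AltAnalyze):
### importGeneList pads the final geneset to five entries by repeating the last gene read.
def exampleImportGeneList():
    import tempfile, os
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False)
    tmp.write(string.join(['g1','g2','g3','g4','g5','g6','g7'],'\n')); tmp.close()
    print importGeneList(tmp.name) ### [['g1','g2','g3','g4','g5'], ['g6','g7','g7','g7','g7']]
    os.remove(tmp.name)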
def customClean(filename):
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'-new.txt'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
firstRow = False
#print len(t)
ea.write(string.join(['UID']+t,'\t')+'\n')
else:
if ';' in t[0]:
uid = string.split(t[0],';')[0]
else:
uid = t[0]
values = map(lambda x: float(x),t[1:])
values.sort()
if values[3]>=1:
ea.write(string.join([uid]+t[1:],'\t')+'\n')
ea.close()
def MakeJunctionFasta(filename):
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'.fasta'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
probeset, seq = string.split(data,'\t')[:2]
ea.write(">"+probeset+'\n')
ea.write(string.upper(seq)+'\n')
ea.close()
def ToppGeneFilter(filename):
import gene_associations, OBO_import
gene_to_symbol = gene_associations.getGeneToUid('Mm',('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'-new.txt'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
firstRow = False
#print len(t)
            ea.write(string.join(['Ensembl','Symbol','Category'],'\t')+'\n') ### header now names all three exported columns
else:
symbol = t[1]; category = t[3]
symbol = symbol[0]+string.lower(symbol[1:]) ### Mouse
category = category[:100]
if symbol in symbol_to_gene:
ensembl = symbol_to_gene[symbol][0]
ea.write(string.join([ensembl,symbol,category],'\t')+'\n')
ea.close()
def CountKallistoAlignedJunctions(filename):
fn = filepath(filename)
firstRow=True
#filename = filename[:-4]+'.fasta'
ea = export.ExportFile(filename)
found = False
counts=0
unique={}
ea = export.ExportFile(filename[:-4]+'-Mpo.txt')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if 'ENS' in line and 'JUNC1201' in line:
ea.write(line)
unique[t[0]]=[]
counts+=1
print counts, len(unique)
ea.close()
def filterRandomFile(filename,col1,col2):
fn = filepath(filename)
firstRow=True
counts=0
ea = export.ExportFile(filename[:-4]+'-columns.txt')
for line in open(fn,'rU').xreadlines():
if line[0]!='#':
data = line.rstrip()
t = string.split(data,',')
#print t[col1-1]+'\t'+t[col2-1];sys.exit()
if ' ' in t[col2-1]:
t[col2-1] = string.split(t[col2-1],' ')[2]
ea.write(t[col1-1]+'\t'+t[col2-1]+'\n')
counts+=1
#print counts, len(unique)
ea.close()
def getBlockExonPositions():
fn = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart65/ensembl/Mm/Mm_Ensembl_exon.txt'
firstRow=True
filename = fn[:-4]+'.block.txt'
ea = export.ExportFile(filename)
found = False
lines=0
exon_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
gene,exonid,chromosome,strand,start,stop, a, b, c, d = string.split(data,'\t')
exonid = string.split(exonid,'.')[0]
uid = gene+':'+exonid
if lines>0:
try:
exon_db[uid,strand].append(int(start))
exon_db[uid,strand].append(int(stop))
except Exception:
exon_db[uid,strand] = [int(start)]
exon_db[uid,strand].append(int(stop))
lines+=1
print len(exon_db)
for (uid,strand) in exon_db:
exon_db[uid,strand].sort()
if strand == '-':
exon_db[uid,strand].reverse()
start = str(exon_db[uid,strand][0])
stop = str(exon_db[uid,strand][1])
coord = [start,stop]; coord.sort()
ea.write(uid+'\t'+strand+'\t'+coord[0]+'\t'+coord[1]+'\n')
ea.close()
def combineVariants(fn):
firstRow=True
filename = fn[:-4]+'.gene-level.txt'
ea = export.ExportFile(filename)
found = False
lines=0
gene_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
gene = t[9]
if lines == 0:
header = ['UID']+t[16:]
header = string.join(header,'\t')+'\n'
ea.write(header)
lines+=1
else:
var_calls = map(float,t[16:])
if gene in gene_db:
count_sum_array = gene_db[gene]
count_sum_array = [sum(value) for value in zip(*[count_sum_array,var_calls])]
gene_db[gene] = count_sum_array
else:
gene_db[gene] = var_calls
for gene in gene_db:
var_calls = gene_db[gene]
var_calls2=[]
for i in var_calls:
if i==0: var_calls2.append('0')
else: var_calls2.append('1')
ea.write(gene+'\t'+string.join(var_calls2,'\t')+'\n')
ea.close()
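### Hedged note on the merge step above (no new behavior): zip(*[a,b]) pairs the two
### per-sample count vectors, so the comprehension performs an element-wise sum, e.g.
### a = [1,0,2]; b = [0,3,1] -> [sum(v) for v in zip(*[a,b])] == [1,3,3]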
def compareFusions(fn):
firstRow=True
filename = fn[:-4]+'.matrix.txt'
ea = export.ExportFile(filename)
found = False
lines=0
fusion_db={}
sample_list=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
sample, fusion = string.split(data,'\t')
try: fusion_db[fusion].append(sample)
except Exception: fusion_db[fusion] = [sample]
if sample not in sample_list: sample_list.append(sample)
fusion_db2=[]
for fusion in fusion_db:
samples = fusion_db[fusion]
samples2=[]
for s in sample_list:
if s in samples: samples2.append('1')
else: samples2.append('0')
fusion_db[fusion] = samples2
ea.write(string.join(['Fusion']+sample_list,'\t')+'\n')
for fusion in fusion_db:
print [fusion]
ea.write(fusion+'\t'+string.join(fusion_db[fusion],'\t')+'\n')
ea.close()
def customCleanSupplemental(filename):
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'-new.txt'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
line = string.split(data,', ')
gene_data=[]
for gene in line:
gene = string.replace(gene,' ','')
if '/' in gene:
genes = string.split(gene,'/')
gene_data.append(genes[0])
for i in genes[1:]:
gene_data.append(genes[0][:len(genes[1])*-1]+i)
elif '(' in gene:
genes = string.split(gene[:-1],'(')
gene_data+=genes
else:
gene_data.append(gene)
ea.write(string.join(gene_data,' ')+'\n')
ea.close()
if __name__ == '__main__':
#compareFusions('/Users/saljh8/Documents/1-collaborations/CPMC/GMP-MM_r2/MM_fusion_result.txt');sys.exit()
#combineVariants('/Users/saljh8/Documents/1-collaborations/CPMC/GMP-MM_r2/MM_known_variants.txt');sys.exit()
#customCleanSupplemental('/Users/saljh8/Desktop/dataAnalysis/CPMC/TCGA_MM/MM_genes_published.txt');sys.exit()
#customClean('/Users/saljh8/Desktop/dataAnalysis/Driscoll/R3/2000_run1708A_normalized.txt');sys.exit()
#simpleFilter('/Volumes/SEQ-DATA 1/all_10.5_mapped_norm_GC.csv');sys.exit()
#filterRandomFile('/Users/saljh8/Downloads/HuGene-1_1-st-v1.na36.hg19.transcript2.csv',1,8);sys.exit()
filename = '/Users/saljh8/Desktop/Grimes/GEC14078/MergedFiles.txt'
#CountKallistoAlignedJunctions(filename);sys.exit()
filename = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart72/Mm/junction1/junction_critical-junction-seq.txt'
#MakeJunctionFasta(filename);sys.exit()
filename = '/Users/saljh8/Downloads/CoexpressionAtlas.txt'
#ToppGeneFilter(filename); sys.exit()
#countIntronsExons(filename);sys.exit()
#filterForJunctions(filename);sys.exit()
#filename = '/Users/saljh8/Desktop/Grimes/GEC14074/ExpressionOutput/LineageCorrelations-test-protein_coding-zscores.txt'
#runHCOnly(filename,[]); sys.exit()
    folder = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart72/ensembl/Hs'
    import UI ### moved above its first use; the original only imported UI several lines below
    files = UI.read_directory(folder)
for file in files: #:70895507-70895600
if '.bed' in file:
#BedFileCheck(folder+'/'+file)
pass
#sys.exit()
#runPCAonly(filename,[],False,showLabels=False,plotType='2D');sys.exit()
countinp = '/Volumes/salomonis2/SinghLab/20150715_single_GCBCell/bams/ExpressionInput/counts.Bcells.txt'
IGH_gene_file = '/Volumes/salomonis2/SinghLab/20150715_single_GCBCell/bams/ExpressionInput/IGH_genes.txt'
#extractFeatures(countinp,IGH_gene_file);sys.exit()
import UI
#geneMethylationOutput(filename);sys.exit()
#ica(filename);sys.exit()
#replaceWithBinary('/Users/saljh8/Downloads/Neg_Bi_wholegenome.txt');sys.exit()
#simpleFilter('/Volumes/SEQ-DATA/AML-TCGA/ExpressionInput/counts.LAML1.txt');sys.exit()
filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/genes.tpm_tracking-ordered.txt'
#filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/6-5-2015/ExpressionInput/amplify/exp.All-wt-output.txt'
#getlastexon(filename);sys.exit()
TFs = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/TF-by-gene_matrix/all-TFs.txt'
folder = '/Users/saljh8/Downloads/BLASTX2_Gecko.tab'
genes = ['Cebpe','Gfi1']
#genes = ['Gata1','Gfi1b']
#coincentIncedenceTest(filename,TFs);sys.exit()
#coincidentIncedence(filename,genes);sys.exit()
#test(folder);sys.exit()
#files = UI.read_directory(folder)
#for file in files: SimpleCorrdinateToBed(folder+'/'+file)
#filename = '/Users/saljh8/Desktop/bed/RREs0.5_exons_unique.txt'
#simpleIntegrityCheck(filename);sys.exit()
gene_list = ['S100a8','Chd7','Ets1','Chd7','S100a8']
gene_list_file = '/Users/saljh8/Desktop/demo/Amit/ExpressionInput/genes.txt'
gene_list_file = '/Users/saljh8/Desktop/Grimes/Comb-plots/AML_genes-interest.txt'
gene_list_file = '/Users/saljh8/Desktop/dataAnalysis/Grimes/Mm_Sara-single-cell-AML/alt/AdditionalHOPACH/ExpressionInput/AML_combplots.txt'
gene_list_file = '/Users/saljh8/Desktop/Grimes/KashishNormalization/12-16-15/AllelicSeries/ExpressionInput/KO_genes.txt'
gene_list_file = '/Users/saljh8/Desktop/dataAnalysis/Grimes/All-Fluidigm/ExpressionInput/comb_plot2.txt'
genesets = importGeneList(gene_list_file)
filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/comb-plots/exp.IG2_GG1-extended-output.txt'
filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/comb-plots/genes.tpm_tracking-ordered.txt'
filename = '/Users/saljh8/Desktop/demo/Amit/ExpressedCells/GO-Elite_results/3k_selected_LineageGenes-CombPlotInput2.txt'
filename = '/Users/saljh8/Desktop/Grimes/Comb-plots/exp.AML_single-cell-output.txt'
filename = '/Users/saljh8/Desktop/dataAnalysis/Grimes/Mm_Sara-single-cell-AML/alt/AdditionalHOPACH/ExpressionInput/exp.AML.txt'
filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/12-16-15/AllelicSeries/ExpressionInput/exp.KO-output.txt'
filename = '/Users/saljh8/Desktop/dataAnalysis/Grimes/All-Fluidigm/ExpressionInput/exp.Lsk_panorama.txt'
print genesets
for gene_list in genesets:
multipleSubPlots(filename,gene_list,SubPlotType='column')
sys.exit()
plotHistogram(filename);sys.exit()
filename = '/Users/saljh8/Desktop/Grimes/Expression_final_files/ExpressionInput/amplify-wt/DataPlots/Clustering-exp.myeloid-steady-state-PCA-all_wt_myeloid_SingleCell-Klhl7 Dusp7 Slc25a33 H6pd Bcorl1 Sdpr Ypel3 251000-hierarchical_cosine_cosine.cdt'
openTreeView(filename);sys.exit()
pdf1 = "/Users/saljh8/Desktop/Grimes/1.pdf"
pdf2 = "/Users/saljh8/Desktop/Grimes/2.pdf"
outPdf = "/Users/saljh8/Desktop/Grimes/3.pdf"
merge_horizontal(outPdf, pdf1, pdf2);sys.exit()
mergePDFs(pdf1,pdf2,outPdf);sys.exit()
filename = '/Volumes/SEQ-DATA/CardiacRNASeq/BedFiles/ExpressionOutput/Clustering/SampleLogFolds-CardiacRNASeq.txt'
ica(filename);sys.exit()
features = 5
matrix, column_header, row_header, dataset_name, group_db = importData(filename)
Kmeans(features, column_header, row_header); sys.exit()
#graphViz();sys.exit()
filename = '/Users/saljh8/Desktop/delete.txt'
filenames = [filename]
outputClusters(filenames,[]); sys.exit()
#runPCAonly(filename,[],False);sys.exit()
#VennDiagram(); sys.exit()
#buildGraphFromSIF('Ensembl','Mm',None,None); sys.exit()
#clusterPathwayZscores(None); sys.exit()
pruned_folder = '/Users/nsalomonis/Desktop/CBD/LogTransformed/GO-Elite/GO-Elite_results/CompleteResults/ORA_pruned/'
input_ora_folder = '/Users/nsalomonis/Desktop/CBD/LogTransformed/GO-Elite/input/'
files = UI.read_directory(pruned_folder)
for file in files:
if '.sif' in file:
input_file = string.join(string.split(file,'-')[:-1],'-')+'.txt'
sif_file = pruned_folder+file
input_file = input_ora_folder+input_file
buildGraphFromSIF('Ensembl','Hs',sif_file,input_file)
sys.exit()
filenames = [filename]
outputClusters(filenames,[])
| wuxue/altanalyze | clustering.py | Python | apache-2.0 | 233,205 |
#! /usr/bin/env python
# Copyright (c) 2018 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from urllib2 import HTTPError
from cloudera.director.common.client import ApiClient, Configuration
from cloudera.director.latest import DeploymentsApi
def get_authenticated_client(args):
"""
Create a new API client and authenticate against a server as admin
@param args: dict of parsed command line arguments that
include server host and admin credentials
@rtype: ApiClient
@return: authenticated API client
"""
configuration = Configuration()
configuration.host = args.server
configuration.username = args.admin_username
configuration.password = args.admin_password
configuration.ssl_ca_cert = args.cafile
return ApiClient(configuration=configuration)
def get_deployment_template(client, env_name, dep_name):
"""
Get a deployment template.
@param client: Director API client
@param env_name: environment name
@param dep_name: deployment name
@rtype: DeploymentTemplate
@return: deployment template
"""
api = DeploymentsApi(client)
try:
return api.get_template_redacted(env_name, dep_name)
    except HTTPError as error:
        if error.code == 404:
            print 'Error: the deployment %s does not exist in the environment %s' % (dep_name, env_name)
            sys.exit(1)  # the original fell through and returned None, which would crash the caller
        else:
            raise error
def enable_tls_for(template, port, trusted_cert_file):
"""
Change a deployment template to enable TLS communications.
@param template: deployment template
@param port: listening port for Cloudera Manager with TLS enabled
@param trusted_cert_file: file-like object for trusted certificate
@rtype: DeploymentTemplate
@return: updated deployment template
"""
if template.tls_enabled:
raise Exception('Error: the deployment %s already has TLS enabled' % template.name)
template.tls_enabled = True
template.port = port
if trusted_cert_file:
cert_contents = trusted_cert_file.read()
template.trusted_certificate = cert_contents
return template
def disable_tls_for(template, port):
"""
Change a deployment template to disable TLS communications.
@param template: deployment template
@param port: listening port for Cloudera Manager with TLS disabled
@rtype: DeploymentTemplate
@return: updated deployment template
"""
if not template.tls_enabled:
raise Exception('Error: the deployment %s already has TLS disabled' % template.name)
template.tls_enabled = False
template.port = port
template.trusted_certificate = None
return template
def update_deployment_template(client, env_name, dep_name, template):
"""
Update a deployment template.
@param client: Director API client
@param env_name: environment name
@param dep_name: deployment name
@param template: updated deployment template
"""
api = DeploymentsApi(client)
api.update(env_name, dep_name, template)
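# Hedged end-to-end sketch (not part of the original script): drive the same helpers
# without argparse. The server URL, environment name, and deployment name are
# hypothetical placeholders.
def example_enable_tls():
    import argparse
    args = argparse.Namespace(server='https://director.example.com:7189',
                              admin_username='admin', admin_password='admin',
                              cafile=None)
    client = get_authenticated_client(args)
    template = get_deployment_template(client, 'MyEnvironment', 'MyCM')
    template = enable_tls_for(template, 7183, None)
    update_deployment_template(client, 'MyEnvironment', 'MyCM', template)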
def main():
"""
Main method.
"""
parser = argparse.ArgumentParser(description='Update TLS communications to a Director ' +
'deployment')
parser.add_argument('--admin-username', default="admin",
help='Name of an user with administrative access (defaults to %(default)s)')
parser.add_argument('--admin-password', default="admin",
help='Password for the administrative user (defaults to %(default)s)')
parser.add_argument('--server', default="http://localhost:7189",
help="Cloudera Altus Director server URL (defaults to %(default)s)")
parser.add_argument('--cafile', default=None,
help='Path to file containing trusted certificate(s) for Cloudera Altus Director ' +
'(defaults to %(default)s); required when Cloudera Altus Director is ' +
'configured for https')
parser.add_argument('--disable', action='store_true',
help='Disable TLS communication instead of enabling it')
parser.add_argument('--trusted-cert-file', type=file, default=None,
help='Path to file containing trusted certificate for Cloudera Manager ' +
'(defaults to %(default)s); optionally include when enabling TLS')
parser.add_argument('env_name',
help="Name of environment containing deployment with TLS enabled")
parser.add_argument('dep_name', help="Name of deployment with TLS enabled")
parser.add_argument('port', type=int, help="Cloudera Manager port")
args = parser.parse_args()
if args.disable and args.trusted_cert_file:
raise Exception('When disabling TLS communication, do not pass a trusted certificate ' +
'for Cloudera Manager')
if args.disable:
progress_action = 'Disabling'
completed_state = 'disabled'
else:
progress_action = 'Enabling'
completed_state = 'enabled'
print '%s TLS communications for deployment %s ...' % (progress_action, args.dep_name)
client = get_authenticated_client(args)
template = get_deployment_template(client, args.env_name, args.dep_name)
if args.disable:
template = disable_tls_for(template, args.port)
else:
template = enable_tls_for(template, args.port, args.trusted_cert_file)
update_deployment_template(client, args.env_name, args.dep_name, template)
print 'TLS communications for deployment %s is %s.' % (args.dep_name, completed_state)
if __name__ == '__main__':
try:
sys.exit(main())
except HTTPError as error:
print error.read()
raise error
| cloudera/director-scripts | tls/update-tls.d6.py | Python | apache-2.0 | 6,333 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-04-20 22:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0003_auto_20180420_2157'),
]
operations = [
migrations.AlterField(
model_name='subobsrequest',
name='time_executed',
field=models.DateTimeField(blank=True, null=True, verbose_name=b'subrequest executed time'),
),
]
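# Hedged usage note (standard Django workflow, not part of the generated file):
# apply with `python manage.py migrate events`, or roll this AlterField back with
# `python manage.py migrate events 0003_auto_20180420_2157`.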
| ytsapras/robonet_site | events/migrations/0004_auto_20180420_2204.py | Python | gpl-2.0 | 518 |
import requests # pip install requests to get it
import urllib
import json
from django.http import HttpResponse
#from members.models import User
import logging
log = logging.getLogger("api_client")
# Get the token using a POST request and a code
from django.conf import settings
SCOPE = "names relatives introduction:write introduction:read"
# leave these alone
#BASE_URL = "https://api.23andme.com/1/demo/"
BASE_URL = "https://api.23andme.com/1/"
LOGIN_URL = "https://api.23andme.com/authorize/?redirect_uri=%s&response_type=code&client_id=%s&scope=%s" % (settings.CALLBACK_URL, settings.CLIENT_ID, SCOPE)
OAUTH_KEY = "access_token"
class OAuthClient(object):
def __init__(self, access_token=None):
self.access_token = access_token
def get_token(self, authorization_code):
parameters = {
'client_id': settings.CLIENT_ID,
'client_secret': settings.CLIENT_SECRET,
'grant_type': 'authorization_code',
'code': authorization_code, # the authorization code obtained above
'redirect_uri': settings.CALLBACK_URL,
'scope': SCOPE,
}
response = requests.post(
"https://api.23andme.com/token/",
data = parameters
)
print "get_token_response.json: %s" % response.json()
if response.status_code == 200:
return (response.json()['access_token'], response.json()['refresh_token'])
else:
response.raise_for_status()
def refresh_token(self, refresh_token):
parameters = {
'client_id': settings.CLIENT_ID,
'client_secret': settings.CLIENT_SECRET,
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'redirect_uri': settings.CALLBACK_URL,
'scope': SCOPE,
}
response = requests.post(
"https://api.23andme.com/token/",
data = parameters
)
#print "response.json: %s" % response.json()
if response.status_code == 200:
self.access_token = response.json()['access_token']
return (response.json()['access_token'], response.json()['refresh_token'])
else:
response.raise_for_status()
def _get_resource(self, resource):
if self.access_token is None:
raise Exception("access_token cannot be None")
headers = {'Authorization': 'Bearer %s' % self.access_token}
url = "%s%s" % (BASE_URL, resource)
response = requests.get(
url,
headers=headers,
verify=False,
)
#print "_get_resource url: %s" % url
#print "response get response: %s" % response
#print "response get text: %s" % response.text
#print "response get status_code: %s" % response.status_code
#print "response: %s" % response
#print "response.json: %s" % response.json()
#print "response.text: %s" % response.text
if response.status_code == 200:
log.debug('_get_resource url: %s response: %s', url, response)
return response.text
else:
log.debug('_get_resource error url: %s response: %s', url, response)
response.raise_for_status()
return response.text
def _post_resource(self, resource, body):
if self.access_token is None:
raise Exception("access_token cannot be None")
headers = {'Authorization': 'Bearer %s' % self.access_token,
#'Content-Type' : 'application/octet-stream'}
'Content-Type' : 'text/plain'}
url = "%s%s" % (BASE_URL, resource)
log.debug('_post_resource url: %s body: %s' ,url, body)
###just for debug, comment out the next line!!!
#return json.loads('{"introduction": {"status": "sent"}}')
response = requests.post(
url,
headers=headers,
verify=False,
data=body,
)
#log.debug("response.status_code: %s" % response.status_code)
#log.debug('response.text: %s' % response.text)
#log.debug('response.raw: %s' % response.raw)
#log.debug('response headers: %s' % headers)
if response.status_code == 200 and response.text != '500 error':
log.debug('_post_resource url: %s response: %s .text: %s body: %s' ,url, response, response.text, body)
return response.text
else:
#response.raise_for_status()
log.debug('_post_resource error url: %s response: %s .text: %s body: %s' ,url, response, response.text, body)
return response.text
def get_user(self):
return self._get_resource("user/")
def get_name_profile(self):
return self._get_resource("names/")
def get_relatives(self, profile_id):
relatives = 'relatives/' + str(profile_id) + '/?limit=10'
return self._get_resource(relatives)
def post_intro(self, profile_id, match_id, intro_text):
if match_id == None: # this happens if you try to send to yourself
return False
#u = User() # I'm not sure my resend function was a good idea
#if u.too_soon(match_id) == True:
# return False
intro = 'introduction/' + str(profile_id) + '/' + str(match_id) + '/'
body = 'visibility=genome' + '&message_text=' + intro_text
response = self.get_send_status(profile_id, match_id)
if response["introduction"]["status"] == "rejected" or response["introduction"]["visibility"] == "genome":
log.debug('post_intro rejected response: %s match_id: %s', response, match_id)
return False
# make sure the following is set to True!!!
if response["can_send"] == True:
#log.debug('post_intro can send response: %s match_id: %s', response, match_id)
#return True #for testing
response = json.loads(self._post_resource(intro, body))
if response["introduction"]["status"] == "sent":
log.debug('post_intro post sent response: %s match_id: %s', response, match_id)
#response = self.get_send_status(profile_id, match_id)
return True
else:
log.debug('post_intro post not sent response: %s match_id: %s', response, match_id)
return False
else: #let's see if we can cancel and resend an introduction
return False
def get_send_status(self, profile_id, match_id):
intro = 'introduction/' + str(profile_id) + '/' + str(match_id)
response = json.loads(self._get_resource(intro))
if response["introduction"] == {}:
response["introduction"]["status"] = 'none'
response["introduction"]["visibility"] = 'none'
log.debug('get_send_status intro: %s response: %s' ,intro, response)
return response #returns json
def send_cancel(self, profile_id, match_id):
if self.access_token is None:
raise Exception("access_token cannot be None")
headers = {'Authorization': 'Bearer %s' % self.access_token}
#url = "%s%s" % (BASE_URL, resource)
url = BASE_URL + 'introduction/' + profile_id + '/' + match_id + '/?status=cancelled'
#print "url", url
#return True
response = requests.patch(
url,
headers=headers,
verify=False,
)
if response.status_code == 200 and response.text != '500 error':
log.debug('send_cancel url: %s response: %s .text: %s' ,url, response, response.text)
return True
else:
#response.raise_for_status("send cancel")
log.debug('send_cancel error url: %s response: %s .text: %s' ,url, response, response.text)
return False
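def example_fetch_user(authorization_code):
    """Hedged usage sketch (not part of the original module): exchange an OAuth
    authorization code for tokens, then fetch the user resource. The code value
    comes from the OAuth redirect callback and is hypothetical here."""
    client = OAuthClient()
    access_token, refresh_token = client.get_token(authorization_code)
    client.access_token = access_token
    return client.get_user()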
| jsbrava/intro23andme | api/client.py | Python | mit | 7,958 |
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Google (Scholar)
For detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.
.. _Query Parameter Definitions:
https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
"""
# pylint: disable=invalid-name, missing-function-docstring
from urllib.parse import urlencode
from datetime import datetime
from lxml import html
from searx import logger
from searx.utils import (
eval_xpath,
eval_xpath_list,
extract_text,
)
from searx.engines.google import (
get_lang_info,
time_range_dict,
detect_google_sorry,
)
# pylint: disable=unused-import
from searx.engines.google import (
supported_languages_url,
_fetch_supported_languages,
)
# pylint: enable=unused-import
# about
about = {
"website": 'https://scholar.google.com',
"wikidata_id": 'Q494817',
"official_api_documentation": 'https://developers.google.com/custom-search',
"use_official_api": False,
"require_api_key": False,
"results": 'HTML',
}
# engine dependent config
categories = ['science']
paging = True
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = False
logger = logger.getChild('google scholar')
def time_range_url(params):
"""Returns a URL query component for a google-Scholar time range based on
``params['time_range']``. Google-Scholar does only support ranges in years.
To have any effect, all the Searx ranges (*day*, *week*, *month*, *year*)
are mapped to *year*. If no range is set, an empty string is returned.
Example::
&as_ylo=2019
"""
# as_ylo=2016&as_yhi=2019
ret_val = ''
if params['time_range'] in time_range_dict:
        ret_val = urlencode({'as_ylo': datetime.now().year - 1})
return '&' + ret_val
def request(query, params):
"""Google-Scholar search request"""
offset = (params['pageno'] - 1) * 10
lang_info = get_lang_info(
# pylint: disable=undefined-variable
# params, {}, language_aliases
params, supported_languages, language_aliases
)
# subdomain is: scholar.google.xy
lang_info['subdomain'] = lang_info['subdomain'].replace("www.", "scholar.")
query_url = 'https://'+ lang_info['subdomain'] + '/scholar' + "?" + urlencode({
'q': query,
'hl': lang_info['hl'],
'lr': lang_info['lr'],
'ie': "utf8",
'oe': "utf8",
'start' : offset,
})
query_url += time_range_url(params)
logger.debug("query_url --> %s", query_url)
params['url'] = query_url
logger.debug("HTTP header Accept-Language --> %s", lang_info['Accept-Language'])
params['headers']['Accept-Language'] = lang_info['Accept-Language']
params['headers']['Accept'] = (
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
)
#params['google_subdomain'] = subdomain
return params
def response(resp):
"""Get response from google's search request"""
results = []
detect_google_sorry(resp)
# which subdomain ?
# subdomain = resp.search_params.get('google_subdomain')
# convert the text to dom
dom = html.fromstring(resp.text)
# parse results
for result in eval_xpath_list(dom, '//div[@class="gs_ri"]'):
title = extract_text(eval_xpath(result, './h3[1]//a'))
if not title:
# this is a [ZITATION] block
continue
url = eval_xpath(result, './h3[1]//a/@href')[0]
content = extract_text(eval_xpath(result, './div[@class="gs_rs"]')) or ''
pub_info = extract_text(eval_xpath(result, './div[@class="gs_a"]'))
if pub_info:
content += "[%s]" % pub_info
pub_type = extract_text(eval_xpath(result, './/span[@class="gs_ct1"]'))
if pub_type:
title = title + " " + pub_type
results.append({
'url': url,
'title': title,
'content': content,
})
# parse suggestion
for suggestion in eval_xpath(dom, '//div[contains(@class, "gs_qsuggest_wrap")]//li//a'):
# append suggestion
results.append({'suggestion': extract_text(suggestion)})
for correction in eval_xpath(dom, '//div[@class="gs_r gs_pda"]/a'):
results.append({'correction': extract_text(correction)})
return results
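# Hedged sketch (not part of the engine): searx core drives the two hooks above
# roughly as follows; the params fields mirror what request() reads, and the query
# string is an example.
#
#   params = {'pageno': 1, 'time_range': 'year', 'headers': {}}
#   params = request('graph neural networks', params)  # fills params['url'] and headers
#   # searx then fetches params['url'] and hands the HTTP response to:
#   # results = response(resp)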
| dalf/searx | searx/engines/google_scholar.py | Python | agpl-3.0 | 4,416 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Guillaume Delpierre <gde@llew.me>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: openssl_pkcs12
author:
- Guillaume Delpierre (@gdelpierre)
version_added: "2.7"
short_description: Generate OpenSSL PKCS#12 archive
description:
- This module allows one to (re-)generate PKCS#12.
requirements:
- python-pyOpenSSL
options:
action:
description:
- C(export) or C(parse) a PKCS#12.
type: str
default: export
choices: [ export, parse ]
ca_certificates:
description:
      - List of CA certificates to include.
type: list
certificate_path:
description:
- The path to read certificates and private keys from.
- Must be in PEM format.
type: path
force:
description:
- Should the file be regenerated even if it already exists.
type: bool
default: no
friendly_name:
description:
- Specifies the friendly name for the certificate and private key.
type: str
aliases: [ name ]
iter_size:
description:
- Number of times to repeat the encryption step.
type: int
default: 2048
maciter_size:
description:
- Number of times to repeat the MAC step.
type: int
default: 1
passphrase:
description:
- The PKCS#12 password.
type: str
path:
description:
- Filename to write the PKCS#12 file to.
type: path
required: true
privatekey_passphrase:
description:
- Passphrase source to decrypt any input private keys with.
type: str
privatekey_path:
description:
- File to read private key from.
type: path
state:
description:
- Whether the file should exist or not.
All parameters except C(path) are ignored when state is C(absent).
choices: [ absent, present ]
default: present
type: str
src:
description:
- PKCS#12 file path to parse.
type: path
extends_documentation_fragment:
- files
seealso:
- module: openssl_certificate
- module: openssl_csr
- module: openssl_dhparam
- module: openssl_privatekey
- module: openssl_publickey
'''
EXAMPLES = r'''
- name: Generate PKCS#12 file
openssl_pkcs12:
action: export
path: /opt/certs/ansible.p12
friendly_name: raclette
privatekey_path: /opt/certs/keys/key.pem
certificate_path: /opt/certs/cert.pem
ca_certificates: /opt/certs/ca.pem
state: present
- name: Change PKCS#12 file permission
openssl_pkcs12:
action: export
path: /opt/certs/ansible.p12
friendly_name: raclette
privatekey_path: /opt/certs/keys/key.pem
certificate_path: /opt/certs/cert.pem
ca_certificates: /opt/certs/ca.pem
state: present
mode: '0600'
- name: Regen PKCS#12 file
openssl_pkcs12:
action: export
src: /opt/certs/ansible.p12
path: /opt/certs/ansible.p12
friendly_name: raclette
privatekey_path: /opt/certs/keys/key.pem
certificate_path: /opt/certs/cert.pem
ca_certificates: /opt/certs/ca.pem
state: present
mode: '0600'
force: yes
- name: Dump/Parse PKCS#12 file
openssl_pkcs12:
action: parse
src: /opt/certs/ansible.p12
path: /opt/certs/ansible.pem
state: present
- name: Remove PKCS#12 file
openssl_pkcs12:
path: /opt/certs/ansible.p12
state: absent
'''
RETURN = r'''
filename:
    description: Path to the generated PKCS#12 file.
returned: changed or success
type: str
sample: /opt/certs/ansible.p12
privatekey:
description: Path to the TLS/SSL private key the public key was generated from.
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
'''
import stat
import os
import traceback
PYOPENSSL_IMP_ERR = None
try:
from OpenSSL import crypto
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
pyopenssl_found = False
else:
pyopenssl_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_bytes, to_native
class PkcsError(crypto_utils.OpenSSLObjectError):
pass
class Pkcs(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(Pkcs, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.action = module.params['action']
self.ca_certificates = module.params['ca_certificates']
self.certificate_path = module.params['certificate_path']
self.friendly_name = module.params['friendly_name']
self.iter_size = module.params['iter_size']
self.maciter_size = module.params['maciter_size']
self.passphrase = module.params['passphrase']
self.pkcs12 = None
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.privatekey_path = module.params['privatekey_path']
self.src = module.params['src']
if module.params['mode'] is None:
module.params['mode'] = '0400'
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(Pkcs, self).check(module, perms_required)
def _check_pkey_passphrase():
if self.privatekey_passphrase:
try:
crypto_utils.load_privatekey(self.path,
self.privatekey_passphrase)
except crypto.Error:
return False
except crypto_utils.OpenSSLBadPassphraseError:
return False
return True
if not state_and_perms:
return state_and_perms
        return _check_pkey_passphrase()  # call the nested check; the original returned the function object, which is always truthy
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'filename': self.path,
}
if self.privatekey_path:
result['privatekey_path'] = self.privatekey_path
return result
def generate(self, module):
"""Generate PKCS#12 file archive."""
self.pkcs12 = crypto.PKCS12()
if self.ca_certificates:
ca_certs = [crypto_utils.load_certificate(ca_cert) for ca_cert
in self.ca_certificates]
self.pkcs12.set_ca_certificates(ca_certs)
if self.certificate_path:
self.pkcs12.set_certificate(crypto_utils.load_certificate(
self.certificate_path))
if self.friendly_name:
self.pkcs12.set_friendlyname(to_bytes(self.friendly_name))
if self.privatekey_path:
try:
self.pkcs12.set_privatekey(crypto_utils.load_privatekey(
self.privatekey_path,
self.privatekey_passphrase)
)
except crypto_utils.OpenSSLBadPassphraseError as exc:
raise PkcsError(exc)
crypto_utils.write_file(
module,
self.pkcs12.export(self.passphrase, self.iter_size, self.maciter_size),
0o600
)
def parse(self, module):
"""Read PKCS#12 file."""
try:
with open(self.src, 'rb') as pkcs12_fh:
pkcs12_content = pkcs12_fh.read()
p12 = crypto.load_pkcs12(pkcs12_content,
self.passphrase)
pkey = crypto.dump_privatekey(crypto.FILETYPE_PEM,
p12.get_privatekey())
crt = crypto.dump_certificate(crypto.FILETYPE_PEM,
p12.get_certificate())
crypto_utils.write_file(module, b'%s%s' % (pkey, crt))
except IOError as exc:
raise PkcsError(exc)
def main():
argument_spec = dict(
action=dict(type='str', default='export', choices=['export', 'parse']),
ca_certificates=dict(type='list', elements='path'),
certificate_path=dict(type='path'),
force=dict(type='bool', default=False),
friendly_name=dict(type='str', aliases=['name']),
iter_size=dict(type='int', default=2048),
maciter_size=dict(type='int', default=1),
passphrase=dict(type='str', no_log=True),
path=dict(type='path', required=True),
privatekey_passphrase=dict(type='str', no_log=True),
privatekey_path=dict(type='path'),
state=dict(type='str', default='present', choices=['absent', 'present']),
src=dict(type='path'),
)
required_if = [
['action', 'parse', ['src']],
]
module = AnsibleModule(
add_file_common_args=True,
argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True,
)
if not pyopenssl_found:
module.fail_json(msg=missing_required_lib('pyOpenSSL'), exception=PYOPENSSL_IMP_ERR)
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg="The directory '%s' does not exist or the path is not a directory" % base_dir
)
pkcs12 = Pkcs(module)
changed = False
if module.params['state'] == 'present':
if module.check_mode:
result = pkcs12.dump()
result['changed'] = module.params['force'] or not pkcs12.check(module)
module.exit_json(**result)
try:
if not pkcs12.check(module, perms_required=False) or module.params['force']:
if module.params['action'] == 'export':
if not module.params['friendly_name']:
module.fail_json(msg='Friendly_name is required')
pkcs12.generate(module)
changed = True
else:
pkcs12.parse(module)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, changed):
changed = True
except PkcsError as exc:
module.fail_json(msg=to_native(exc))
else:
if module.check_mode:
result = pkcs12.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
if os.path.exists(module.params['path']):
try:
pkcs12.remove(module)
changed = True
except PkcsError as exc:
module.fail_json(msg=to_native(exc))
result = pkcs12.dump()
result['changed'] = changed
if os.path.exists(module.params['path']):
file_mode = "%04o" % stat.S_IMODE(os.stat(module.params['path']).st_mode)
result['mode'] = file_mode
module.exit_json(**result)
if __name__ == '__main__':
main()
| dagwieers/ansible | lib/ansible/modules/crypto/openssl_pkcs12.py | Python | gpl-3.0 | 11,535 |
import hvac
from st2actions.runners.pythonrunner import Action
class VaultBaseAction(Action):
def __init__(self, config):
super(VaultBaseAction, self).__init__(config)
self.vault = self._get_client()
def _get_client(self):
url = self.config['url']
token = self.config['token']
cert = self.config['cert']
verify = self.config['verify']
client = hvac.Client(url=url, token=token, cert=cert, verify=verify)
return client
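# Hedged sketch (not part of the original pack): a concrete action built on
# VaultBaseAction. The secret path is a hypothetical example; hvac.Client.read()
# is the standard generic read call.
class ReadSecret(VaultBaseAction):
    def run(self, path='secret/myapp/api_key'):
        return self.vault.read(path)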
| pidah/st2contrib | packs/vault/actions/lib/action.py | Python | apache-2.0 | 495 |
import cherrypy
# This is the definition of the MAN class
'''
# Import the submodule in the application
import programs.cdag30.man as cdag30_man
# Attach man.py under the cdag30 module so that the submodule man maps to its MAN() class
root.cdag30.man = cdag30_man.MAN()
# Once configured, the URL
/cdag30/man/assembly
# invokes the assembly method of the MAN class in man.py
'''
class MAN(object):
    # Each group uses index to direct the program flow that follows
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
This is the MAN class under the cdag30 module of the 2014CDA collaborative project.<br /><br />
<!-- A relative link is used here rather than an absolute URL (this is an HTML comment) -->
<a href="assembly">Run the assembly method of the MAN class</a><br /><br />
Make sure the parts listed below are in the V:/home/lego/man directory and that a blank Creo assembly file is open.<br />
<a href="/static/lego_man.7z">lego_man.7z</a> (right-click to save the .7z file)<br />
'''
return outstring
@cherrypy.expose
def assembly(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js"></script>
</head>
<body>
</script><script language="JavaScript">
/* man2.py performs the whole assembly through function calls */
/* Define a part-assembly function */
// featID is the id of the first part placed in the assembly
// inc is the assembly-order number for part1; the first part loaded gets featID+0
// part2 is the name of the part being added
////////////////////////////////////////////////
// axis_plane_assembly assembly function
////////////////////////////////////////////////
function axis_plane_assembly(session, assembly, transf, featID, inc, part2, axis1, plane1, axis2, plane2){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
var asmDatums = new Array(axis1, plane1);
var compDatums = new Array(axis2, plane2);
var relation = new Array (pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (true, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
}
// End of the axis_plane_assembly() function
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly assembles with ALIGN constraints; featID == 0 means an empty assembly file
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
// featID == 0 means an empty assembly file
if (featID != 0){
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
}else{
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = assembly;
// Obtain the id of the first assembled part, first_featID
// Get the component ids under assembly; with only one part present, index 0 yields its featID
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
// This featID is the number of the first part in the assembly, i.e. the LEGO figure's body
var first_featID = components.Item(0).Id;
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
// If featID == 0, return first_featID
if (featID == 0)
return first_featID;
}
// End of the three_plane_assembly() function
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly2 assembles with MATE constraints; featID == 0 means an empty assembly file
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly2(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
// featID == 0 means an empty assembly file
if (featID != 0){
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
}else{
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = assembly;
// Obtain the id of the first assembled part, first_featID
// Get the component ids under assembly; with only one part present, index 0 yields its featID
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
// This featID is the number of the first part in the assembly, i.e. the LEGO figure's body
var first_featID = components.Item(0).Id;
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
// If featID == 0, return first_featID
if (featID == 0)
return first_featID;
}
// End of the three_plane_assembly2() function, which mates on three datum planes
//
// If the operating system hosting Creo is not Windows
if (!pfcIsWindows())
// enable the corresponding UniversalXPConnect privilege (the ActiveX equivalent on Windows)
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// pfcGetProESession() is a function in pfcUtils.js; it confirms this JavaScript runs in the embedded browser
var session = pfcGetProESession();
// Set a config option: do not use the placement assumptions built into the component-assembly workflow
session.SetConfigOption("comp_placement_assumptions","no");
// Create the part-placement matrix; Pro/Web.Link variables cannot be created directly and must be built via pfcCreate()
var identityMatrix = pfcCreate("pfcMatrix3D");
// Build the identity placement matrix
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set(x, y, 1.0);
else
identityMatrix.Set(x, y, 0.0);
}
// Use identityMatrix to create the transf coordinate-transformation matrix
var transf = pfcCreate("pfcTransform3D").Create(identityMatrix);
// Get the current working directory
var currentDir = session.getCurrentDirectory();
// Use the currently opened blank assembly file as the model
var model = session.CurrentModel;
// Verify that a model exists and that it is an assembly; otherwise throw an error
if (model == void null || model.Type != pfcCreate("pfcModelType").MDL_ASSEMBLY)
throw new Error (0, "Current model is not an assembly.");
// Treat this model as the assembly object
var assembly = model;
/////////////////////////////////////////////////////////////////
// Begin the assembly, performed entirely through function calls
/////////////////////////////////////////////////////////////////
// Assemble the right ARM via function call, assembly increment order 1
axis_plane_assembly(session, assembly, transf, 40, 0,
"LEGO_ARM_RT.prt", "A_13", "DTM1", "A_4", "DTM1");
// Assemble the right HAND via function call, assembly increment order 3
axis_plane_assembly(session, assembly, transf, 40, 1,
"LEGO_HAND.prt", "A_2", "DTM2", "A_1", "DTM3");
// Assemble the left ARM via function call, assembly increment order 2
axis_plane_assembly(session, assembly, transf, 40, 0,
"LEGO_ARM_LT.prt", "A_9", "DTM2", "A_4", "DTM1");
// Assemble the left HAND via function call, assembly increment order 4
axis_plane_assembly(session, assembly, transf, 42, 1,
"LEGO_HAND.prt", "A_2", "DTM2", "A_1", "DTM3");
// Regenerate and repaint the assembly file
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
return outstring
| xindus40223115/w16b_test | man2.py | Python | gpl-3.0 | 11,962 |
from ajenti.ui import *
from ajenti.plugins.dashboard.api import *
from ajenti.com import implements, Plugin
from api import *
class NetworkWidget(Plugin):
implements(IDashboardWidget)
title = 'Networking'
def get_ui(self):
cfg = self.app.get_backend(INetworkConfig)
w = UI.LayoutTable()
for x in cfg.interfaces:
i = cfg.interfaces[x]
w.append(UI.LayoutTableRow(
UI.Image(file='/dl/network/%s.png'%('up' if i.up else 'down')),
UI.Label(text=i.name),
UI.Label(text=cfg.get_ip(i)),
spacing=4
))
return w
| DmZ/ajenti | plugins/network/widget.py | Python | lgpl-3.0 | 699 |
""" some image manipulation functions like scaling, rotating, etc...
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from gputools import map_coordinates
from scipy import ndimage
import pytest
def create_shape(shape=(100, 110, 120)):
d = np.zeros(shape, np.float32)
ss = tuple([slice(s // 10, 9 * s // 10) for s in shape])
d[ss] = 1+np.random.uniform(0,1,d[ss].shape)
for i in range(len(shape)):
ss0 = list(slice(None) for _ in range(len(shape)))
ss0[i] = (10. / min(shape) * np.arange(shape[i])) % 2 > 1
d[ss0] = 0
return d
def check_error(func):
def test_func(check=True, nstacks=10):
np.random.seed(42)
for _ in range(nstacks):
ndim = np.random.choice((2,3))
shape = np.random.randint(22, 55, ndim)
x = create_shape(shape)
out1, out2 = func(x)
if check:
np.testing.assert_allclose(out1, out2, atol=1e-2, rtol=1.e-2)
return x, out1, out2
return test_func
@check_error
def test_map_coordinates(x):
    coordinates = np.stack([np.arange(10) ** 2] * x.ndim)  # unused earlier variant, overwritten below
    coordinates = np.random.randint(0, min(x.shape), (x.ndim, 100))
print(coordinates.shape, x.shape)
out1 = map_coordinates(x, coordinates, interpolation="linear")
out2 = ndimage.map_coordinates(x, coordinates, order=1, prefilter=False)
return out1, out2
if __name__ == '__main__':
x, y1, y2 = test_map_coordinates(check=False, nstacks=1)
| maweigert/gputools | tests/transforms/test_map_coordinates.py | Python | bsd-3-clause | 1,538 |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2008 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
import os
import sys
from collections import OrderedDict
from dateutil import parser
from datetime import datetime, timedelta
import pytz
from owslib.etree import etree, ParseError
from owslib.namespaces import Namespaces
from urllib.parse import urlsplit, urlencode, urlparse, parse_qs, urlunparse, parse_qsl
import copy
from io import StringIO, BytesIO
import re
from copy import deepcopy
import warnings
import requests
from requests.auth import AuthBase
import codecs
"""
Utility functions and classes
"""
class ServiceException(Exception):
# TODO: this should go in ows common module when refactored.
pass
# http://stackoverflow.com/questions/6256183/combine-two-dictionaries-of-dictionaries-python
def dict_union(d1, d2):
return dict((x, (dict_union(d1.get(x, {}), d2[x]) if isinstance(d2.get(x), dict) else d2.get(x, d1.get(x))))
for x in set(list(d1.keys()) + list(d2.keys())))
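# Illustrative usage (added example, not part of the original module):
#   dict_union({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}})
#   # -> {'a': {'x': 1, 'y': 3}, 'b': 2} (key order may vary)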
# Infinite DateTimes for Python. Used in SWE 2.0 and other OGC specs as "INF" and "-INF"
class InfiniteDateTime(object):
def __lt__(self, other):
return False
def __gt__(self, other):
return True
def timetuple(self):
return tuple()
class NegativeInfiniteDateTime(object):
def __lt__(self, other):
return True
def __gt__(self, other):
return False
def timetuple(self):
return tuple()
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def format_string(prop_string):
"""
Formats a property string to remove spaces and go from CamelCase to pep8
from: http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
"""
if prop_string is None:
return ''
    st_r = first_cap_re.sub(r'\1_\2', prop_string)
    st_r = st_r.replace(' ', '')
    return all_cap_re.sub(r'\1_\2', st_r).lower()
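# Illustrative usage (added example, not part of the original module):
#   format_string('CamelCase Name')  # -> 'camel_case_name'
#   format_string(None)              # -> ''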
def xml_to_dict(root, prefix=None, depth=1, diction=None):
"""
    Recursively iterates through an XML element, converting each element in the
    tree to a (key, val) pair, where key is the element tag and val is the
    inner text of the element. Recursion continues to the specified depth.
Parameters
===========
:root - root xml element, starting point of iteration
:prefix - a string to prepend to the resulting key (optional)
:depth - the number of depths to process in the tree (optional)
:diction - the dictionary to insert the (tag,text) pairs into (optional)
Return
=======
Dictionary of (key,value); where key is the element tag stripped of namespace and cleaned up to be pep8 and
value is the inner-text of the element. Note that duplicate elements will be replaced by the last element of the
same tag in the tree.
"""
ret = diction if diction is not None else dict()
for child in root:
val = testXMLValue(child)
# skip values that are empty or None
if val is None or val == '':
if depth > 1:
ret = xml_to_dict(child, prefix=prefix, depth=(depth - 1), diction=ret)
continue
key = format_string(child.tag.split('}')[-1])
if prefix is not None:
key = prefix + key
ret[key] = val
if depth > 1:
ret = xml_to_dict(child, prefix=prefix, depth=(depth - 1), diction=ret)
return ret
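# Illustrative usage (added example): for a root element parsed from
# '<root><Name>foo</Name><Value>42</Value></root>',
#   xml_to_dict(root)  # -> {'name': 'foo', 'value': '42'}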
class ResponseWrapper(object):
"""
Return object type from openURL.
Provides a thin shim around requests response object to maintain code compatibility.
"""
def __init__(self, response):
self._response = response
def info(self):
return self._response.headers
def read(self):
return self._response.content
def geturl(self):
return self._response.url.replace('&&', '&')
# @TODO: __getattribute__ for poking at response
def openURL(url_base, data=None, method='Get', cookies=None, username=None, password=None, timeout=30, headers=None,
verify=True, cert=None, auth=None):
"""
Function to open URLs.
Uses requests library but with additional checks for OGC service exceptions and url formatting.
Also handles cookies and simple user password authentication.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided.
Defaults to ``True``.
:param cert: (optional) A file with a client side certificate for SSL authentication
to send with the :class:`Request`.
:param auth: Instance of owslib.util.Authentication
"""
headers = headers if headers is not None else {}
rkwargs = {}
rkwargs['timeout'] = timeout
if auth:
if username:
auth.username = username
if password:
auth.password = password
if cert:
auth.cert = cert
verify = verify and auth.verify
else:
auth = Authentication(username, password, cert, verify)
if auth.username and auth.password:
rkwargs['auth'] = (auth.username, auth.password)
elif auth.auth_delegate is not None:
rkwargs['auth'] = auth.auth_delegate
rkwargs['cert'] = auth.cert
rkwargs['verify'] = verify
# FIXUP for WFS in particular, remove xml style namespace
# @TODO does this belong here?
method = method.split("}")[-1]
if method.lower() == 'post':
try:
etree.fromstring(data)
headers['Content-Type'] = 'text/xml'
except (ParseError, UnicodeEncodeError):
pass
rkwargs['data'] = data
elif method.lower() == 'get':
rkwargs['params'] = data
else:
raise ValueError("Unknown method ('%s'), expected 'get' or 'post'" % method)
if cookies is not None:
rkwargs['cookies'] = cookies
req = requests.request(method.upper(), url_base, headers=headers, **rkwargs)
if req.status_code in [400, 401]:
raise ServiceException(req.text)
if req.status_code in [404, 500, 502, 503, 504]: # add more if needed
req.raise_for_status()
# check for service exceptions without the http header set
if 'Content-Type' in req.headers and \
req.headers['Content-Type'] in ['text/xml', 'application/xml', 'application/vnd.ogc.se_xml']:
# just in case 400 headers were not set, going to have to read the xml to see if it's an exception report.
se_tree = etree.fromstring(req.content)
# to handle the variety of namespaces and terms across services
# and versions, especially for "legacy" responses like WMS 1.3.0
possible_errors = [
'{http://www.opengis.net/ows}Exception',
'{http://www.opengis.net/ows/1.1}Exception',
'{http://www.opengis.net/ogc}ServiceException',
'ServiceException'
]
for possible_error in possible_errors:
serviceException = se_tree.find(possible_error)
if serviceException is not None:
# and we need to deal with some message nesting
raise ServiceException('\n'.join([t.strip() for t in serviceException.itertext() if t.strip()]))
return ResponseWrapper(req)
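# Illustrative usage (added example; the endpoint URL is hypothetical):
#   resp = openURL('http://example.org/wms',
#                  data={'service': 'WMS', 'request': 'GetCapabilities'},
#                  method='Get')
#   capabilities_xml = resp.read()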
# default namespace for nspath is OWS common
OWS_NAMESPACE = 'http://www.opengis.net/ows/1.1'
def nspath(path, ns=OWS_NAMESPACE):
"""
Prefix the given path with the given namespace identifier.
Parameters
----------
- path: ElementTree API Compatible path expression
- ns: the XML namespace URI.
"""
if ns is None or path is None:
return -1
components = []
for component in path.split('/'):
if component != '*':
component = '{%s}%s' % (ns, component)
components.append(component)
return '/'.join(components)
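# Illustrative usage (added example):
#   nspath('ServiceIdentification/Title')
#   # -> '{http://www.opengis.net/ows/1.1}ServiceIdentification/{http://www.opengis.net/ows/1.1}Title'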
def nspath_eval(xpath, namespaces):
''' Return an etree friendly xpath '''
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{%s}%s' % (namespaces[namespace], element))
return '/'.join(out)
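# Illustrative usage (added example):
#   nspath_eval('ows:Title', {'ows': 'http://www.opengis.net/ows/1.1'})
#   # -> '{http://www.opengis.net/ows/1.1}Title'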
def cleanup_namespaces(element):
""" Remove unused namespaces from an element """
if etree.__name__ == 'lxml.etree':
etree.cleanup_namespaces(element)
return element
else:
return etree.fromstring(etree.tostring(element))
def add_namespaces(root, ns_keys):
if isinstance(ns_keys, str):
ns_keys = [ns_keys]
namespaces = Namespaces()
ns_keys = [(x, namespaces.get_namespace(x)) for x in ns_keys]
if etree.__name__ != 'lxml.etree':
# We can just add more namespaces when not using lxml.
# We can't re-add an existing namespaces. Get a list of current
# namespaces in use
existing_namespaces = set()
for elem in root.iter():
if elem.tag[0] == "{":
uri, tag = elem.tag[1:].split("}")
existing_namespaces.add(namespaces.get_namespace_from_url(uri))
for key, link in ns_keys:
if link is not None and key not in existing_namespaces:
root.set("xmlns:%s" % key, link)
return root
else:
# lxml does not support setting xmlns attributes
# Update the elements nsmap with new namespaces
new_map = root.nsmap
for key, link in ns_keys:
if link is not None:
new_map[key] = link
# Recreate the root element with updated nsmap
new_root = etree.Element(root.tag, nsmap=new_map)
# Carry over attributes
for a, v in list(root.items()):
new_root.set(a, v)
# Carry over children
for child in root:
new_root.append(deepcopy(child))
return new_root
def getXMLInteger(elem, tag):
"""
Return the text within the named tag as an integer.
Raises an exception if the tag cannot be found or if its textual
value cannot be converted to an integer.
Parameters
----------
- elem: the element to search within
- tag: the name of the tag to look for
"""
e = elem.find(tag)
if e is None:
raise ValueError('Missing %s in %s' % (tag, elem))
return int(e.text.strip())
def testXMLValue(val, attrib=False):
"""
Test that the XML value exists, return val.text, else return None
Parameters
----------
- val: the value to be tested
"""
if val is not None:
if attrib:
return val.strip()
elif val.text:
return val.text.strip()
else:
return None
else:
return None
def testXMLAttribute(element, attribute):
"""
Test that the XML element and attribute exist, return attribute's value, else return None
Parameters
----------
- element: the element containing the attribute
- attribute: the attribute name
"""
if element is not None:
return element.get(attribute)
return None
def http_post(url=None, request=None, lang='en-US', timeout=10, username=None, password=None, auth=None):
"""
Invoke an HTTP POST request
Parameters
----------
- url: the URL of the server
- request: the request message
- lang: the language
- timeout: timeout in seconds
"""
if url is None:
raise ValueError("URL required")
u = urlsplit(url)
headers = {
'User-Agent': 'OWSLib (https://geopython.github.io/OWSLib)',
'Content-type': 'text/xml',
'Accept': 'text/xml,application/xml',
'Accept-Language': lang,
'Accept-Encoding': 'gzip,deflate',
'Host': u.netloc,
}
rkwargs = {}
if auth:
if username:
auth.username = username
if password:
auth.password = password
else:
auth = Authentication(username, password)
if auth.username is not None and auth.password is not None:
rkwargs['auth'] = (auth.username, auth.password)
elif auth.auth_delegate is not None:
rkwargs['auth'] = auth.auth_delegate
rkwargs['verify'] = auth.verify
rkwargs['cert'] = auth.cert
up = requests.post(url, request, headers=headers, **rkwargs)
return up.content
def http_get(*args, **kwargs):
# Copy input kwargs so the dict can be modified
rkwargs = copy.deepcopy(kwargs)
# Use Authentication instance if provided, else create one
auth = rkwargs.pop('auth', None)
if auth is not None:
if isinstance(auth, (tuple, list)):
auth = Authentication(*auth)
else:
auth = Authentication()
# Populate values with other arguments supplied
if 'username' in rkwargs:
auth.username = rkwargs.pop('username')
if 'password' in rkwargs:
auth.password = rkwargs.pop('password')
if 'cert' in rkwargs:
auth.cert = rkwargs.pop('cert')
if 'verify' in rkwargs:
auth.verify = rkwargs.pop('verify')
# Build keyword args for call to requests.get()
if auth.username and auth.password:
rkwargs.setdefault('auth', (auth.username, auth.password))
elif auth.auth_delegate is not None:
rkwargs['auth'] = auth.auth_delegate
else:
rkwargs.setdefault('auth', None)
    # 'cert' and 'verify' were popped into the Authentication instance above,
    # so read them back from auth rather than from the (now emptied) kwargs
    rkwargs.setdefault('cert', auth.cert)
    rkwargs.setdefault('verify', auth.verify)
return requests.get(*args, **rkwargs)
def element_to_string(element, encoding=None, xml_declaration=False):
"""
Returns a string from a XML object
Parameters
----------
- element: etree Element
- encoding (optional): encoding in string form. 'utf-8', 'ISO-8859-1', etc.
- xml_declaration (optional): whether to include xml declaration
"""
output = None
if encoding is None:
encoding = "ISO-8859-1"
if etree.__name__ == 'lxml.etree':
if xml_declaration:
if encoding in ['unicode', 'utf-8']:
output = '<?xml version="1.0" encoding="utf-8" standalone="no"?>\n{}'.format(
etree.tostring(element, encoding='unicode'))
else:
output = etree.tostring(element, encoding=encoding, xml_declaration=True)
else:
output = etree.tostring(element)
else:
if xml_declaration:
output = '<?xml version="1.0" encoding="{}" standalone="no"?>\n{}'.format(
encoding, etree.tostring(element, encoding=encoding))
else:
output = etree.tostring(element)
return output
def xml2string(xml):
"""
Return a string of XML object
Parameters
----------
- xml: xml string
"""
warnings.warn("DEPRECIATION WARNING! You should now use the 'element_to_string' method \
The 'xml2string' method will be removed in a future version of OWSLib.")
return '<?xml version="1.0" encoding="ISO-8859-1" standalone="no"?>\n' + xml
def xmlvalid(xml, xsd):
"""
Test whether an XML document is valid
Parameters
----------
- xml: XML content
- xsd: pointer to XML Schema (local file path or URL)
"""
xsd1 = etree.parse(xsd)
xsd2 = etree.XMLSchema(xsd1)
doc = etree.parse(StringIO(xml))
return xsd2.validate(doc)
def xmltag_split(tag):
''' Return XML element bare tag name (without prefix) '''
try:
return tag.split('}')[1]
except Exception:
return tag
def getNamespace(element):
''' Utility method to extract the namespace from an XML element tag encoded as {namespace}localname. '''
if element.tag[0] == '{':
return element.tag[1:].split("}")[0]
else:
return ""
def build_get_url(base_url, params, overwrite=False):
''' Utility function to build a full HTTP GET URL from the service base URL and a dictionary of HTTP parameters.
TODO: handle parameters case-insensitive?
@param overwrite: boolean flag to allow overwrite of parameters of the base_url (default: False)
'''
qs_base = []
if base_url.find('?') != -1:
qs_base = parse_qsl(base_url.split('?')[1])
qs_params = []
for key, value in list(params.items()):
qs_params.append((key, value))
    qs = []
    qs_add = []
if overwrite is True:
# all params and additional base
qs = qs_params
qs_add = qs_base
else:
# all base and additional params
qs = qs_base
qs_add = qs_params
pars = [x[0] for x in qs]
for key, value in qs_add:
if key not in pars:
qs.append((key, value))
urlqs = urlencode(tuple(qs))
return base_url.split('?')[0] + '?' + urlqs
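# Illustrative usage (added example):
#   build_get_url('http://host/wms?service=WMS', {'request': 'GetCapabilities'})
#   # -> 'http://host/wms?service=WMS&request=GetCapabilities'
#   build_get_url('http://host/wms?service=WMS', {'service': 'WFS'}, overwrite=True)
#   # -> 'http://host/wms?service=WFS'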
def dump(obj, prefix=''):
'''Utility function to print to standard output a generic object with all its attributes.'''
print(("{} {}.{} : {}".format(prefix, obj.__module__, obj.__class__.__name__, obj.__dict__)))
def getTypedValue(data_type, value):
'''Utility function to cast a string value to the appropriate XSD type. '''
# If the default value is empty
if value is None:
return
if data_type == 'boolean':
return True if value.lower() == 'true' else False
elif data_type == 'integer':
return int(value)
elif data_type == 'float':
return float(value)
elif data_type == 'string':
return str(value)
else:
return value # no type casting
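# Illustrative usage (added example):
#   getTypedValue('integer', '42')    # -> 42
#   getTypedValue('boolean', 'True')  # -> True
#   getTypedValue('float', '1.5')     # -> 1.5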
def extract_time(element):
''' return a datetime object based on a gml text string
ex:
<gml:beginPosition>2006-07-27T21:10:00Z</gml:beginPosition>
<gml:endPosition indeterminatePosition="now"/>
If there happens to be a strange element with both attributes and text,
use the text.
ex: <gml:beginPosition indeterminatePosition="now">2006-07-27T21:10:00Z</gml:beginPosition>
Would be 2006-07-27T21:10:00Z, not 'now'
'''
if element is None:
return None
try:
dt = parser.parse(element.text)
except Exception:
att = testXMLValue(element.attrib.get('indeterminatePosition'), True)
if att and att == 'now':
dt = datetime.utcnow()
            dt = dt.replace(tzinfo=pytz.utc)  # replace() returns a new datetime; keep the result
else:
dt = None
return dt
def extract_xml_list(elements):
"""
    Some services don't put keywords in separate tags and instead separate them
    with newlines. This extracts all of the keywords correctly.
"""
keywords = (re.split(r'[\n\r]+', f.text) for f in elements if f.text)
flattened = (item.strip() for sublist in keywords for item in sublist)
remove_blank = [_f for _f in flattened if _f]
return remove_blank
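# Illustrative usage (added example): given elements whose .text values are
# 'coast\nocean' and 'climate', the result is ['coast', 'ocean', 'climate'].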
def strip_bom(raw_text):
""" return the raw (assumed) xml response without the BOM
"""
boms = [
# utf-8
codecs.BOM_UTF8,
# utf-16
codecs.BOM,
codecs.BOM_BE,
codecs.BOM_LE,
codecs.BOM_UTF16,
codecs.BOM_UTF16_LE,
codecs.BOM_UTF16_BE,
# utf-32
codecs.BOM_UTF32,
codecs.BOM_UTF32_LE,
codecs.BOM_UTF32_BE
]
if isinstance(raw_text, bytes):
for bom in boms:
if raw_text.startswith(bom):
return raw_text[len(bom):]
return raw_text
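# Illustrative usage (added example):
#   strip_bom(b'\xef\xbb\xbf<xml/>')  # -> b'<xml/>' (UTF-8 BOM removed)
#   strip_bom('<xml/>')               # -> '<xml/>' (str input passes through)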
def clean_ows_url(url):
"""
clean an OWS URL of basic service elements
source: https://stackoverflow.com/a/11640565
"""
if url is None or not url.startswith('http'):
return url
filtered_kvp = {}
basic_service_elements = ('service', 'version', 'request')
parsed = urlparse(url)
qd = parse_qs(parsed.query, keep_blank_values=True)
for key, value in list(qd.items()):
if key.lower() not in basic_service_elements:
filtered_kvp[key] = value
newurl = urlunparse([
parsed.scheme,
parsed.netloc,
parsed.path,
parsed.params,
urlencode(filtered_kvp, doseq=True),
parsed.fragment
])
return newurl
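# Illustrative usage (added example):
#   clean_ows_url('http://host/wms?service=WMS&version=1.3.0&map=/x.map')
#   # -> 'http://host/wms?map=%2Fx.map'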
def bind_url(url):
"""binds an HTTP GET query string endpiont"""
if url.find('?') == -1: # like http://host/wms
binder = '?'
# if like http://host/wms?foo=bar& or http://host/wms?foo=bar
if url.find('=') != -1:
if url.find('&', -1) != -1: # like http://host/wms?foo=bar&
binder = ''
else: # like http://host/wms?foo=bar
binder = '&'
# if like http://host/wms?foo
if url.find('?') != -1:
if url.find('?', -1) != -1: # like http://host/wms?
binder = ''
elif url.find('&', -1) == -1: # like http://host/wms?foo=bar
binder = '&'
return '%s%s' % (url, binder)
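# Illustrative usage (added example):
#   bind_url('http://host/wms')          # -> 'http://host/wms?'
#   bind_url('http://host/wms?foo=bar')  # -> 'http://host/wms?foo=bar&'
#   bind_url('http://host/wms?')         # -> 'http://host/wms?'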
import logging
# Null logging handler
NullHandler = logging.NullHandler
log = logging.getLogger('owslib')
log.addHandler(NullHandler())
def which_etree():
"""decipher which etree library is being used by OWSLib"""
which_etree = None
if 'lxml' in etree.__file__:
which_etree = 'lxml.etree'
elif 'xml/etree' in etree.__file__:
which_etree = 'xml.etree'
    elif 'elementtree' in etree.__file__:  # fixed typo: was 'elementree', which never matches
which_etree = 'elementtree.ElementTree'
return which_etree
def findall(root, xpath, attribute_name=None, attribute_value=None):
"""Find elements recursively from given root element based on
xpath and possibly given attribute
:param root: Element root element where to start search
:param xpath: xpath defintion, like {http://foo/bar/namespace}ElementName
:param attribute_name: name of possible attribute of given element
:param attribute_value: value of the attribute
:return: list of elements or None
"""
found_elements = []
if attribute_name is not None and attribute_value is not None:
xpath = '%s[@%s="%s"]' % (xpath, attribute_name, attribute_value)
found_elements = root.findall('.//' + xpath)
if found_elements == []:
found_elements = None
return found_elements
def datetime_from_iso(iso):
"""returns a datetime object from dates in the format 2001-07-01T00:00:00Z or 2001-07-01T00:00:00.000Z """
try:
iso_datetime = datetime.strptime(iso, "%Y-%m-%dT%H:%M:%SZ")
except Exception:
iso_datetime = datetime.strptime(iso, "%Y-%m-%dT%H:%M:%S.%fZ")
return iso_datetime
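# Illustrative usage (added example):
#   datetime_from_iso('2001-07-01T00:00:00Z')  # -> datetime(2001, 7, 1, 0, 0)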
def datetime_from_ansi(ansi):
"""Converts an ansiDate (expressed as a number = the nuber of days since the datum origin of ansi)
to a python datetime object.
"""
datumOrigin = datetime(1600, 12, 31, 0, 0, 0)
return datumOrigin + timedelta(ansi)
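# Illustrative usage (added example): day 1 after the ANSI datum origin
#   datetime_from_ansi(1)    # -> datetime(1601, 1, 1, 0, 0)
#   datetime_from_ansi(1.5)  # -> datetime(1601, 1, 1, 12, 0); fractions of a day work too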
def is_number(s):
"""simple helper to test if value is number as requests with numbers don't
need quote marks
"""
try:
float(s)
return True
except ValueError:
return False
def makeString(value):
# using repr unconditionally breaks things in some circumstances if a
# value is already a string
if type(value) is not str:
sval = repr(value)
else:
sval = value
return sval
def param_list_to_url_string(param_list, param_name):
"""Converts list of tuples for certain WCS GetCoverage keyword arguments
(subsets, resolutions, sizes) to a url-encoded string
"""
string = ''
for param in param_list:
if len(param) > 2:
if not is_number(param[1]):
string += "&" + urlencode({param_name: param[0] + '("' + makeString(param[1]) + '","' + makeString(param[2]) + '")'}) # noqa
else:
string += "&" + urlencode({param_name: param[0] + "(" + makeString(param[1]) + "," + makeString(param[2]) + ")"}) # noqa
else:
if not is_number(param[1]):
string += "&" + urlencode({param_name: param[0] + '("' + makeString(param[1]) + '")'}) # noqa
else:
string += "&" + urlencode({param_name: param[0] + "(" + makeString(param[1]) + ")"}) # noqa
return string
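# Illustrative usage (added example):
#   param_list_to_url_string([('x', 10, 20)], 'subset')
#   # -> '&subset=x%2810%2C20%29' (i.e. subset=x(10,20), url-encoded)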
def is_vector_grid(grid_elem):
pass
class Authentication(object):
_USERNAME = None
_PASSWORD = None
_AUTH_DELEGATE = None
_CERT = None
_VERIFY = None
def __init__(self, username=None, password=None,
cert=None, verify=True, shared=False,
auth_delegate=None):
'''
:param str username=None: Username for basic authentication, None for
unauthenticated access (or if using cert/verify)
:param str password=None: Password for basic authentication, None for
unauthenticated access (or if using cert/verify)
:param cert=None: Either a str (path to a combined certificate/key) or
tuple/list of paths (certificate, key). If supplied, the target
files must exist.
:param verify=True: Either a bool (verify SSL certificates, use system
CA bundle) or str (path to a specific CA bundle). If a str, the
target file must exist.
:param bool shared=False: Set to True to make the values be class-level
attributes (shared among instances where shared=True) instead of
instance-level (shared=False, default)
:param AuthBase auth_delegate=None: Instance of requests' AuthBase to
allow arbitrary authentication schemes - mutually exclusive with
username/password arguments.
'''
self.shared = shared
self._username = username
self._password = password
self._cert = cert
self._verify = verify
self._auth_delegate = auth_delegate
# Trigger the setters to validate the parameters. This couldn't be done directly
# since some parameters are mutually exclusive.
self.username = username
self.password = password
self.cert = cert
self.verify = verify
self.auth_delegate = auth_delegate
@property
def username(self):
if self.shared:
return self._USERNAME
return self._username
@username.setter
def username(self, value):
if value is not None:
if not isinstance(value, str):
raise TypeError('Value for "username" must be a str')
if self.auth_delegate is not None:
raise ValueError('Authentication instances may have username/password or auth_delegate set,'
' but not both')
if self.shared:
self.__class__._USERNAME = value
else:
self._username = value
@property
def password(self):
if self.shared:
return self._PASSWORD
return self._password
@password.setter
def password(self, value):
if value is not None:
if not isinstance(value, str):
raise TypeError('Value for "password" must be a str')
if self.auth_delegate is not None:
raise ValueError('Authentication instances may have username/password or auth_delegate set,'
' but not both')
if self.shared:
self.__class__._PASSWORD = value
else:
self._password = value
@property
def cert(self):
if self.shared:
return self._CERT
return self._cert
@cert.setter
def cert(self, certificate, key=None):
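        # NOTE: Python invokes property setters with a single value, so the
        # 'key' parameter is effectively always None here; to supply separate
        # certificate and key files, assign a (certificate, key) tuple instead.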
error = 'Value for "cert" must be a str path to a file or list/tuple of str paths'
value = None
if certificate is None:
value = certificate
elif isinstance(certificate, (list, tuple)):
for _ in certificate:
if not isinstance(_, str):
raise TypeError(error)
os.stat(_) # Raises OSError/FileNotFoundError if missing
# Both paths supplied as same argument
value = tuple(certificate)
elif isinstance(certificate, str):
os.stat(certificate) # Raises OSError/FileNotFoundError if missing
if isinstance(key, str):
# Separate files for certificate and key
value = (certificate, key)
else:
# Assume combined file of both certificate and key
value = certificate
else:
raise TypeError(error)
if self.shared:
self.__class__._CERT = value
else:
self._cert = value
@property
def verify(self):
if self.shared:
return self._VERIFY
return self._verify
@verify.setter
def verify(self, value):
if value is None:
pass # Passthrough when clearing the value
elif not isinstance(value, (bool, str)):
raise TypeError(
'Value for "verify" must a bool or str path to a file')
elif isinstance(value, str):
os.stat(value) # Raises OSError/FileNotFoundError if missing
if self.shared:
self.__class__._VERIFY = value
else:
self._verify = value
@property
def auth_delegate(self):
if self.shared:
return self._AUTH_DELEGATE
return self._auth_delegate
@auth_delegate.setter
def auth_delegate(self, value):
if value is not None:
if not isinstance(value, AuthBase):
raise TypeError('Value for "auth_delegate" must be an instance of AuthBase')
if self.username is not None or self.password is not None:
raise ValueError('Authentication instances may have username/password or auth_delegate set,'
' but not both')
if self.shared:
self.__class__._AUTH_DELEGATE = value
else:
self._auth_delegate = value
@property
def urlopen_kwargs(self):
if self.auth_delegate is not None:
raise NotImplementedError("The urlopen_kwargs property is not supported when auth_delegate is set")
return {
'username': self.username,
'password': self.password,
'cert': self.cert,
'verify': self.verify
}
def __repr__(self, *args, **kwargs):
return '<{} shared={} username={} password={} cert={} verify={} auth_delegate={}>'.format(
self.__class__.__name__, self.shared, self.username, self.password, self.cert, self.verify,
self.auth_delegate)
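# Illustrative usage (added example; URL and credentials are hypothetical):
#   auth = Authentication(username='user', password='secret', verify=True)
#   resp = http_get('https://example.org/wms?request=GetCapabilities', auth=auth)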
|
kalxas/OWSLib
|
owslib/util.py
|
Python
|
bsd-3-clause
| 30,793
|
'''
Created on 7 Oct 2009
@author: pnorton
'''
import re
import joj.lib.utils as utils
import joj.lib.config_file_parser as config_file_parser
from ConfigParser import NoOptionError, NoSectionError
from joj.lib.base import config
import logging
log = logging.getLogger(__name__)
class StatusBuilder(object):
'''
Extracts the initial setup information from the config file and the session
and uses it to generate the initial setup json to be used by the javascript
'''
def __init__(self):
'''
Constructor
'''
self.fiparser = config_file_parser.FurtherInfoConfigParser()
self.displayOptionsParser = config_file_parser.DisplayOptionsConfigParser()
self.userInterfaceConfigParser = config_file_parser.UserInterfaceConfigParser()
def getCurrentStatus(self, page=None):
status = {}
status['WMSEndpointsList'] = self._getWMSEndpointList()
status['HiddenDisplayOptions'] = self._getHiddenDisplayOptions()
status['DefaultLayerParms'] = self._getDefaultParams()
status['OutlineSettings'] = self._getOutlineSettings()
status['FurtherInfoLinks'] = self._getFurtherInfoLinks(page)
status['ViewDataUserInterfaceConfig'] = self._getUserInterfaceConfig('viewdata')
status['CustomTextOptions'] = self._getCustomTextOptions()
status['AnimationOptions'] = self._getAnimationOptions()
status['BannerOptions'] = self._getBannerOptions()
status['FigureOptions'] = self._getFigureOptions()
status['LogoOptions'] = self._getLogoOptions()
status['MapOptions'] = self._getMapOptions()
status['DataOptions'] = self._getDataOptions()
#log.debug('STATUS CUSTOM TEXT %s'%status['CustomTextOptions'])
return status
def _getWMSEndpointList(self):
econfig = config_file_parser.EndpointConfigFileParser()
endpointList = econfig.buildEndpointList('wmsviz')
log.debug("endpointList = %s" % (endpointList,))
wmsList = []
if endpointList is not None:
for e in endpointList:
if e['service'] == 'COWS':
try:
for linkName, linkDict in utils.parseCowsCatalog(e['url']):
if 'WMS' in linkDict.keys():
wmsList.append(
{'service':'WMS',
'url':linkDict['WMS'],
'name':linkName}
)
except:
log.exception("An error occurred while reading cows catalog at %s"\
% (e['url'],))
elif e['service'] == 'WMS':
wmsList.append(e)
return wmsList
def _getHiddenDisplayOptions(self):
return self.displayOptionsParser.getHideOptions('wmsviz')
def _getDefaultParams(self):
return self.displayOptionsParser.getDefaultOptions('wmsviz')
def _getOutlineSettings(self):
outlineConfigParser = config_file_parser.OutlineLayersConfigParser()
return outlineConfigParser.getOutlineLayer('wmsviz')
#
def _getFurtherInfoLinks(self, sectionName):
return self.fiparser.getFurtherInfoItems('wmsviz')
def _getUserInterfaceConfig(self, page):
return self.userInterfaceConfigParser.getUserInterfaceOptions(page)
def _getCustomTextOptions(self):
CustomTextOptions = {}
CustomTextOptions['abouttext'] = self._getUserInterfaceOption('customtext', 'abouttext', '', concatenateLines=True)
CustomTextOptions['maptitle'] = self._getUserInterfaceOption('customtext', 'maptitle', 'Map')
return CustomTextOptions
def _getAnimationOptions(self):
"""Returns the options from the animation section.
"""
animationOptions = {}
animationOptions['minheight'] = self._getUserInterfaceOption('animation', 'height.min', 200)
animationOptions['maxheight'] = self._getUserInterfaceOption('animation', 'height.max', 2048)
animationOptions['defaultheight'] = self._getUserInterfaceOption('animation', 'height.default', 900)
animationOptions['minwidth'] = self._getUserInterfaceOption('animation', 'width.min', 200)
animationOptions['maxwidth'] = self._getUserInterfaceOption('animation', 'width.max', 2048)
animationOptions['defaultwidth'] = self._getUserInterfaceOption('animation', 'width.default', 1200)
animationOptions['maxnumbersteps'] = self._getUserInterfaceOption('animation', 'numbersteps.max', 100)
animationOptions['defaultnumbersteps'] = self._getUserInterfaceOption('animation', 'numbersteps.default', 5)
animationOptions['browsertimeout'] = self._getUserInterfaceOption('animation', 'browser.timeout', 300)
animationOptions['style'] = self._getUserInterfaceOption('animation', 'style', '')
return animationOptions
def _getBannerOptions(self):
"""Returns the options from the banner section.
"""
bannerOptions = {}
bannerOptions['height'] = self._getUserInterfaceOption('banner', 'height', 125)
bannerOptions['html'] = self._getUserInterfaceOption('banner', 'html', '', concatenateLines=True)
bannerOptions['style'] = self._getUserInterfaceOption('banner', 'style', '')
return bannerOptions
def _getFigureOptions(self):
"""Returns the options from the figure section.
"""
figureOptions = {}
figureOptions['minheight'] = self._getUserInterfaceOption('figure', 'height.min', 200)
figureOptions['maxheight'] = self._getUserInterfaceOption('figure', 'height.max', 2048)
figureOptions['defaultheight'] = self._getUserInterfaceOption('figure', 'height.default', 900)
figureOptions['minwidth'] = self._getUserInterfaceOption('figure', 'width.min', 200)
figureOptions['maxwidth'] = self._getUserInterfaceOption('figure', 'width.max', 2048)
figureOptions['defaultwidth'] = self._getUserInterfaceOption('figure', 'width.default', 1200)
figureOptions['style'] = self._getUserInterfaceOption('figure', 'style', '')
return figureOptions
def _getLogoOptions(self):
"""Returns the options from the logo section.
"""
logoOptions = {}
logoOptions['height'] = self._getUserInterfaceOption('logo', 'height', 95)
logoOptions['html'] = self._getUserInterfaceOption('logo', 'html', '', concatenateLines=True)
logoOptions['style'] = self._getUserInterfaceOption('logo', 'style', '')
return logoOptions
def _getMapOptions(self):
"""Returns the options from the map section.
"""
mapOptions = {}
mapOptions['tilesize'] = self._getUserInterfaceOption('map', 'tilesize', None)
mapOptions['numberZoomLevels'] = self._getUserInterfaceOption('map', 'numberzoomlevels', None)
return mapOptions
def _getDataOptions(self):
"""Returns the options from the data section.
"""
dataOptions = {}
dataOptions['maxnumbersteps'] = self._getUserInterfaceOption('data', 'numbersteps.max', 100)
dataOptions['defaultnumbersteps'] = self._getUserInterfaceOption('data', 'numbersteps.default', 5)
dataOptions['browsertimeout'] = self._getUserInterfaceOption('data', 'browser.timeout', 300)
return dataOptions
def _getUserInterfaceOption(self, section, option, default, concatenateLines=False):
"""Returns the value of an option in a section, or a default value if the option is not
found.
"""
try:
result = self.userInterfaceConfigParser.getConfigOption(section, option)
except (NoOptionError, NoSectionError, TypeError):
result = default
# This can be used to allow multi-line values in the configuration file, but collapse it to
# a single line for JSON parsing.
if concatenateLines:
result = re.sub('[\n\r]+', ' ', result)
return result
"""
initialStatusObject (made of primitives so it can be transformed into JSON)
{
'HideDisplayOptions':xxx,
'DefaultDisplayOptions':xxx,
'WMSEndpointsList':xxx,
'baselayerParams':xxx,
'baselayerUrl':xxx,
'selectedEndpoints':xxx,
'selectedLayers':xxx,
}
"""
|
NERC-CEH/jules-jasmin
|
majic/joj/lib/status_builder.py
|
Python
|
gpl-2.0
| 8,747
|
from flask import request
from flask_restplus import Resource
from skf.api.security import security_headers, validate_privilege
from skf.api.code.business import update_code_item
from skf.api.code.serializers import code_properties, message
from skf.api.code.parsers import authorization
from skf.api.restplus import api
from skf.api.security import log, val_num, val_alpha, val_alpha_num, val_alpha_num_special
ns = api.namespace('code', description='Operations related to code example items')
@ns.route('/update/<int:id>')
@api.doc(params={'id': 'The code item id'})
@api.response(404, 'Validation error', message)
class CodeItemUpdate(Resource):
@api.expect(authorization, code_properties)
@api.marshal_with(message, 'Success')
@api.response(400, 'Validation Error', message)
def put(self, id):
"""
Update a code example item.
* Privileges required: **edit**
"""
data = request.json
val_alpha_num_special(data.get('title'))
val_num(id)
validate_privilege(self, 'edit')
result = update_code_item(id, data)
return result, 200, security_headers()
|
blabla1337/skf-flask
|
skf/api/code/endpoints/code_item_update.py
|
Python
|
agpl-3.0
| 1,148
|
import unittest
import math
from kivy3 import Vector3, Vector4, Vector2
# good values for vector 3, 4, 12, 84
class Vector3Test(unittest.TestCase):
def test_create(self):
v = Vector3(1, 2, 3)
        self.assertEqual(v[0], 1)
        self.assertEqual(v[1], 2)
        self.assertEqual(v[2], 3)
        v = Vector3([4, 5, 6])
        self.assertEqual(v[0], 4)
        self.assertEqual(v[1], 5)
        self.assertEqual(v[2], 6)
try:
Vector3(1, 2, 3, 4)
assert False, "This shold not reached"
except:
pass
try:
Vector3([3, 4, 2, 1])
assert False, "This shold not reached"
except:
pass
def test_add(self):
v1 = Vector3(1, 2, 3)
v2 = Vector3(4, 5, 6)
v = v1 + v2
self.assertEqual(v, [5, 7, 9])
v1.add(v2)
self.assertEqual(v1, [5, 7, 9])
self.assertEqual(v + 2, [7, 9, 11])
def test_sub(self):
v1 = Vector3(4, 5, 6)
v2 = Vector3(1, 2, 3)
v = v1 - v2
self.assertEqual(v, [3, 3, 3])
v1.sub(v2)
self.assertEqual(v1, [3, 3, 3])
self.assertEqual(v - 3, [0, 0, 0])
def test_multiply(self):
v1 = Vector3(5, 6, 7)
v2 = Vector3(2, 2, 2)
self.assertEqual(v1 * v2, [10., 12., 14.])
v1.multiply(v2)
self.assertEqual(v1, [10., 12., 14.])
def test_divide(self):
v1 = Vector3(6, 4, 8)
v2 = Vector3(2, 2, 2)
self.assertEqual(v1 / v2, [3., 2., 4.])
v1.divide(v2)
self.assertEqual(v1, [3., 2., 4.])
def test_minmax(self):
v = Vector3(6, 7, 4)
v1 = Vector3(3, 5, 8)
v.min(v1)
self.assertEqual(v, [3, 5, 4])
v2 = Vector3(1, 7, 6)
v.max(v2)
self.assertEqual(v, [3, 7, 6])
def test_clamp(self):
v1 = Vector3(1, 2, 3)
v2 = Vector3(3, 4, 6)
v = Vector3(0, 5, 4)
v.clamp(v1, v2)
self.assertEqual(v, [1, 4, 4])
def test_negate(self):
v = Vector3(2, 2, 2)
v.negate()
self.assertEqual(v, [-2, -2, -2])
def test_length(self):
        # (3, 12, 4) and (12, 4, 3) both have length 13; testing the latter
        v = Vector3(12, 4, 3)
self.assertEqual(v.length(), 13)
self.assertEqual(v.length_sq(), 13*13)
def test_angle(self):
v1 = Vector3(0, 0, 1)
v2 = Vector3(0, 1, 0)
angle = v1.angle(v2)
self.assertEqual(math.degrees(angle), 90.0)
v1 = Vector3(0, 0, 1)
v2 = Vector3(0, 0, -1)
angle = v1.angle(v2)
self.assertEqual(math.degrees(angle), 180.0)
def test_distance(self):
v1 = Vector3(2, 1, 6)
v2 = Vector3(2, 5, 6)
self.assertEqual(v1.distance(v2), 4)
def test_attributes(self):
v = Vector3(0, 0, 0)
v.x = 4
self.assertEqual(v[0], v.x)
self.assertEqual(v[0], 4)
v.z = 6
self.assertEqual(v[2], v.z)
self.assertEqual(v[2], 6)
try:
t = v.v
assert False, "executing of this string is error"
except AttributeError:
pass
class Vector2Test(unittest.TestCase):
def test_create(self):
v = Vector2(1, 2)
try:
v = Vector2(1, 2, 3)
assert False, "This should not be normally reached"
except:
pass # test is passed normally
    def test_attributes(self):
v = Vector2(0, 0)
v.x = 4
self.assertEqual(v[0], v.x)
self.assertEqual(v[0], 4)
v.y = 7
self.assertEqual(v[1], v.y)
self.assertEqual(v[1], 7)
try:
t = v.z
assert False, "executing of this string is error"
except AttributeError:
pass
if __name__ == '__main__':
unittest.main()
|
nskrypnik/kivy3
|
tests/test_vectors.py
|
Python
|
mit
| 3,833
|
"""
We want to see how accurate the derivatives are as we increase the number of samples.
"""
import numpy
import logging
import sys
from sandbox.recommendation.MaxLocalAUCCython import derivativeUi, derivativeUiApprox
from sandbox.util.SparseUtils import SparseUtils
from sandbox.util.SparseUtilsCython import SparseUtilsCython
import matplotlib
matplotlib.use("GTK3Agg")
import matplotlib.pyplot as plt
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.random.seed(21)
numpy.set_printoptions(precision=3, suppress=True, linewidth=150)
#Create a low rank matrix
m = 500
n = 1000
k = 20
X = SparseUtils.generateSparseBinaryMatrix((m,n), k, 0.95)
logging.debug("Number of non zero elements: " + str(X.nnz))
lmbda = 0.0
numAucSamples = 1000
u = 0.1
sigma = 1
nu = 1
nuBar = 1
project = False
omegaList = SparseUtils.getOmegaList(X)
U = numpy.random.rand(m, k)
V = numpy.random.rand(n, k)
r = SparseUtilsCython.computeR(U, V, 1-u, numAucSamples)
numPoints = 50
sampleSize = 10
numAucSamplesList = numpy.linspace(1, 50, numPoints)
norms = numpy.zeros(numPoints)
originalU = U.copy()
for s in range(sampleSize):
print(s)
i = numpy.random.randint(m)
rowInds = numpy.array([i], numpy.uint)
vec1 = derivativeUi(X, U, V, omegaList, i, lmbda, r)
vec1 = vec1/numpy.linalg.norm(vec1)
for j, numAucSamples in enumerate(numAucSamplesList):
U = originalU.copy()
vec2 = derivativeUiApprox(X, U, V, omegaList, i, numAucSamples, lmbda, r, nu)
norms[j] += numpy.abs(numpy.inner(vec1, vec2))
norms /= sampleSize
plt.plot(numAucSamplesList, norms)
plt.show()
|
charanpald/wallhack
|
wallhack/rankingexp/DerivativeExp.py
|
Python
|
gpl-3.0
| 1,658
|
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
# from base_model import L2Norm, ResNet
from resnet import ResNet, resnet34
from nhwc.conv import Conv2d_NHWC
class SSD300(nn.Module):
"""
Build a SSD module to take 300x300 image input,
and output 8732 per class bounding boxes
label_num: number of classes (including background 0)
"""
def __init__(self, args, label_num, use_nhwc=False, pad_input=False, bn_group=1, pretrained=True):
super(SSD300, self).__init__()
self.label_num = label_num
self.use_nhwc = use_nhwc
self.pad_input = pad_input
self.bn_group = bn_group
# Explicitly RN34 all the time
out_channels = 256
out_size = 38
self.out_chan = [out_channels, 512, 512, 256, 256, 256]
# self.model = ResNet(self.use_nhwc, self.pad_input, self.bn_group)
rn_args = {
'bn_group' : bn_group,
'pad_input' : pad_input,
'nhwc' : use_nhwc,
'pretrained' : pretrained,
'ssd_mods' : True,
}
self.model = resnet34(**rn_args)
self._build_additional_features()
padding_channels_to = 8
self._build_multibox_heads(use_nhwc, padding_channels_to)
# after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2
        # classifier 1, 2, 3, 4, 5, 6
        # initialize all weights
with torch.no_grad():
self._init_weights()
def _build_multibox_heads(self, use_nhwc, padding_channels_to=8):
self.num_defaults = [4, 6, 6, 6, 4, 4]
self.mbox = []
self.padding_amounts = []
if self.use_nhwc:
conv_fn = Conv2d_NHWC
else:
conv_fn = nn.Conv2d
        # Pad each head's channel count to a multiple of padding_channels_to
for nd, oc in zip(self.num_defaults, self.out_chan):
# Horizontally fuse loc and conf convolutions
my_num_channels = nd*(4+self.label_num)
if self.use_nhwc:
# Want to manually pad to get HMMA kernels in NHWC case
padding_amount = padding_channels_to - (my_num_channels % padding_channels_to)
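                # e.g. with nd=4 default boxes and label_num=81, the fused head
                # has 4*(4+81) = 340 output channels; 340 % 8 = 4, so 4 padding
                # channels are added for a total of 344 (a multiple of 8)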
else:
padding_amount = 0
self.padding_amounts.append(padding_amount)
self.mbox.append(conv_fn(oc, my_num_channels + padding_amount, kernel_size=3, padding=1))
self.mbox = nn.ModuleList(self.mbox)
"""
Output size from RN34 is always 38x38
"""
def _build_additional_features(self):
self.additional_blocks = []
if self.use_nhwc:
conv_fn = Conv2d_NHWC
else:
conv_fn = nn.Conv2d
def build_block(input_channels, inter_channels, out_channels, stride=1, pad=0):
return nn.Sequential(
conv_fn(input_channels, inter_channels, kernel_size=1),
nn.ReLU(inplace=True),
conv_fn(inter_channels, out_channels, kernel_size=3, stride=stride, padding=pad),
nn.ReLU(inplace=True)
)
strides = [2, 2, 2, 1, 1]
intermediates = [256, 256, 128, 128, 128]
paddings = [1, 1, 1, 0, 0]
for i, im, o, stride, pad in zip(self.out_chan[:-1], intermediates, self.out_chan[1:], strides, paddings):
self.additional_blocks.append(build_block(i, im, o, stride=stride, pad=pad))
self.additional_blocks = nn.ModuleList(self.additional_blocks)
def _init_additional_weights(self):
addn_blocks = [*self.additional_blocks]
# Need to handle additional blocks differently in NHWC case due to xavier initialization
for layer in addn_blocks:
for param in layer.parameters():
if param.dim() > 1:
if self.use_nhwc:
# xavier_uniform relies on fan-in/-out, so need to use NCHW here to get
# correct values (K, R) instead of the correct (K, C)
nchw_param_data = param.data.permute(0, 3, 1, 2).contiguous()
nn.init.xavier_uniform_(nchw_param_data)
# Now permute correctly-initialized param back to NHWC
param.data.copy_(nchw_param_data.permute(0, 2, 3, 1).contiguous())
else:
nn.init.xavier_uniform_(param)
def _init_multibox_weights(self):
layers = [ *self.mbox ]
for layer, default, padding in zip(layers, self.num_defaults, self.padding_amounts):
for param in layer.parameters():
if param.dim() > 1 and self.use_nhwc:
# Need to be careful - we're initialising [loc, conf, pad] with
# all 3 needing to be treated separately
conf_channels = default * self.label_num
loc_channels = default * 4
pad_channels = padding
# Split the parameter into separate parts along K dimension
conf, loc, pad = param.data.split([conf_channels, loc_channels, pad_channels], dim=0)
# Padding should be zero
pad_data = torch.zeros_like(pad.data)
def init_loc_conf(p):
p_data = p.data.permute(0, 3, 1, 2).contiguous()
nn.init.xavier_uniform_(p_data)
p_data = p_data.permute(0, 2, 3, 1).contiguous()
return p_data
# Location and confidence data
loc_data = init_loc_conf(loc)
conf_data = init_loc_conf(conf)
# Put the full weight together again along K and copy
param.data.copy_(torch.cat([conf_data, loc_data, pad_data], dim=0))
elif param.dim() > 1:
nn.init.xavier_uniform_(param)
def _init_weights(self):
self._init_additional_weights()
self._init_multibox_weights()
# Shape the classifier to the view of bboxes
def bbox_view(self, src, mbox):
locs = []
confs = []
for s, m, num_defaults, pad in zip(src, mbox, self.num_defaults, self.padding_amounts):
mm = m(s)
conf_channels = num_defaults * self.label_num
loc_channels = num_defaults * 4
if self.use_nhwc:
conf, loc, _ = mm.split([conf_channels, loc_channels, pad], dim=3)
conf, loc = conf.contiguous(), loc.contiguous()
# We now have unfused [N, H, W, C]
# Layout is a little awkward here.
# Take C = c * d, then we actually have:
# [N, H, W, c*d]
# flatten HW first:
# [N, H, W, c*d] -> [N, HW, c*d]
locs.append(
loc.view(s.size(0), -1, 4 * num_defaults).permute(0, 2, 1).contiguous().view(loc.size(0), 4, -1))
confs.append(
conf.view(s.size(0), -1, self.label_num * num_defaults).permute(0, 2, 1).contiguous().view(conf.size(0), self.label_num, -1))
else:
conf, loc = mm.split([conf_channels, loc_channels], dim=1)
conf, loc = conf.contiguous(), loc.contiguous()
# flatten the anchors for this layer
locs.append(loc.view(s.size(0), 4, -1))
confs.append(conf.view(s.size(0), self.label_num, -1))
cat_dim = 2
locs, confs = torch.cat(locs, cat_dim), torch.cat(confs, cat_dim)
return locs, confs
def forward(self, data):
layers = self.model(data)
# last result from network goes into additional blocks
x = layers
# If necessary, transpose back to NCHW
additional_results = []
for i, l in enumerate(self.additional_blocks):
x = l(x)
additional_results.append(x)
# do we need the l2norm on the first result?
src = [layers, *additional_results]
# Feature Map 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
locs, confs = self.bbox_view(src, self.mbox)
# For SSD 300, shall return nbatch x 8732 x {nlabels, nlocs} results
return locs, confs
|
mlperf/training_results_v0.7
|
NVIDIA/benchmarks/ssd/implementations/pytorch/ssd300.py
|
Python
|
apache-2.0
| 8,862
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
# Support for fake joystick/gamepad during development
# if no 'real' joystick/gamepad is available use keyboard emulation
# 'ctrl' + 'alt' + numberKey
from __future__ import absolute_import, division, print_function
from psychopy import event
class VirtualJoystick(object):
def __init__(self, device_number):
self.device_number = device_number
self.numberKeys = ['0','1','2','3','4','5','6','7','8','9']
self.modifierKeys = ['ctrl','alt']
self.mouse = event.Mouse()
event.Mouse(visible=False)
def getNumButtons(self):
return(len(self.numberKeys))
def getAllButtons(self):
keys = event.getKeys(keyList=self.numberKeys, modifiers=True)
values = [key for key, modifiers in keys if all([modifiers[modKey] for modKey in self.modifierKeys])]
self.state = [key in values for key in self.numberKeys]
mouseButtons = self.mouse.getPressed()
self.state[:len(mouseButtons)] = [a or b != 0 for (a,b) in zip(self.state, mouseButtons)]
return(self.state)
def getX(self):
(x, y) = self.mouse.getPos()
return x
def getY(self):
(x, y) = self.mouse.getPos()
return y
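# Illustrative usage (added example): poll the emulated gamepad each frame
#   joy = VirtualJoystick(0)
#   buttons = joy.getAllButtons()  # True where ctrl+alt+<digit> (or a mouse button) is held
#   x, y = joy.getX(), joy.getY()  # mouse position stands in for the stick axes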
|
psychopy/versions
|
psychopy/experiment/components/joystick/virtualJoystick.py
|
Python
|
gpl-3.0
| 1,433
|
import numpy as np
import active
import config
def compute_accuracy(w, X_testing, Y_testing):
size = X_testing.shape[0]
predictions = active.linear_predictor(X_testing, w)
results = predictions == Y_testing
correct = np.count_nonzero(results)
accuracy = correct/size
return accuracy
def weights_matrix(n, iterations, X_training, Y_training, center='ac',
sample=1, M=None):
testing = 3
matrix_of_weights = []
for i in range(n):
weights = active.active(X_training, Y_training, iterations, center=center,
sample = sample, testing=testing, M=M)[2]
matrix_of_weights.append(weights)
return matrix_of_weights
def experiment(n, iterations, X_testing, Y_testing, X_training, Y_training,
center='ac', sample = 1, M=None):
config.reset()
testing=3
matrix_of_weights = weights_matrix(n, iterations, X_training, Y_training,
center=center, sample=sample, M=M)
matrix_of_accuracies = []
for weights in matrix_of_weights:
accuracies = []
for weight in weights:
accuracy = compute_accuracy(weight, X_testing, Y_testing)
accuracies.append(accuracy)
matrix_of_accuracies.append(accuracies)
matrix_of_accuracies = np.array(matrix_of_accuracies)
sum_of_accuracies = matrix_of_accuracies.sum(axis=0)
average_accuracies = sum_of_accuracies/n
return average_accuracies
|
alasdairtran/mclearn
|
projects/david/lab/experiment.py
|
Python
|
bsd-3-clause
| 1,510
|
# IVLE - Informatics Virtual Learning Environment
# Copyright (C) 2007-2009 The University of Melbourne
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from storm.locals import Store
from ivle.database import (
Enrolment, Offering, ProjectSet, Project, Semester, Subject, User)
from ivle.webapp import ApplicationRoot
from ivle.webapp.publisher import ROOT
from ivle.webapp.publisher.decorators import forward_route, reverse_route
@forward_route(ApplicationRoot, argc=1)
def root_to_user(root, segment):
if not segment.startswith('~'):
return None
return User.get_by_login(root.store, segment[1:])
@forward_route(ApplicationRoot, 'subjects', argc=1)
def root_to_subject(root, name):
return root.store.find(Subject, short_name=name).one()
@forward_route(ApplicationRoot, '+semesters', argc=2)
def root_to_semester(root, year, semester):
return root.store.find(Semester, year=year, url_name=semester).one()
@forward_route(Subject, argc=2)
def subject_to_offering(subject, year, semester):
return subject.offering_for_semester(year, semester)
@forward_route(Offering, '+projects', argc=1)
def offering_to_project(offering, name):
return Store.of(offering).find(Project,
Project.short_name == name,
Project.project_set_id == ProjectSet.id,
ProjectSet.offering == offering).one()
@forward_route(Offering, '+projectsets', argc=1)
def offering_to_projectset(offering, name):
try:
ps_id = int(name)
except ValueError:
return None
return Store.of(offering).find(ProjectSet,
ProjectSet.id == ps_id,
ProjectSet.offering == offering).one()
@forward_route(Offering, '+enrolments', argc=1)
def offering_to_enrolment(offering, login):
return Store.of(offering).find(Enrolment,
Enrolment.offering == offering,
Enrolment.user_id == User.id,
User.login == login).one()
@reverse_route(User)
def user_url(user):
return (ROOT, '~' + user.login)
@reverse_route(Subject)
def subject_url(subject):
return (ROOT, ('subjects', subject.short_name))
@reverse_route(Semester)
def semester_url(semester):
return (ROOT, ('+semesters', semester.year, semester.url_name))
@reverse_route(Offering)
def offering_url(offering):
return (offering.subject, (offering.semester.year,
offering.semester.url_name))
@reverse_route(ProjectSet)
def projectset_url(project_set):
return (project_set.offering, ('+projectsets', str(project_set.id)))
@reverse_route(Project)
def project_url(project):
return (project.project_set.offering, ('+projects', project.short_name))
@reverse_route(Enrolment)
def enrolment_url(enrolment):
return (enrolment.offering, ('+enrolments', enrolment.user.login))
|
dcoles/ivle
|
ivle/webapp/admin/publishing.py
|
Python
|
gpl-2.0
| 3,626
|
from PyQt4 import QtCore, QtGui
from MainWindow import *
from MorphogenesisImageData import MorphogenesisImageData
import sys, os, time
class WorkerThread ( QtCore.QThread ):
def __init__(self, controller, texture, maxIterations):
QtCore.QThread.__init__(self)
self.controller = controller
self.texture = texture
self.maxIterations = maxIterations
self.halt = False
def run ( self ):
while not self.halt and (self.maxIterations == 0 or self.texture.iteration < self.maxIterations):
self.texture.step()
time.sleep(0.001)
class Controller:
def __init__(self, window):
self.window = window
self.worker = None
self.texture = None
self.running = False
self.dumpAtEndPath = None
self.timer = QtCore.QTimer()
def setThreadRunning(self, flag):
self.running = flag
self.window.ui.pauseButton.setEnabled(flag)
self.window.ui.runButton.setEnabled(not flag)
def setThreadFinished(self):
self.setThreadRunning(False)
if self.dumpAtEndPath is not None:
self.window.ui.widget.grabFrameBuffer().save(os.path.join(self.dumpAtEndPath, self.texture.imageName()) + '.png')
QtCore.QCoreApplication.instance().quit()
def saveScreenshot(self):
dialog = QtGui.QFileDialog()
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.selectFile(self.texture.imageName())
if dialog.exec_() == QtGui.QDialog.Accepted:
f = dialog.selectedFiles()[0]
self.window.ui.widget.grabFrameBuffer().save(f)
def updateUI(self):
self.window.ui.widget.updateGL()
if self.texture is not None:
self.window.ui.ipsLabel.setText("%1.f IPS" % (float(self.texture.iteration - self.lastIteration)/(time.time() - self.lastIterationTime)))
self.window.ui.iterationLabel.setText("Iteration %d" % self.texture.iteration)
self.lastIteration = self.texture.iteration
self.lastIterationTime = time.time()
def awake(self):
QtCore.QObject.connect(self.window.ui.runButton, QtCore.SIGNAL("clicked()"), self.run)
QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.updateUI )
QtCore.QObject.connect(self.window.ui.initButton, QtCore.SIGNAL("clicked()"), self.init)
QtCore.QObject.connect(self.window.ui.stepButton, QtCore.SIGNAL("clicked()"), self.step)
QtCore.QObject.connect(self.window.ui.pauseButton, QtCore.SIGNAL("clicked()"), self.pause)
QtCore.QObject.connect(self.window.ui.debugInfoMenuItem, QtCore.SIGNAL("triggered()"), self.logDebugInfo)
QtCore.QObject.connect(self.window.ui.saveScreenshotMenuItem, QtCore.SIGNAL("triggered()"), self.saveScreenshot)
QtCore.QObject.connect(QtCore.QCoreApplication.instance(), QtCore.SIGNAL("aboutToQuit()"), self.cleanup)
self.setThreadRunning(False)
def pause(self):
self.timer.stop()
if self.worker is not None:
self.worker.halt = True
def cleanup(self):
if self.worker is not None:
self.worker.halt = True
def init(self):
self.pause()
da = float(self.window.ui.daField.text())
db = float(self.window.ui.dbField.text())
ds = float(self.window.ui.dsField.text())
beta = float(self.window.ui.betaField.text())
width = self.window.ui.widthSlider.value()
height = self.window.ui.heightSlider.value()
self.texture = MorphogenesisImageData(width, height, ds, da, db, beta)
if self.window.ui.initComboBox.currentIndex() == 0:
self.texture.generate('random')
else:
self.texture.generate('stripe')
self.window.ui.widget.setTexture(self.texture)
self.window.ui.widget.makeCurrent()
def step(self):
        if self.texture is None:
self.init()
self.texture.step()
self.window.ui.widget.updateGL()
def run(self, maxIterations = 0, dumpAtEndPath = None):
        if self.texture is None:
self.init()
if not self.running:
self.dumpAtEndPath = dumpAtEndPath
self.lastIteration = self.texture.iteration
self.lastIterationTime = time.time()
self.worker = WorkerThread(self, self.texture, maxIterations)
QtCore.QObject.connect(self.worker, QtCore.SIGNAL("finished()"), self.setThreadFinished)
self.worker.start()
self.timer.start(40)
self.setThreadRunning(True)
def logDebugInfo(self):
if self.texture is not None:
self.texture.logDebugInfo()
def setOptions(self, options):
self.window.ui.daField.setText(str(options.D_a))
self.window.ui.dbField.setText(str(options.D_b))
self.window.ui.dsField.setText(str(options.D_s))
self.window.ui.betaField.setText(str(options.beta_i))
self.window.ui.widthSlider.setValue(options.width)
self.window.ui.heightSlider.setValue(options.height)
|
thomasdeniau/pyfauxfur
|
Controller.py
|
Python
|
bsd-3-clause
| 5,163
|
import os.path
import json,codecs
import unreal_engine as ue
from unreal_engine import FVector,FRotator
from unreal_engine.classes import Actor, Pawn, Character, ProjectileMovementComponent, PawnSensingComponent, StaticMesh
from unreal_engine.classes import StaticMeshComponent, StaticMeshActor, PointLightComponent
class ObjectLoader:
def begin_play(self):
ue.log("begin object loader")
self.pawn = self.uobject.get_owner()
#self.world = ue.get_editor_world()
self.datapath = str(self.pawn.get_property('datafilename'))
self.objects = []
ue.log("------------------")
def loadAndSpawnObjects(self):
ue.log("+++++++++++++++++++")
ue.log("loadAndSpawnObjects")
ue.log("checking for "+self.datapath)
if os.path.exists(self.datapath):
with codecs.open(self.datapath,"r","utf-8") as f:
data = json.loads(f.read())
ue.log(data)
for obj in data:
objclass = ue.find_class(obj["type"])
#ue.log(str(type(objclass))+str(objclass)+"="+obj["json"])
objinst = self.uobject.actor_spawn(objclass, FVector(0, 0, 0),FRotator(0, 0, 0))
jsonstr = obj["json"]
self.objects.append(objinst)
objinst.call_function("loadjson",jsonstr)
ue.log("------------------")
def clear(self):
self.objects.clear()
def add(self):
self.objects.append(self.pawn.get_property('whattoadd'))
#ue.log(len(self.objects))
def printall(self):
ue.log(len(self.objects))
def saveAllObjects(self):
with codecs.open(self.datapath,"w","utf-8") as f:
res = []
for obj in self.objects:
res.append({"type":obj.get_class().get_name(),"json":obj.savejson()[0]})
f.write(json.dumps(res))
def tick(self, delta_time):
pass
|
meahmadi/ThreeDHighway
|
Content/Scripts/ObjectLoader.py
|
Python
|
apache-2.0
| 1,690
|
import json
import unittest
import ipuz
class IPUZBaseTestCase(unittest.TestCase):
def validate_puzzle(self, json_data, expected_exception, **kwargs):
with self.assertRaises(ipuz.IPUZException) as cm:
ipuz.read(json.dumps(json_data), **kwargs)
self.assertEqual(str(cm.exception), expected_exception)
def validate(self, expected_exception):
self.validate_puzzle(self.puzzle, expected_exception)
class IPUZReadTestCase(IPUZBaseTestCase):
def test_read_detects_invalid_ipuz_data(self):
with self.assertRaises(ipuz.IPUZException) as cm:
ipuz.read("this is wrong")
self.assertEqual(str(cm.exception), "No valid JSON could be found")
def test_read_detects_empty_input(self):
with self.assertRaises(ipuz.IPUZException) as cm:
ipuz.read(None)
self.assertEqual(str(cm.exception), "No valid JSON could be found")
with self.assertRaises(ipuz.IPUZException) as cm:
ipuz.read("")
self.assertEqual(str(cm.exception), "No valid JSON could be found")
def test_read_detects_non_string_input(self):
with self.assertRaises(ipuz.IPUZException) as cm:
ipuz.read(3)
self.assertEqual(str(cm.exception), "No valid JSON could be found")
def test_read_detects_valid_json_but_not_dict_json(self):
with self.assertRaises(ipuz.IPUZException) as cm:
ipuz.read('["version", "kind"]')
self.assertEqual(str(cm.exception), "No valid JSON could be found")
def test_read_raises_for_missing_version_field(self):
self.validate_puzzle({}, "Mandatory field version is missing")
def test_read_raises_for_missing_kind_field(self):
self.validate_puzzle({
"version": "http://ipuz.org/v1",
}, "Mandatory field kind is missing")
def test_read_raises_for_invalid_version_field(self):
self.validate_puzzle({
"version": "invalid_version",
"kind": ["http://ipuz.org/invalid", ]
}, "Invalid version value found")
def test_read_raises_for_invalid_zero_version_number(self):
self.validate_puzzle({
"version": "http://ipuz.org/v0",
"kind": ["http://ipuz.org/invalid", ]
}, "Invalid version value found")
def test_read_raises_for_unsupported_version_field(self):
self.validate_puzzle({
"version": "http://ipuz.org/v5",
"kind": ["http://ipuz.org/invalid", ]
}, "Unsupported version value found")
def test_read_allows_jsonp_callback_function(self):
result = ipuz.read("ipuz(" + json.dumps({
"version": "http://ipuz.org/v1",
"kind": ["http://ipuz.org/invalid", ]
}) + ")")
self.assertEqual(result['version'], "http://ipuz.org/v1")
result = ipuz.read("ipuz_callback_function(" + json.dumps({
"version": "http://ipuz.org/v1",
"kind": ["http://ipuz.org/invalid", ]
}) + ")")
self.assertEqual(result['version'], "http://ipuz.org/v1")
def test_invalid_kind_type(self):
self.validate_puzzle({
"version": "http://ipuz.org/v1",
"kind": 3,
}, "Invalid kind value found")
def test_invalid_empty_kind(self):
self.validate_puzzle({
"version": "http://ipuz.org/v1",
"kind": [],
}, "Invalid kind value found")
def test_invalid_kind_is_not_a_string(self):
self.validate_puzzle({
"version": "http://ipuz.org/v1",
"kind": [3],
}, "Invalid kind value found")
def test_invalid_kind_contains_empty_string(self):
self.validate_puzzle({
"version": "http://ipuz.org/v1",
"kind": [""],
}, "Invalid kind value found")
def test_unsupported_kind_value_found(self):
self.validate_puzzle({
"version": "http://ipuz.org/v1",
"kind": ["http://ipuz.org/crossword"]
}, "Unsupported kind value found",
puzzlekinds=["http://ipuz.org/sudoku"]
)
def test_unsupported_kind_value_found_based_on_version(self):
self.validate_puzzle({
"version": "http://ipuz.org/v1",
"kind": ["http://ipuz.org/crossword#2"]
}, "Unsupported kind value found",
puzzlekinds=["http://ipuz.org/crossword#1"]
)
def test_unsupported_kind_value_with_multiple_kinds(self):
self.validate_puzzle({
"version": "http://ipuz.org/v1",
"kind": ["http://ipuz.org/crossword#1", "http://ipuz.org/invalid"]
}, "Unsupported kind value found",
puzzlekinds=["http://ipuz.org/crossword#1"]
)
class IPUZFieldValidatorTestCase(IPUZBaseTestCase):
def setUp(self):
self.puzzle = {
"version": "http://ipuz.org/v1",
"kind": ["http://ipuz.org/invalid"],
}
def test_validate_date_invalid_format(self):
self.puzzle["date"] = "14/01/2014"
self.validate("Invalid date format: 14/01/2014")
def test_copyright_is_string(self):
self.puzzle["copyright"] = 3
self.validate("Invalid copyright value found")
def test_publisher_is_string(self):
self.puzzle["publisher"] = 3
self.validate("Invalid publisher value found")
def test_publication_is_string(self):
self.puzzle["publication"] = 3
self.validate("Invalid publication value found")
def test_url_is_string(self):
self.puzzle["url"] = 3
self.validate("Invalid url value found")
def test_uniqueid_is_string(self):
self.puzzle["uniqueid"] = 3
self.validate("Invalid uniqueid value found")
def test_title_is_string(self):
self.puzzle["title"] = 3
self.validate("Invalid title value found")
def test_intro_is_string(self):
self.puzzle["intro"] = 3
self.validate("Invalid intro value found")
def test_explanation_is_string(self):
self.puzzle["explanation"] = 3
self.validate("Invalid explanation value found")
def test_annotation_is_string(self):
self.puzzle["annotation"] = 3
self.validate("Invalid annotation value found")
def test_author_is_string(self):
self.puzzle["author"] = 3
self.validate("Invalid author value found")
def test_editor_is_string(self):
self.puzzle["editor"] = 3
self.validate("Invalid editor value found")
def test_notes_is_string(self):
self.puzzle["notes"] = 3
self.validate("Invalid notes value found")
def test_difficulty_is_string(self):
self.puzzle["difficulty"] = 3
self.validate("Invalid difficulty value found")
def test_origin_is_string(self):
self.puzzle["origin"] = 3
self.validate("Invalid origin value found")
def test_block_is_string(self):
self.puzzle["block"] = 3
self.validate("Invalid block value found")
def test_empty_is_string_or_int(self):
self.puzzle["empty"] = True
self.validate("Invalid empty value found")
def test_checksum_is_list(self):
self.puzzle["checksum"] = 3
self.validate("Invalid checksum value found")
def test_checksum_is_list_of_strings(self):
self.puzzle["checksum"] = [3]
self.validate("Invalid checksum value found")
def test_volatile_is_a_dict(self):
self.puzzle["volatile"] = 3
self.validate("Invalid volatile value found")
def test_volatile_is_a_dict_with_strings(self):
self.puzzle["volatile"] = {"A": 3}
self.validate("Invalid volatile value found")
class IPUZWriteTestCase(IPUZBaseTestCase):
def test_write_produces_json_string_by_default(self):
json_data = {}
result = ipuz.write(json_data)
expected = json.dumps(json_data)
self.assertEqual(result, expected)
def test_write_supports_different_callback_name(self):
json_data = {}
result = ipuz.write(json_data, jsonp=True, callback_name="ipuz_function")
expected = ''.join(['ipuz_function(', json.dumps(json_data), ')'])
self.assertEqual(result, expected)
def test_write_produces_jsonp_with_jsonp_flag(self):
json_data = {}
result = ipuz.write(json_data, jsonp=True)
expected = ''.join(['ipuz(', json.dumps(json_data), ')'])
self.assertEqual(result, expected)
def test_ignores_callback_name_when_json_only(self):
json_data = {}
result = ipuz.write(
json_data,
callback_name="ipuz_function"
)
expected = json.dumps(json_data)
self.assertEqual(result, expected)
class IPUZRoundTripTestCase(IPUZBaseTestCase):
def test_first_ipuz_file_with_json(self):
with open("fixtures/first.ipuz") as f:
data = f.read()
output = ipuz.read(data)
output_string = ipuz.write(output)
second_output = ipuz.read(output_string)
self.assertEqual(output, second_output)
def test_first_ipuz_file_with_jsonp(self):
with open("fixtures/first.ipuz") as f:
data = f.read()
output = ipuz.read(data)
output_string = ipuz.write(output, jsonp=True)
second_output = ipuz.read(output_string)
self.assertEqual(output, second_output)
def test_example_ipuz_file_with_json(self):
with open("fixtures/example.ipuz") as f:
data = f.read()
output = ipuz.read(data)
output_string = ipuz.write(output)
second_output = ipuz.read(output_string)
self.assertEqual(output, second_output)
def test_example_ipuz_file_with_jsonp(self):
with open("fixtures/example.ipuz") as f:
data = f.read()
output = ipuz.read(data)
output_string = ipuz.write(output, jsonp=True)
second_output = ipuz.read(output_string)
self.assertEqual(output, second_output)
def test_v2_ipuz_file_with_json(self):
with open("fixtures/example_v2.ipuz") as f:
data = f.read()
output = ipuz.read(data)
output_string = ipuz.write(output)
second_output = ipuz.read(output_string)
self.assertEqual(output, second_output)
def test_v2_ipuz_file_with_jsonp(self):
with open("fixtures/example_v2.ipuz") as f:
data = f.read()
output = ipuz.read(data)
output_string = ipuz.write(output, jsonp=True)
second_output = ipuz.read(output_string)
self.assertEqual(output, second_output)
|
svisser/ipuz
|
tests/test_ipuz.py
|
Python
|
mit
| 10,700
|
"""
This model simulates the behavior
of bacteria according to toxin and nutrient levels.
"""
import sys
from random import randint
from indra.agent import Agent
from indra.composite import Composite
from indra.display_methods import BLUE, GREEN, RED
from indra.env import Env
from registry.registry import get_env, get_group, get_prop
from registry.registry import user_tell, run_notice
from indra.space import DEF_HEIGHT, DEF_WIDTH, distance
from indra.utils import init_props
MODEL_NAME = "bacteria"
DEBUG = False # turns debugging code on or off
DEBUG2 = False # turns deeper debugging code on or off
BACTERIA = "Bacteria"
NUTRIENTS = "Nutrients"
TOXINS = "Toxins"
DEF_NUM_BACT = 1
NUM_TOXINS = 1
DEF_NUM_NUTRIENTS = 1
DEF_THRESHOLD = -0.2
DEF_TOXIN_MOVE = 1
DEF_BACTERIUM_MOVE = 3
DEF_NUTRIENT_MOVE = 2
def calc_toxin(group, agent):
"""
Calculate the strength of a toxin / nutrient field for an agent.
We will use an inverse square law.
"""
toxin_strength = 0
for toxin in group:
if distance(group[toxin], agent) != 0:
toxin_strength += 1 / (distance(group[toxin], agent) ** 2)
else:
toxin_strength += sys.maxsize
toxin_strength *= -1
return toxin_strength
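# Worked example (illustrative numbers): a single toxin at distance 2
# contributes 1 / 2**2 = 0.25, negated to -0.25; at distance 0 the field
# saturates to -sys.maxsize, so a bacterium always flees a co-located toxin.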
def calc_nutrient(group, agent):
nutrient_strength = 0
for nutrient in group:
if distance(group[nutrient], agent) != 0:
nutrient_strength += 1 / (distance(group[nutrient], agent) ** 2)
else:
nutrient_strength += sys.maxsize
return nutrient_strength
def bacterium_action(agent, **kwargs):
"""
Algorithm:
1) sense env
(toxin_level = calc_toxin(toxins, agent))
2) see if it is worse or better than previous env
3) if worse, change direction
(agent["angle"] = new_angle)
4) move (done automatically by returning False)
"""
if DEBUG:
user_tell("I'm " + agent.name + " and I'm hungry.")
toxin_level = calc_toxin(get_group(TOXINS), agent)
nutrient_level = calc_nutrient(
get_group(NUTRIENTS), agent)
if agent["prev_toxicity"] is not None:
toxin_change = toxin_level - agent["prev_toxicity"]
else:
toxin_change = sys.maxsize * (-1)
if agent["prev_nutricity"] is not None:
nutrient_change = nutrient_level - agent["prev_nutricity"]
else:
nutrient_change = sys.maxsize * (-1)
threshold = DEF_THRESHOLD
agent["prev_toxicity"] = toxin_level
agent["prev_nutricity"] = nutrient_level
if (toxin_change > nutrient_change) or (threshold >= toxin_level):
if agent["angle"] is None:
new_angle = randint(0, 360)
else:
angle_shift = randint(45, 315)
new_angle = agent["angle"] + angle_shift
if (new_angle > 360):
new_angle = new_angle % 360
agent["angle"] = new_angle
# return False means to move
return False
def toxin_action(agent, **kwargs):
if DEBUG:
user_tell("I'm " + agent.name + " and I'm poisonous.")
# return False means to move
return False
def nutrient_action(agent, **kwargs):
if DEBUG:
user_tell("I'm " + agent.name + " and I'm nutrious.")
# return False means to move
return False
def create_bacterium(name, i):
"""
Create a bacterium.
"""
bacterium = Agent(name + str(i), action=bacterium_action)
bacterium["prev_toxicity"] = None
bacterium["prev_nutricity"] = None
bacterium["angle"] = None
bacterium["max_move"] = get_prop("bacterium_move",
DEF_BACTERIUM_MOVE)
return bacterium
def create_toxin(name, i):
"""
Create a toxin.
"""
toxin = Agent(name + str(i), action=toxin_action)
toxin["max_move"] = get_prop("toxin_move", DEF_TOXIN_MOVE)
return toxin
def create_nutrient(name, i):
"""
Create a nutrient.
"""
nutrient = Agent(name + str(i), action=nutrient_action)
nutrient["max_move"] = get_prop("nutrient_move",
DEF_NUTRIENT_MOVE)
return nutrient
def set_up(props=None):
"""
A func to set up run that can also be used by test code.
"""
init_props(MODEL_NAME, props)
toxins = Composite(TOXINS, {"color": RED},
member_creator=create_toxin,
num_members=get_prop('num_toxins', NUM_TOXINS))
nutrients = Composite(NUTRIENTS, {"color": GREEN},
member_creator=create_nutrient,
num_members=get_prop('num_nutrients', DEF_NUM_NUTRIENTS))
bacteria = Composite(BACTERIA, {"color": BLUE},
member_creator=create_bacterium,
num_members=get_prop('num_bacteria',
DEF_NUM_BACT))
Env(MODEL_NAME,
height=get_prop('grid_height', DEF_HEIGHT),
width=get_prop('grid_width', DEF_WIDTH),
members=[toxins, nutrients, bacteria])
def main():
set_up()
run_notice(MODEL_NAME)
get_env()()
return 0
if __name__ == "__main__":
main()
|
gcallah/Indra
|
models/bacteria.py
|
Python
|
gpl-3.0
| 5,141
|
try:
import simplejson as json
except ImportError:
import json
import urllib2, socket
import cPickle as pickle
from time import strftime, localtime, time
def postRequest(obj):
#print obj
request = urllib2.Request("http://127.0.0.1/zabbix/api_jsonrpc.php")
request.add_header('Content-Type' , 'application/json-rpc')
request.add_header('User-agent', 'script_by_Mateus/1.0')
response = urllib2.urlopen(request, json.dumps(obj))
#print 'Receive: %s' % content
return json.loads(response.read())
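# Illustrative direct use of postRequest (a sketch, not part of the original
# flow); "apiinfo.version" is the one Zabbix method that needs no auth token:
#   reply = postRequest({"jsonrpc": "2.0", "method": "apiinfo.version",
#                        "params": {}, "id": 1})
#   print reply["result"]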
class api:
def __init__(self):
self.obj={'jsonrpc': '2.0', 'params':{}}
self.api_translate={}
self.history={}
# translate from: https://www.zabbix.com/documentation/2.4/manual/api/reference/action/object
self.api_translate["action"] = {
"eventsource": "check event->source",
"filter": {
"conditions": {
"conditiontype": {
# Possible values for trigger actions:
"host group": 0,
"host": 1,
"trigger": 2,
"trigger name": 3,
"trigger severity": 4,
"trigger value": 5,
"time period": 6,
"host template": 13,
"application": 15,
"maintenance status": 16,
# Possible values for discovery actions:
"host IP": 7,
"discovered service type": 8,
"discovered service port": 9,
"discovery status": 10,
"uptime or downtime duration": 11,
"received value": 12,
"discovery rule": 18,
"discovery check": 19,
"proxy": 20,
"discovery object": 21,
# Possible values for auto-registration actions:
"proxy": 20,
"host name": 22,
"host metadata": 24,
# Possible values for internal actions:
"host group": 0,
"host": 1,
"host template": 13,
"application": 15,
"event type": 23,
},
"operator":{
"(default) =": 0,
"<>": 1,
"like": 2,
"not like": 3,
"in": 4,
">=": 5,
"<=": 6,
"not it": 7,
}
},
"evaltype": {
"and/or" : 0,
"and" : 1,
"or": 2,
"custom expression":3
}
},
"status": {
"(default) enabled":0,
"disabled":1
}
}
# translate from: https://www.zabbix.com/documentation/2.4/manual/api/reference/event/object
self.api_translate["event"] = {
"source": {
"event created by a trigger": 0,
"event created by a discovery rule": 1,
"event created by active agent auto-registration": 2,
"internal event": 3
}
}
# translate from: https://www.zabbix.com/documentation/2.4/manual/api/reference/trigger/object
self.api_translate["trigger"] = {
"priority": {
0: "N/A",
1: "information",
2: "warning",
3: "average",
4: "high",
5: "disater"
}
}
'''
Authenticate and use the token on the next request
'''
def login(self, user, password):
self.obj["params"]["user"]=user
self.obj["params"]["password"]=password
self.obj["method"]="user.login"
self.obj["id"]=1
self.obj["auth"]=postRequest(self.obj)["result"]
#print self.obj
'''
Check if an action name exists
return True/False
'''
def Exist_action_by_Name(self, name):
exists = False
self.obj["result"]={}
self.generic_method("action.exists", { "name": name})
if "result" in self.obj:
exists=self.obj["result"]
return exists
'''
Create the hostgroup if it does not already exist
'''
def Create_Hostgroup(self, hostgroup_name):
if self.Hostgroup_Exist_by_Name(hostgroup_name):
print "OK: hostgroup[%s] already exist" % (hostgroup_name)
else:
self.obj["result"]={}
self.generic_method("hostgroup.create", { "name": hostgroup_name})
if "groupids" in self.obj["result"]:
print "OK: hostgroup[%s] created" % hostgroup_name
else:
print "ERR: can't create hostgroup[%s]" % hostgroup_name
'''
return a list of hostgroup ids that the hostname is a member of
'''
def Host_Get_HostGroupIdList_by_HostName(self,hostname):
list = []
self.obj["result"]={}
self.generic_method("host.get", { "output": ["hostid"], "selectGroups": ["groupid"],"filter": { "host": hostname } })
if "groups" in self.obj["result"][0]:
for groups in self.obj["result"][0]["groups"]:
if "groupid" in groups:
list.append(int(groups["groupid"]))
return list
'''
Add a hostname to a hostgroup
'''
def Host_Update_Add_HostGroup_by_Name(self,hostname,hostgroup_name):
if self.Hostgroup_Exist_by_Name(hostgroup_name):
if self.Hostname_Exists(hostname):
Current=self.Host_Get_HostGroupIdList_by_HostName(hostname)
groupid=self.Hostgroup_Get_Hostgroupid_by_Name(hostgroup_name)
if groupid in Current:
print "OK: Hostname[%s] already in HostGroup[%s]" % (hostname,hostgroup_name)
else:
Current.append(int(groupid))
hostid=self.Host_Get_HostId_by_Hostname(hostname)
print "OK: Adding Hostname[%s] to Hostgroup[%s]" % (hostname,hostgroup_name)
PARAM={"hostid":hostid,"groups": [] }
for ID in Current:
PARAM["groups"].append({ "groupid": ID })
self.generic_method("host.update", PARAM)
else:
print "ERR: hostname[%s] not found" % (hostname)
else:
print "ERR: hostgroup[%s] not found" % (hostgroup_name)
'''
return the hostgroup_id for a hostgroup_name
'''
def Hostgroup_Get_Hostgroupid_by_Name(self,hostgroup_name):
id=0
self.obj["result"]={}
self.generic_method("hostgroup.get", { "output": "extended", "filter": { "name": [hostgroup_name]} } )
if "result" in self.obj:
for response in self.obj["result"]:
if "groupid" in response:
id=int(response["groupid"])
return id
'''
return whether the hostgroup_name exists (True/False)
'''
def Hostgroup_Exist_by_Name(self,hostgroup_name):
test=False
self.obj["result"]={}
self.generic_method("hostgroup.exists", { "name": hostgroup_name } )
if "result" in self.obj:
test=int(self.obj["result"])
return test
'''
return whether the template_name exists (True/False)
'''
def Template_Exist(self,hostgroup_name):
test=False
self.obj["result"]={}
self.generic_method("template.exists", { "name": hostgroup_name } )
if "result" in self.obj:
test=int(self.obj["result"])
return test
'''
return the template_id for a template_name
'''
def Template_Get_Templateid_by_Name(self, name):
templateid = 0
self.obj["result"]={}
self.generic_method("template.get", { "output": ["hostid"], "filter": { "host": name } })
if "templateid" in self.obj["result"][0]:
templateid=self.obj["result"][0]["templateid"]
return templateid
'''
return whether the hostname exists (True/False)
'''
def Hostname_Exists(self, hostname):
test = False
self.obj["result"]={}
self.generic_method("host.exists", { "host": hostname } )
if "result" in self.obj:
test=self.obj["result"]
return test
'''
return a list of template_ids for a hostname
'''
def Host_Get_TemplateIdList_by_Hostname(self, hostname):
list = []
self.obj["result"]={}
self.generic_method("host.get", { "output": ["hostid"], "selectParentTemplates": ["templateid"],"filter": { "host": hostname } })
if "parentTemplates" in self.obj["result"][0]:
for template in self.obj["result"][0]["parentTemplates"]:
list.append(int(template["templateid"]))
return list
'''
return the host_id for a hostname
'''
def Host_Get_HostId_by_Hostname(self, hostname):
id = 0
self.obj["result"]={}
output=self.generic_method("host.get", { "output": ["hostid"], "filter": { "host": hostname } })
if "hostid" in self.obj["result"][0]:
id = self.obj["result"][0]["hostid"]
return id
'''
create an auto-registration action for an OS and add the given template
'''
def Action_Create_Autoregister_by_Name(self, autoregister_name, SO_name, template_name):
templateid = 0
if self.Exist_action_by_Name(autoregister_name):
print "OK: Auto Registration[%s] Action already exist" % (autoregister_name)
else:
templateid = self.Template_Get_Templateid_by_Name(template_name)
self.obj["result"]={}
PARAM={
"name": autoregister_name,
"eventsource": zabbix_api.api_translate["event"]["source"]["event created by active agent auto-registration"],
"status": zabbix_api.api_translate["action"]["status"]["(default) enabled"],
"esc_period": 0,
"filter": {
"evaltype": zabbix_api.api_translate["action"]["filter"]["evaltype"]["and/or"],
"conditions": [ {
"conditiontype": zabbix_api.api_translate["action"]["filter"]["conditions"]["conditiontype"]["host metadata"],
"operator": zabbix_api.api_translate["action"]["filter"]["conditions"]["operator"]["like"],
"value": SO_name
} ]
},
"operations": [
{
"esc_step_from": 1,
"esc_period": 0,
"optemplate": [
{ "templateid": templateid }
],
"operationtype": 6,
"esc_step_to": 1
}
]
}
self.generic_method("action.create", PARAM)
if "actionids" in self.obj["result"]:
print "OK: Add Auto Registration[%s] Action with ID[%s]" % (autoregister_name, str(self.obj["result"]["actionids"]))
'''
Add a template to a hostname
'''
def Host_Update_Add_Template_by_Name(self, hostname, templatename):
if self.Template_Exist(templatename):
templateid = self.Template_Get_Templateid_by_Name(templatename)
if self.Hostname_Exists(hostname):
hostid = self.Host_Get_HostId_by_Hostname(hostname)
Template_List=self.Host_Get_TemplateIdList_by_Hostname(hostname)
if int(templateid) in Template_List:
print "OK: Hostname[%s] already has the template [%s]" % (hostname,templatename)
else:
Template_List.append(int(templateid))
print "OK: Adding template[%s] to Hostname[%s]" % (templatename,hostname)
PARAM={"hostid":hostid,"templates": [] }
for ID in Template_List:
PARAM["templates"].append({ "templateid": ID })
self.generic_method("host.update", PARAM)
else:
print "ERR: Hostname[%s] not found" % (hostname)
else:
print "ERR: Template[%s] not found" % (templatename)
'''
Load Zabbix template info
'''
def get_zabbix_template(self, TemplateId):
if not "template" in self.history: self.history["template"]={}
if not TemplateId in self.history["template"]:
print "looking for template [%s]" % (TemplateId)
self.generic_method("template.get",{ "output": "extend", "templateid":TemplateId})
TemplateHost="N/A"
for result in zabbix_api.obj["result"]:
if TemplateId == result["templateid"]:
TemplateHost=result["host"]
#print "templateid[%s] - name[%s]" % ( TemplateId, TemplateHost )
self.history["template"][TemplateId]={"host":TemplateHost}
'''
Load Zabbix host info
'''
def get_zabbix_host(self, HostId):
if not "host" in self.history: self.history["host"]={}
if not HostId in self.history["host"]:
print "looking for hostid [%s]" % (HostId)
self.generic_method("host.get", {"output": "extend", "hostid":HostId})
#self.status["host"]={}
#{u'available': u'1', u'maintenance_type': u'0', u'ipmi_errors_from': u'0', u'ipmi_username': u'', u'snmp_disable_until': u'0', u'ipmi_authtype': u'0', u'ipmi_disable_until': u'0', u'lastaccess': u'0', u'snmp_error': u'', u'ipmi_privilege': u'2', u'jmx_error': u'', u'jmx_available': u'0', u'maintenanceid': u'0', u'snmp_available': u'0', u'status': u'0', u'description': u'', u'host': u'zabbix-server', u'disable_until': u'0', u'ipmi_password': u'', u'templateid': u'0', u'ipmi_available': u'0', u'maintenance_status': u'0', u'snmp_errors_from': u'0', u'ipmi_error': u'', u'proxy_hostid': u'0', u'hostid': u'10105', u'name': u'zabbix-server', u'jmx_errors_from': u'0', u'jmx_disable_until': u'0', u'flags': u'0', u'error': u'', u'maintenance_from': u'0', u'errors_from': u'0'}
Host="N/A"
HostName="N/A"
for response in zabbix_api.obj["result"]:
if HostId==response["hostid"]:
Host=response["host"]
HostName=response["name"]
self.history["host"][HostId]={"host":Host, "name":HostName}
'''
Load Zabbix trigger info
'''
def get_zabbix_trigger(self, TriggerId, lastchange=0):
Update=False
CurrentTime=int(time())
if not "trigger" in self.history: self.history["trigger"]={}
if not TriggerId in self.history["trigger"]:
print "looking for triggerid [%s]" % (TriggerId)
self.history["trigger"][TriggerId]={"templateid":"N/A", "lastchange":lastchange, "value":"N/A", "priority":"N/A", "description": "N/A", "host": "N/A", "lastupdate": CurrentTime}
Update=True
# Check if lastchange on Zabbix trigger is newer than the stored one
if (int(lastchange) > int(self.history["trigger"][TriggerId]["lastchange"])): Update=True
# Check if the stored information is older than 1 hour
if (int(CurrentTime) > int(self.history["trigger"][TriggerId]["lastupdate"]+3600)): Update=True
if Update:
self.generic_method("trigger.get",{ "output": ["triggerid", "templateid", "lastchange", "description", "value", "priority"], "selectGroups":"groupid", "selectHosts":"hostid"})
for response in zabbix_api.obj["result"]:
if TriggerId==response["triggerid"]:
for Hosts in response["hosts"]:
self.get_zabbix_host(Hosts["hostid"])
self.history["trigger"][TriggerId]["host"]=self.history["host"][Hosts["hostid"]]["host"]
self.history["trigger"][TriggerId]["templateid"]=response["templateid"]
self.get_zabbix_template(response["templateid"])
self.history["trigger"][TriggerId]["lastchange"]=response["lastchange"]
self.history["trigger"][TriggerId]["lastupdate"]=CurrentTime
self.history["trigger"][TriggerId]["description"]=response["description"]
self.history["trigger"][TriggerId]["value"]=response["value"]
self.history["trigger"][TriggerId]["priority"]=response["priority"]
'''
Print Zabbix Trigger
'''
def print_zabbix_trigger(self, TriggerId):
if int(self.history["trigger"][TriggerId]["value"])==1:
Value="Active"
else:
Value="Inactive"
Priority=self.api_translate["trigger"]["priority"][int(self.history["trigger"][TriggerId]["priority"])]
LastChange=strftime("%d/%m/%y %H:%M:%S",localtime(int(self.history["trigger"][TriggerId]["lastchange"])))
Now=strftime("%d/%m/%y %H:%M:%S",localtime())
print "%s - Trigger:[%s] description:[%s] host:[%s] priority:[%s] Lastchange:[%s]" % (Now, Value, self.history["trigger"][TriggerId]["description"], self.history["trigger"][TriggerId]["host"], Priority, LastChange)
'''
Get zabbix status
'''
def get_zabbix_status(self):
Priority={}
for item in self.api_translate["trigger"]["priority"]:
Priority[self.api_translate["trigger"]["priority"][item]]=item
# read lastchange and value of all triggers
self.generic_method("trigger.get",{ "output": ["triggerid", "lastchange", "value"]})
for response in zabbix_api.obj["result"]:
TriggerId=response["triggerid"]
#for active triggers
if int(response["value"])==1:
self.get_zabbix_trigger(TriggerId, response["lastchange"])
self.print_zabbix_trigger(TriggerId)
## read all events info
#self.generic_method("event.get",{ "output": "extend", "selectHosts":"extend", "sortfield": ["eventid"] })
#for response in zabbix_api.obj["result"]:
#print response
#EventId=response["eventid"]
#EventTime=response["clock"]
## ObjectId = TriggerId
#TriggerId=response["objectid"]
#self.get_zabbix_trigger(TriggerId)
#TemplateId=self.history["trigger"][TriggerId]["templateid"]
#if TemplateId in self.history["template"]:
#TemplateHost=self.history["template"][TemplateId]["host"]
#else:
#TemplateHost="N/A"
#TriggerValue=self.history["trigger"][TriggerId]["value"]
#TriggerPriority=self.history["trigger"][TriggerId]["priority"]
#TriggerLastChange=self.history["trigger"][TriggerId]["lastchange"]
#TriggerDescription=self.history["trigger"][TriggerId]["description"]
#if "lastdata" not in self.history: self.history["lastdata"]={}
#if "event" not in self.history["lastdata"]: self.history["lastdata"]["event"]=0
##if EventId>self.history["lastdata"]["event"]:
#Time=strftime("%d/%m/%y %H:%M:%S",localtime(int(response["clock"])))
#LastTime=strftime("%d/%m/%y %H:%M:%S",localtime(int(TriggerLastChange)))
##print "%s - eventid[%s] - templateid[%s] - Host[%s] - TriggerId[%s] - TriggerValue[%s] - TriggerPriority[%s] - TriggerLastChange[%s] - TriggerDescription[%s]" % (Time, EventId, TemplateId, TemplateHost, TriggerId, TriggerValue, TriggerPriority, LastTime, TriggerDescription)
#self.history["lastdata"]["event"]=EventId
'''
Load history
'''
def history_load(self, FILE_NAME):
try:
with open(FILE_NAME, 'rb') as fp:
data = pickle.load(fp)
except Exception:
data={}
self.history=data
'''
Save history
'''
def history_save(self, FILE_NAME):
with open(FILE_NAME, 'wb') as fp:
pickle.dump(self.history, fp)
'''
Generic request method
'''
def generic_method(self, method, params):
self.obj["method"] = method
self.obj["params"] = params
#self.obj["id"] = self.obj["id"] +1
output=postRequest(self.obj)
if "result" in output:
self.obj["result"]=output["result"]
else:
print "Error, can't retrive data for---------------:", self.obj
print "--------------------------------------------:"
#####print zabbix_api.api_translate
hostname = socket.gethostname()
#print socket.gethostname().split(".")[0]
zabbix_api = api()
zabbix_api.login("admin", "zabbix")
zabbix_api.history_load(".api_history")
#zabbix_api.Action_Create_Autoregister_by_Name("Linux autoregistration", "Linux", "Template OS Linux")
#zabbix_api.Action_Create_Autoregister_by_Name("Windows autoregistration", "Windows", "Template OS Windows")
#zabbix_api.Host_Update_Add_Template_by_Name(hostname, "Template App Zabbix Server")
#zabbix_api.Host_Update_Add_Template_by_Name(hostname, "Template OS Linux")
#zabbix_api.Create_Hostgroup("Monitoring")
#zabbix_api.Host_Update_Add_HostGroup_by_Name(hostname,"Monitoring")
zabbix_api.get_zabbix_status()
zabbix_api.history_save(".api_history")
|
drbartz/zabbix_api
|
zabbix_api.py
|
Python
|
apache-2.0
| 22,095
|
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i:s'
TIME_FORMAT = 'H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i:s'
#DECIMAL_SEPARATOR = ','
#THOUSAND_SEPARATOR = ' '
#NUMBER_GROUPING = 3
|
ConnorMac/stokvel.io
|
src/config/formats/en-za/formats.py
|
Python
|
mit
| 263
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Test bin/update-release-info.py. Also verify that the original files
have the appropriate triggers to cause the modifications.
"""
__revision__ = "test/update-release-info/update-release-info.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os, sys, time
import TestRuntest
# Needed to ensure we're using the correct year
this_year=time.localtime()[0]
TestSCons = 'QMTest/TestSCons.py' .split('/')
README = 'README.rst' .split('/')
ReleaseConfig = 'ReleaseConfig' .split('/')
SConstruct = 'SConstruct' .split('/')
Announce = 'src/Announce.txt' .split('/')
CHANGES = 'src/CHANGES.txt' .split('/')
RELEASE = 'src/RELEASE.txt' .split('/')
Main = 'src/engine/SCons/Script/Main.py' .split('/')
main_in = 'doc/user/main.in' .split('/')
main_xml = 'doc/user/main.xml' .split('/')
test = TestRuntest.TestRuntest(
program = os.path.join('bin', 'update-release-info.py'),
things_to_copy = ['bin']
)
if not os.path.exists(test.program):
test.skip_test("update-release-info.py is not distributed in this package\n")
test.run(arguments = 'bad', status = 1)
# Strings to go in ReleaseConfig
combo_strings = [
# Index 0: version tuple with bad release level
"""version_tuple = (2, 0, 0, 'bad', 0)
""",
# Index 1: Python version tuple
"""unsupported_python_version = (2, 6)
""",
# Index 2: Python version tuple
"""deprecated_python_version = (2, 7)
""",
# Index 3: alpha version tuple
"""version_tuple = (2, 0, 0, 'alpha', 0)
""",
# Index 4: final version tuple
"""version_tuple = (2, 0, 0, 'final', 0)
""",
# Index 5: bad release date
"""release_date = (%d, 12)
"""%this_year,
# Index 6: release date (yyyy, mm, dd)
"""release_date = (%d, 12, 21)
"""%this_year,
# Index 7: release date (yyyy, mm, dd, hh, mm, ss)
"""release_date = (%d, 12, 21, 12, 21, 12)
"""%this_year,
]
combo_error = \
"""ERROR: Config file must contain at least version_tuple,
\tunsupported_python_version, and deprecated_python_version.
"""
def combo_fail(*args, **kw):
kw.setdefault('status', 1)
combo_run(*args, **kw)
def combo_run(*args, **kw):
t = '\n'
for a in args:
t += combo_strings[a]
test.write(ReleaseConfig, t)
kw.setdefault('stdout', combo_error)
test.run(**kw)
combo_fail()
combo_fail(0)
combo_fail(1)
combo_fail(2)
combo_fail(0, 1)
combo_fail(0, 2)
combo_fail(1, 2)
combo_fail(0, 1, 2, stdout =
"""ERROR: `bad' is not a valid release type in version tuple;
\tit must be one of alpha, beta, candidate, or final
""")
# We won't need this entry again, so put in a default
combo_strings[0] = combo_strings[1] + combo_strings[2] + combo_strings[3]
combo_fail(0, 5, stdout =
"""ERROR: Invalid release date (%d, 12)
"""%this_year )
def pave(path):
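"""Recursively create any missing parent directories for the path-component list."""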
path = path[:-1]
if not path or os.path.isdir(os.path.join(*path)):
return
pave(path)
test.subdir(path)
def pave_write(file, contents):
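"""Create parent directories as needed, then write contents to the file."""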
pave(file)
test.write(file, contents)
pave_write(CHANGES, """
RELEASE It doesn't matter what goes here...
""")
pave_write(RELEASE, """
This file has a 3.2.1.beta.20121221 version string in it
""")
pave_write(Announce, """
RELEASE It doesn't matter what goes here...
""")
pave_write(SConstruct, """
month_year = 'March 1945'
copyright_years = '2001, 2002, 2003, 2004, 2005, 2006, 2007'
default_version = '0.98.97'
""")
pave_write(README, """
These files are a part of 33.22.11:
scons-33.22.11.tar.gz
scons-33.22.11.win32.exe
scons-33.22.11.zip
scons-33.22.11.rpm
scons-33.22.11.deb
scons-33.22.11.beta.20012122112.suffix
""")
pave_write(TestSCons, """
copyright_years = Some junk to be overwritten
default_version = More junk
python_version_unsupported = Yep, more junk
python_version_deprecated = And still more
""")
pave_write(Main, """
unsupported_python_version = Not done with junk
deprecated_python_version = It goes on forever
""")
pave_write(main_in, """
TODO
""")
pave_write(main_xml, """
TODO
""")
def updating_run(*args):
stdout = ''
for file in args:
stdout += 'Updating %s...\n' % os.path.join(*file)
combo_run(0, 7, stdout = stdout)
updating_run(CHANGES, RELEASE, Announce, SConstruct, README, TestSCons, Main)
test.must_match(CHANGES, """
RELEASE 2.0.0.alpha.yyyymmdd - NEW DATE WILL BE INSERTED HERE
""", mode = 'r')
test.must_match(RELEASE, """
This file has a 2.0.0.alpha.yyyymmdd version string in it
""", mode = 'r')
test.must_match(Announce, """
RELEASE 2.0.0.alpha.yyyymmdd - NEW DATE WILL BE INSERTED HERE
""", mode = 'r')
years = '2001 - %d'%(this_year + 1)
test.must_match(SConstruct, """
month_year = 'MONTH YEAR'
copyright_years = %s
default_version = '2.0.0.alpha.yyyymmdd'
""" % repr(years), mode = 'r')
test.must_match(README, """
These files are a part of 33.22.11:
scons-2.0.0.alpha.yyyymmdd.tar.gz
scons-2.0.0.alpha.yyyymmdd.win32.exe
scons-2.0.0.alpha.yyyymmdd.zip
scons-2.0.0.alpha.yyyymmdd.rpm
scons-2.0.0.alpha.yyyymmdd.deb
scons-2.0.0.alpha.yyyymmdd.suffix
""", mode = 'r')
# should get Python floors from TestSCons module.
test.must_match(TestSCons, """
copyright_years = '%s'
default_version = '2.0.0.alpha.yyyymmdd'
python_version_unsupported = (2, 6)
python_version_deprecated = (2, 7)
"""%years, mode = 'r')
# should get Python floors from TestSCons module.
test.must_match(Main, """
unsupported_python_version = (2, 6)
deprecated_python_version = (2, 7)
""", mode = 'r')
#TODO: Release option
#TODO: ==============
#TODO:
#TODO: Dates in beta/candidate flow
#TODO:
#TODO: Dates in final flow
#TODO:
#TODO: Post option
#TODO: ===========
#TODO:
#TODO: Dates in post flow
#TODO:
#TODO: Update minor or micro version
#TODO:
#TODO: ReleaseConfig - new version tuple
#TODO:
#TODO: CHANGES - new section
#TODO:
#TODO: RELEASE - new template
#TODO:
#TODO: Announce - new section
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
test/update-release-info/update-release-info.py
|
Python
|
mit
| 7,356
|
from arena_sync import *
from stats import *
|
siggame/webserver
|
webserver/hermes/tasks/__init__.py
|
Python
|
bsd-3-clause
| 45
|
"""CPStats, a package for collecting and reporting on program statistics.
Overview
========
Statistics about program operation are an invaluable monitoring and debugging
tool. Unfortunately, the gathering and reporting of these critical values is
usually ad-hoc. This package aims to add a centralized place for gathering
statistical performance data, a structure for recording that data which
provides for extrapolation of that data into more useful information,
and a method of serving that data to both human investigators and
monitoring software. Let's examine each of those in more detail.
Data Gathering
--------------
Just as Python's `logging` module provides a common importable for gathering
and sending messages, performance statistics would benefit from a similar
common mechanism, and one that does *not* require each package which wishes
to collect stats to import a third-party module. Therefore, we choose to
re-use the `logging` module by adding a `statistics` object to it.
That `logging.statistics` object is a nested dict. It is not a custom class,
because that would:
1. require libraries and applications to import a third-party module in order to participate
2. inhibit innovation in extrapolation approaches and in reporting tools, and
3. be slow.
There are, however, some specifications regarding the structure of the dict.::
{
+----"SQLAlchemy": {
| "Inserts": 4389745,
| "Inserts per Second":
| lambda s: s["Inserts"] / (time() - s["Start"]),
| C +---"Table Statistics": {
| o | "widgets": {-----------+
N | l | "Rows": 1.3M, | Record
a | l | "Inserts": 400, |
m | e | },---------------------+
e | c | "froobles": {
s | t | "Rows": 7845,
p | i | "Inserts": 0,
a | o | },
c | n +---},
e | "Slow Queries":
| [{"Query": "SELECT * FROM widgets;",
| "Processing Time": 47.840923343,
| },
| ],
+----},
}
The `logging.statistics` dict has four levels. The topmost level is nothing
more than a set of names to introduce modularity, usually along the lines of
package names. If the SQLAlchemy project wanted to participate, for example,
it might populate the item `logging.statistics['SQLAlchemy']`, whose value
would be a second-layer dict we call a "namespace". Namespaces help multiple
packages to avoid collisions over key names, and make reports easier to read,
to boot. The maintainers of SQLAlchemy should feel free to use more than one
namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case
or other syntax constraints on the namespace names; they should be chosen
to be maximally readable by humans (neither too short nor too long).
Each namespace, then, is a dict of named statistical values, such as
'Requests/sec' or 'Uptime'. You should choose names which will look
good on a report: spaces and capitalization are just fine.
In addition to scalars, values in a namespace MAY be a (third-layer)
dict, or a list, called a "collection". For example, the CherryPy
:class:`StatsTool` keeps track of what each request is doing (or has most
recently done) in a 'Requests' collection, where each key is a thread ID; each
value in the subdict MUST be a fourth dict (whew!) of statistical data about
each thread. We call each subdict in the collection a "record". Similarly,
the :class:`StatsTool` also keeps a list of slow queries, where each record
contains data about each slow query, in order.
Values in a namespace or record may also be functions, which brings us to:
Extrapolation
-------------
The collection of statistical data needs to be fast, as close to unnoticeable
as possible to the host program. That requires us to minimize I/O, for example,
but in Python it also means we need to minimize function calls. So when you
are designing your namespace and record values, try to insert the most basic
scalar values you already have on hand.
When it comes time to report on the gathered data, however, we usually have
much more freedom in what we can calculate. Therefore, whenever reporting
tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents of
`logging.statistics` for reporting, they first call `extrapolate_statistics`
(passing the whole `statistics` dict as the only argument). This makes a
deep copy of the statistics dict so that the reporting tool can both iterate
over it and even change it without harming the original. But it also expands
any functions in the dict by calling them. For example, you might have a
'Current Time' entry in the namespace with the value "lambda scope: time.time()".
The "scope" parameter is the current namespace dict (or record, if we're
currently expanding one of those instead), allowing you access to existing
static entries. If you're truly evil, you can even modify more than one entry
at a time.
However, don't try to calculate an entry and then use its value in further
extrapolations; the order in which the functions are called is not guaranteed.
This can lead to a certain amount of duplicated work (or a redesign of your
schema), but that's better than complicating the spec.
After the whole thing has been extrapolated, it's time for:
Reporting
---------
The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates it all,
and then transforms it to HTML for easy viewing. Each namespace gets its own
header and attribute table, plus an extra table for each collection. This is
NOT part of the statistics specification; other tools can format how they like.
You can control which columns are output and how they are formatted by updating
StatsPage.formatting, which is a dict that mirrors the keys and nesting of
`logging.statistics`. The difference is that, instead of data values, it has
formatting values. Use None for a given key to indicate to the StatsPage that a
given column should not be output. Use a string with formatting (such as '%.3f')
to interpolate the value(s), or use a callable (such as lambda v: v.isoformat())
for more advanced formatting. Any entry which is not mentioned in the formatting
dict is output unchanged.
Monitoring
----------
Although the HTML output takes pains to assign unique id's to each <td> with
statistical data, you're probably better off fetching /cpstats/data, which
outputs the whole (extrapolated) `logging.statistics` dict in JSON format.
That is probably easier to parse, and doesn't have any formatting controls,
so you get the "original" data in a consistently-serialized format.
Note: there's no treatment yet for datetime objects. Try time.time() instead
for now if you can. Nagios will probably thank you.
Turning Collection Off
----------------------
It is recommended each namespace have an "Enabled" item which, if False,
stops collection (but not reporting) of statistical data. Applications
SHOULD provide controls to pause and resume collection by setting these
entries to False or True, if present.
Usage
=====
To collect statistics on CherryPy applications::
from cherrypy.lib import cpstats
appconfig['/']['tools.cpstats.on'] = True
To collect statistics on your own code::
import logging
# Initialize the repository
if not hasattr(logging, 'statistics'): logging.statistics = {}
# Initialize my namespace
mystats = logging.statistics.setdefault('My Stuff', {})
# Initialize my namespace's scalars and collections
mystats.update({
'Enabled': True,
'Start Time': time.time(),
'Important Events': 0,
'Events/Second': lambda s: (
(s['Important Events'] / (time.time() - s['Start Time']))),
})
...
for event in events:
...
# Collect stats
if mystats.get('Enabled', False):
mystats['Important Events'] += 1
To report statistics::
root.cpstats = cpstats.StatsPage()
To format statistics reports::
See 'Reporting', above.
"""
# -------------------------------- Statistics -------------------------------- #
import logging
if not hasattr(logging, 'statistics'): logging.statistics = {}
def extrapolate_statistics(scope):
"""Return an extrapolated copy of the given scope."""
c = {}
for k, v in list(scope.items()):
if isinstance(v, dict):
v = extrapolate_statistics(v)
elif isinstance(v, (list, tuple)):
v = [extrapolate_statistics(record) for record in v]
elif hasattr(v, '__call__'):
v = v(scope)
c[k] = v
return c
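# A minimal sketch of extrapolation (the names below are illustrative only):
# callable entries are replaced by their value for the enclosing scope.
#   scope = {'Count': 10, 'Count x2': lambda s: s['Count'] * 2}
#   extrapolate_statistics(scope)  # -> {'Count': 10, 'Count x2': 20}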
# --------------------- CherryPy Applications Statistics --------------------- #
import threading
import time
import cherrypy
appstats = logging.statistics.setdefault('CherryPy Applications', {})
appstats.update({
'Enabled': True,
'Bytes Read/Request': lambda s: (s['Total Requests'] and
(s['Total Bytes Read'] / float(s['Total Requests'])) or 0.0),
'Bytes Read/Second': lambda s: s['Total Bytes Read'] / s['Uptime'](s),
'Bytes Written/Request': lambda s: (s['Total Requests'] and
(s['Total Bytes Written'] / float(s['Total Requests'])) or 0.0),
'Bytes Written/Second': lambda s: s['Total Bytes Written'] / s['Uptime'](s),
'Current Time': lambda s: time.time(),
'Current Requests': 0,
'Requests/Second': lambda s: float(s['Total Requests']) / s['Uptime'](s),
'Server Version': cherrypy.__version__,
'Start Time': time.time(),
'Total Bytes Read': 0,
'Total Bytes Written': 0,
'Total Requests': 0,
'Total Time': 0,
'Uptime': lambda s: time.time() - s['Start Time'],
'Requests': {},
})
proc_time = lambda s: time.time() - s['Start Time']
class ByteCountWrapper(object):
"""Wraps a file-like object, counting the number of bytes read."""
def __init__(self, rfile):
self.rfile = rfile
self.bytes_read = 0
def read(self, size=-1):
data = self.rfile.read(size)
self.bytes_read += len(data)
return data
def readline(self, size=-1):
data = self.rfile.readline(size)
self.bytes_read += len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
return data
average_uriset_time = lambda s: s['Count'] and (s['Sum'] / s['Count']) or 0
class StatsTool(cherrypy.Tool):
"""Record various information about the current request."""
def __init__(self):
cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
if appstats.get('Enabled', False):
cherrypy.Tool._setup(self)
self.record_start()
def record_start(self):
"""Record the beginning of a request."""
request = cherrypy.serving.request
if not hasattr(request.rfile, 'bytes_read'):
request.rfile = ByteCountWrapper(request.rfile)
request.body.fp = request.rfile
r = request.remote
appstats['Current Requests'] += 1
appstats['Total Requests'] += 1
appstats['Requests'][threading._get_ident()] = {
'Bytes Read': None,
'Bytes Written': None,
# Use a lambda so the ip gets updated by tools.proxy later
'Client': lambda s: '%s:%s' % (r.ip, r.port),
'End Time': None,
'Processing Time': proc_time,
'Request-Line': request.request_line,
'Response Status': None,
'Start Time': time.time(),
}
def record_stop(self, uriset=None, slow_queries=1.0, slow_queries_count=100,
debug=False, **kwargs):
"""Record the end of a request."""
resp = cherrypy.serving.response
w = appstats['Requests'][threading._get_ident()]
r = cherrypy.request.rfile.bytes_read
w['Bytes Read'] = r
appstats['Total Bytes Read'] += r
if resp.stream:
w['Bytes Written'] = 'chunked'
else:
cl = int(resp.headers.get('Content-Length', 0))
w['Bytes Written'] = cl
appstats['Total Bytes Written'] += cl
w['Response Status'] = getattr(resp, 'output_status', None) or resp.status
w['End Time'] = time.time()
p = w['End Time'] - w['Start Time']
w['Processing Time'] = p
appstats['Total Time'] += p
appstats['Current Requests'] -= 1
if debug:
cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')
if uriset:
rs = appstats.setdefault('URI Set Tracking', {})
r = rs.setdefault(uriset, {
'Min': None, 'Max': None, 'Count': 0, 'Sum': 0,
'Avg': average_uriset_time})
if r['Min'] is None or p < r['Min']:
r['Min'] = p
if r['Max'] is None or p > r['Max']:
r['Max'] = p
r['Count'] += 1
r['Sum'] += p
if slow_queries and p > slow_queries:
sq = appstats.setdefault('Slow Queries', [])
sq.append(w.copy())
if len(sq) > slow_queries_count:
sq.pop(0)
import cherrypy
cherrypy.tools.cpstats = StatsTool()
# ---------------------- CherryPy Statistics Reporting ---------------------- #
import os
thisdir = os.path.abspath(os.path.dirname(__file__))
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json = None
missing = object()
locale_date = lambda v: time.strftime('%c', time.gmtime(v))
iso_format = lambda v: time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v))
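# e.g. iso_format(0) -> '1970-01-01 00:00:00' (the epoch, rendered in UTC)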
def pause_resume(ns):
def _pause_resume(enabled):
pause_disabled = ''
resume_disabled = ''
if enabled:
resume_disabled = 'disabled="disabled" '
else:
pause_disabled = 'disabled="disabled" '
return """
<form action="pause" method="POST" style="display:inline">
<input type="hidden" name="namespace" value="%s" />
<input type="submit" value="Pause" %s/>
</form>
<form action="resume" method="POST" style="display:inline">
<input type="hidden" name="namespace" value="%s" />
<input type="submit" value="Resume" %s/>
</form>
""" % (ns, pause_disabled, ns, resume_disabled)
return _pause_resume
class StatsPage(object):
formatting = {
'CherryPy Applications': {
'Enabled': pause_resume('CherryPy Applications'),
'Bytes Read/Request': '%.3f',
'Bytes Read/Second': '%.3f',
'Bytes Written/Request': '%.3f',
'Bytes Written/Second': '%.3f',
'Current Time': iso_format,
'Requests/Second': '%.3f',
'Start Time': iso_format,
'Total Time': '%.3f',
'Uptime': '%.3f',
'Slow Queries': {
'End Time': None,
'Processing Time': '%.3f',
'Start Time': iso_format,
},
'URI Set Tracking': {
'Avg': '%.3f',
'Max': '%.3f',
'Min': '%.3f',
'Sum': '%.3f',
},
'Requests': {
'Bytes Read': '%s',
'Bytes Written': '%s',
'End Time': None,
'Processing Time': '%.3f',
'Start Time': None,
},
},
'CherryPy WSGIServer': {
'Enabled': pause_resume('CherryPy WSGIServer'),
'Connections/second': '%.3f',
'Start time': iso_format,
},
}
def index(self):
# Transform the raw data into pretty output for HTML
yield """
<html>
<head>
<title>Statistics</title>
<style>
th, td {
padding: 0.25em 0.5em;
border: 1px solid #666699;
}
table {
border-collapse: collapse;
}
table.stats1 {
width: 100%;
}
table.stats1 th {
font-weight: bold;
text-align: right;
background-color: #CCD5DD;
}
table.stats2, h2 {
margin-left: 50px;
}
table.stats2 th {
font-weight: bold;
text-align: center;
background-color: #CCD5DD;
}
</style>
</head>
<body>
"""
for title, scalars, collections in self.get_namespaces():
yield """
<h1>%s</h1>
<table class='stats1'>
<tbody>
""" % title
for i, (key, value) in enumerate(scalars):
colnum = i % 3
if colnum == 0: yield """
<tr>"""
yield """
<th>%(key)s</th><td id='%(title)s-%(key)s'>%(value)s</td>""" % vars()
if colnum == 2: yield """
</tr>"""
if colnum == 0: yield """
<th></th><td></td>
<th></th><td></td>
</tr>"""
elif colnum == 1: yield """
<th></th><td></td>
</tr>"""
yield """
</tbody>
</table>"""
for subtitle, headers, subrows in collections:
yield """
<h2>%s</h2>
<table class='stats2'>
<thead>
<tr>""" % subtitle
for key in headers:
yield """
<th>%s</th>""" % key
yield """
</tr>
</thead>
<tbody>"""
for subrow in subrows:
yield """
<tr>"""
for value in subrow:
yield """
<td>%s</td>""" % value
yield """
</tr>"""
yield """
</tbody>
</table>"""
yield """
</body>
</html>
"""
index.exposed = True
def get_namespaces(self):
"""Yield (title, scalars, collections) for each namespace."""
s = extrapolate_statistics(logging.statistics)
for title, ns in sorted(s.items()):
scalars = []
collections = []
ns_fmt = self.formatting.get(title, {})
for k, v in sorted(ns.items()):
fmt = ns_fmt.get(k, {})
if isinstance(v, dict):
headers, subrows = self.get_dict_collection(v, fmt)
collections.append((k, ['ID'] + headers, subrows))
elif isinstance(v, (list, tuple)):
headers, subrows = self.get_list_collection(v, fmt)
collections.append((k, headers, subrows))
else:
format = ns_fmt.get(k, missing)
if format is None:
# Don't output this column.
continue
if hasattr(format, '__call__'):
v = format(v)
elif format is not missing:
v = format % v
scalars.append((k, v))
yield title, scalars, collections
def get_dict_collection(self, v, formatting):
"""Return ([headers], [rows]) for the given collection."""
# E.g., the 'Requests' dict.
headers = []
for record in v.itervalues():
for k3 in record:
format = formatting.get(k3, missing)
if format is None:
# Don't output this column.
continue
if k3 not in headers:
headers.append(k3)
headers.sort()
subrows = []
for k2, record in sorted(v.items()):
subrow = [k2]
for k3 in headers:
v3 = record.get(k3, '')
format = formatting.get(k3, missing)
if format is None:
# Don't output this column.
continue
if hasattr(format, '__call__'):
v3 = format(v3)
elif format is not missing:
v3 = format % v3
subrow.append(v3)
subrows.append(subrow)
return headers, subrows
def get_list_collection(self, v, formatting):
"""Return ([headers], [subrows]) for the given collection."""
# E.g., the 'Slow Queries' list.
headers = []
for record in v:
for k3 in record:
format = formatting.get(k3, missing)
if format is None:
# Don't output this column.
continue
if k3 not in headers:
headers.append(k3)
headers.sort()
subrows = []
for record in v:
subrow = []
for k3 in headers:
v3 = record.get(k3, '')
format = formatting.get(k3, missing)
if format is None:
# Don't output this column.
continue
if hasattr(format, '__call__'):
v3 = format(v3)
elif format is not missing:
v3 = format % v3
subrow.append(v3)
subrows.append(subrow)
return headers, subrows
if json is not None:
def data(self):
s = extrapolate_statistics(logging.statistics)
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps(s, sort_keys=True, indent=4)
data.exposed = True
def pause(self, namespace):
logging.statistics.get(namespace, {})['Enabled'] = False
raise cherrypy.HTTPRedirect('./')
pause.exposed = True
pause.cp_config = {'tools.allow.on': True,
'tools.allow.methods': ['POST']}
def resume(self, namespace):
logging.statistics.get(namespace, {})['Enabled'] = True
raise cherrypy.HTTPRedirect('./')
resume.exposed = True
resume.cp_config = {'tools.allow.on': True,
'tools.allow.methods': ['POST']}
|
paolodoz/timesheet
|
cherrypy/lib/cpstats.py
|
Python
|
gpl-2.0
| 22,573
|
# -*- coding: utf-8 -*-
"""Null device output module."""
from plaso.output import interface
from plaso.output import manager
class NullOutputModule(interface.OutputModule):
"""Null device output module."""
NAME = 'null'
DESCRIPTION = 'Output module that does not output anything.'
# pylint: disable=unused-argument
def WriteEventBody(self, event, event_data, event_data_stream, event_tag):
"""Writes event values to the output.
Args:
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
event_tag (EventTag): event tag.
"""
return
manager.OutputManager.RegisterOutput(NullOutputModule)
|
joachimmetz/plaso
|
plaso/output/null.py
|
Python
|
apache-2.0
| 711
|