text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
"""Distutils installer for yambopy."""
from distutils.core import setup

# Python packages shipped with this distribution.
packages_yambopy = [
    'yambopy',
    'yambopy.io',
    'yambopy.dbs',
    'yambopy.bse',
    'qepy',
    'schedulerpy',
    'yamboparser',
]

if __name__ == '__main__':
    setup(
        name='yambopy',
        version='0.1',
        description='Automatic workflows for Yambo.',
        author='Henrique Miranda',
        author_email='miranda.henrique@gmail.com',
        scripts=['scripts/yambopy'],
        packages=packages_yambopy,
    )
| henriquemiranda/yambopy | setup.py | Python | bsd-3-clause | 580 | [
"Yambo"
] | 473833673a066c5106220b27911d7230ee70462cf65216b2a943e4f25e433e37 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
# Public API of this module.
__all__ = [
    "ndtr",
    "log_ndtr",
]

# log_ndtr uses different functions over the ranges
# (-infty, lower](lower, upper](upper, infty)
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = -20
LOGNDTR_FLOAT32_LOWER = -10
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly
# conservative, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT32_UPPER = 5
LOGNDTR_FLOAT64_UPPER = 8
def ndtr(x, name=None):
  """Normal distribution function.

  Returns the area under the Gaussian probability density function, integrated
  from minus infinity to x:

  ```
                    1       / x
     ndtr(x)  = ---------- |    exp(-0.5 t^2) dt
                sqrt(2 pi) /-inf

              = 0.5 (1 + erf(x / sqrt(2)))
              = 0.5 erfc(x / sqrt(2))
  ```

  Args:
    x: `Tensor` of type `float32`, `float64`.
    name: Python string. A name for the operation (default="ndtr").

  Returns:
    ndtr: `Tensor` with `dtype=x.dtype`.

  Raises:
    TypeError: if `x` is not floating-type.
  """
  with ops.name_scope(name, "ndtr", values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    # Only single and double precision floats are supported by the
    # erf/erfc kernels used in _ndtr.
    dtype = x.dtype.as_numpy_dtype
    if dtype not in (np.float32, np.float64):
      raise TypeError(
          "x.dtype=%s is not handled, see docstring for supported types."
          % x.dtype)
    return _ndtr(x)
def _ndtr(x):
  """Implements ndtr core logic.

  Evaluates 0.5 * erfc(-x / sqrt(2)) in a numerically careful way: for
  small |x| the erf form is used, otherwise erfc of |x| (reflected for
  positive arguments) avoids cancellation.
  """
  half_sqrt_2 = constant_op.constant(
      0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
  w = x * half_sqrt_2
  z = math_ops.abs(w)
  # Branch values; `select` evaluates both, so computing them up front
  # matches the original nested-select behavior exactly.
  near_zero = 1. + math_ops.erf(w)
  in_tail = math_ops.select(math_ops.greater(w, 0.),
                            2. - math_ops.erfc(z),
                            math_ops.erfc(z))
  y = math_ops.select(math_ops.less(z, half_sqrt_2), near_zero, in_tail)
  return 0.5 * y
def log_ndtr(x, series_order=3, name=None):
  """Log Normal distribution function.

  For details of the Normal distribution function see `ndtr`.

  This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
  using an asymptotic series. Specifically:
  - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
    `log(1-x) ~= -x, x << 1`.
  - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
    and take a log.
  - For `x <= lower_segment`, we use the series approximation of erf to compute
    the log CDF directly.

  The `lower_segment` is set based on the precision of the input:

  ```
  lower_segment = { -20,  x.dtype=float64
                  { -10,  x.dtype=float32
  upper_segment = {  8,   x.dtype=float64
                  {  5,   x.dtype=float32
  ```

  When `x < lower_segment`, the `ndtr` asymptotic series approximation is:

  ```
     ndtr(x) = scale * (1 + sum) + R_N
     scale   = exp(-0.5 x^2) / (-x sqrt(2 pi))
     sum     = Sum{(-1)^n (2n-1)!! / (x^2)^n, n=1:N}
     R_N     = O(exp(-0.5 x^2) (2N+1)!! / |x|^{2N+3})
  ```

  where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
  [double-factorial](https://en.wikipedia.org/wiki/Double_factorial).

  Args:
    x: `Tensor` of type `float32`, `float64`.
    series_order: Positive Python `integer`. Maximum depth to
      evaluate the asymptotic expansion. This is the `N` above.
    name: Python string. A name for the operation (default="log_ndtr").

  Returns:
    log_ndtr: `Tensor` with `dtype=x.dtype`.

  Raises:
    TypeError: if `x.dtype` is not handled.
    TypeError: if `series_order` is a not Python `integer.`
    ValueError: if `series_order` is not in `[1, 30]`.
  """
  # Validate series_order eagerly (graph-independent Python checks).
  if not isinstance(series_order, int):
    raise TypeError("series_order must be a Python integer.")
  if series_order < 1:
    raise ValueError("series_order must be positive.")
  if series_order > 30:
    raise ValueError("series_order must be <= 30.")
  with ops.name_scope(name, "log_ndtr", values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    # Pick precision-appropriate switch-over points (module constants).
    if x.dtype.as_numpy_dtype == np.float64:
      lower_segment = LOGNDTR_FLOAT64_LOWER
      upper_segment = LOGNDTR_FLOAT64_UPPER
    elif x.dtype.as_numpy_dtype == np.float32:
      lower_segment = LOGNDTR_FLOAT32_LOWER
      upper_segment = LOGNDTR_FLOAT32_UPPER
    else:
      raise TypeError("x.dtype=%s is not supported." % x.dtype)
    # The basic idea here was ported from py/scipy/special/cephes/ndtr.c.
    # We copy the main idea, with a few changes
    # * For x >> 1, and X ~ Normal(0, 1),
    #     Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
    #   which extends the range of validity of this function.
    # * We use one fixed series_order for all of 'x', rather than adaptive.
    # * Our docstring properly reflects that this is an asymptotic series, not a
    #   Tayor series. We also provided a correct bound on the remainder.
    # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
    #   x=0. This happens even though the branch is unchosen because when x=0
    #   the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
    #   regardless of whether dy is finite. Note that the minimum is a NOP if
    #   the branch is chosen.
    return math_ops.select(
        math_ops.greater(x, upper_segment),
        -_ndtr(-x),  # log(1-x) ~= -x, x << 1
        math_ops.select(math_ops.greater(x, lower_segment),
                        math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
                        _log_ndtr_lower(math_ops.minimum(x, lower_segment),
                                        series_order)))
def _log_ndtr_lower(x, series_order):
  """Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x << -1`."""
  x_sq = math_ops.square(x)
  # Log of the prefactor: exp(-0.5 x^2) / (-x sqrt(2 pi)).
  log_scale = -0.5 * x_sq - math_ops.log(-x) - 0.5 * math.log(2. * math.pi)
  # Alternating series Sum{(-1)^n (2n-1)!! / (x^2)^n, n=1:N}. Positive and
  # negative terms are accumulated separately and combined once at the end,
  # preserving the original summation order (and thus float rounding).
  pos_terms = 0.
  neg_terms = 0.
  power = x_sq  # x^{2n}, starting at n = 1.
  for order in range(1, series_order + 1):
    term = _double_factorial(2 * order - 1) / power
    if order % 2:
      neg_terms -= term
    else:
      pos_terms += term
    power *= x_sq
  return log_scale + math_ops.log(1. + pos_terms + neg_terms)
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
| neilhan/tensorflow | tensorflow/contrib/bayesflow/python/ops/special_math.py | Python | apache-2.0 | 7,692 | [
"Gaussian"
] | 4ff48c042621e00057fca812ecdb5b0d39c6798d85b6d3a34db5bbf00a284ae3 |
import os
import sys
import DsaArrayResolutionCy3 as ARes
import cx_Oracle
import DsaTools3 as DsaTool
from DsaGetCycle3 import get_all_apdm, get_apdm
from collections import namedtuple
from DsaXmlParsers3 import *
from DsaConverter3 import *
# XML namespace prefixes used when parsing APDM documents.
prj = '{Alma/ObsPrep/ObsProject}'
val = '{Alma/ValueTypes}'
sbl = '{Alma/ObsPrep/SchedBlock}'
# Widen pandas console output for interactive inspection of the dataframes.
pd.options.display.width = 200
pd.options.display.max_columns = 100
# Simple start/end interval record (presumably used for time-range
# arithmetic by consumers of this module — not referenced in this chunk).
Range = namedtuple('Range', ['start', 'end'])
# Project states that indicate a project is still in Phase I.
PHASE_I_STATUS = ["Phase1Submitted", "Approved"]
# noinspection PyAttributeOutsideInit
class DsaDatabase3(object):
"""
DsaDatabase3 is the class that stores the Projects and SB information in
dataframes, and it also has the methods to connect and query the OSF
archive for this info (ETL)
A default instance will use the directory found on the $WTO system variable,
as a cache, and it will found the projects in the appropiate state for the
relevant Cycles.
:key refresh_apdm: Should the disk xml database be rebuilt? Default is True
:key path: path to store the xml database. Default is $WTO variable
:key allc2: temporal parameter for Cycle 3, load all Cycle2 projects, and
not only grade A. Default = True
:key loadp1: load phase I projects, and not only phase II. Default=True
"""
    def __init__(self, path=None, refresh_apdm=True, allc2=True, loadp1=True):
        """
        Initialize the WTO3 database: connect to the archive, run the
        project/SB/QA0/executive queries, and finally call `start_apa`.

        :type path: str
        :type refresh_apdm: bool
        :type allc2: bool
        :type loadp1: bool
        """
        self._refresh_apdm = refresh_apdm
        self._allc2 = allc2
        self._loadp1 = loadp1
        # Default Paths and Preferences ($WTO for config, $APDM_C3 as the
        # default XML cache directory unless `path` is given).
        self._wto_path = os.environ['WTO']
        if path:
            self._data_path = path
        else:
            self._data_path = os.environ['APDM_C3']
        # Project states excluded from the working set (see start_apa).
        self.status = ["Canceled", "Rejected"]
        self.obsproject = pd.DataFrame()
        self._ares = ARes.ArrayRes(self._wto_path + 'conf/')
        # Main query: Cycle 2/3 projects joined with status, proposal and
        # grade information.
        self._sql1 = str(
            "SELECT obs1.PRJ_ARCHIVE_UID as OBSPROJECT_UID, obs1.PI, "
            "obs1.PRJ_NAME,"
            "CODE,PRJ_SCIENTIFIC_RANK,PRJ_VERSION,"
            "PRJ_LETTER_GRADE,DOMAIN_ENTITY_STATE as PRJ_STATUS,"
            "obs3.ARCHIVE_UID as OBSPROPOSAL_UID, obs4.DC_LETTER_GRADE,"
            "obs3.CYCLE "
            "FROM ALMA.BMMV_OBSPROJECT obs1, ALMA.OBS_PROJECT_STATUS obs2,"
            " ALMA.BMMV_OBSPROPOSAL obs3, ALMA.PROPOSAL obs4 "
            "WHERE regexp_like (CODE, '^201[35]\..*\.[AST]') "
            "AND obs2.OBS_PROJECT_ID = obs1.PRJ_ARCHIVE_UID AND "
            "obs1.PRJ_ARCHIVE_UID = obs3.PROJECTUID AND "
            "obs4.ARCHIVE_UID = obs3.ARCHIVE_UID AND "
            "obs4.DC_LETTER_GRADE IN ('A', 'B', 'C')")
        # Oracle connection string comes from the environment.
        conx_string = os.environ['CON_STR']
        self._connection = cx_Oracle.connect(conx_string)
        self._cursor = self._connection.cursor()
        # SB states and execution counts for all Cycle 2/3 SchedBlocks.
        self._sql_sbstates = str(
            "SELECT bs.PRJ_REF as OBSPROJECT_UID, bs.STATUS as SB_STATE,"
            "SB_ARCHIVE_UID as SB_UID, EXECUTION_COUNT as EXECOUNT, "
            "sbs.DOMAIN_ENTITY_STATE as SB_STATE2 "
            "FROM ALMA.MV_SCHEDBLOCK bs, ALMA.SCHED_BLOCK_STATUS sbs,"
            "ALMA.BMMV_OBSPROJECT obs "
            "WHERE bs.SB_ARCHIVE_UID = sbs.DOMAIN_ENTITY_ID "
            "AND obs.PRJ_ARCHIVE_UID = bs.PRJ_REF "
            "AND regexp_like (obs.PRJ_CODE, '^201[35]\..*\.[AST]')")
        self._cursor.execute(self._sql_sbstates)
        self.sb_status = pd.DataFrame(
            self._cursor.fetchall(),
            columns=[rec[0] for rec in self._cursor.description]
        ).set_index('SB_UID', drop=False)
        self.sb_status['EXECOUNT'] = self.sb_status.EXECOUNT.astype(float)
        # self.qa0: QAO flags for observed SBs
        # Query QA0 flags from AQUA tables
        self._sqlqa0 = str(
            "SELECT aqua.SCHEDBLOCKUID as SB_UID, aqua.EXECBLOCKUID, "
            "aqua.STARTTIME, aqua.ENDTIME, aqua.QA0STATUS, shift.SE_STATUS, "
            "shift.SE_PROJECT_CODE, shift.SE_ARRAYENTRY_ID, "
            "aqua.FINALCOMMENTID "
            "FROM ALMA.AQUA_V_EXECBLOCK aqua, ALMA.SHIFTLOG_ENTRIES shift "
            "WHERE regexp_like (aqua.OBSPROJECTCODE, '^201[35]\..*\.[AST]') "
            "AND aqua.EXECBLOCKUID = shift.SE_EB_UID")
        # Final QA0 comments (CLOB column read via DBMS_LOB.SUBSTR).
        self._sqlqa0com = str(
            "SELECT aqua.FINALCOMMENTID, "
            "DBMS_LOB.SUBSTR(acom.CCOMMENT) as COMENT "
            "FROM ALMA.AQUA_V_EXECBLOCK aqua, ALMA.AQUA_COMMENT acom "
            "WHERE regexp_like (aqua.OBSPROJECTCODE, '^201[35]\..*\.[AST]') "
            "AND aqua.FINALCOMMENTID = acom.COMMENTID"
        )
        self._cursor.execute(self._sqlqa0)
        self.aqua_execblock = pd.DataFrame(
            self._cursor.fetchall(),
            columns=[rec[0] for rec in self._cursor.description])
        self._cursor.execute(self._sqlqa0com)
        self._execblock_comm = pd.DataFrame(
            self._cursor.fetchall(),
            columns=[rec[0] for rec in self._cursor.description]
        ).set_index('FINALCOMMENTID', drop=False)
        # self.aqdeb = self.aqua_execblock.copy()
        # Attach the final comment text to each execblock row.
        self.aqua_execblock = pd.merge(
            self.aqua_execblock, self._execblock_comm, on='FINALCOMMENTID',
            how='left').set_index('SB_UID', drop=False)
        # Execution-block duration in hours.
        self.aqua_execblock['delta'] = (self.aqua_execblock.ENDTIME -
                                        self.aqua_execblock.STARTTIME)
        self.aqua_execblock['delta'] = self.aqua_execblock.apply(
            lambda x: x['delta'].total_seconds() / 3600., axis=1
        )
        # Query for Executives
        self._sql_executive = str(
            "SELECT PROJECTUID as OBSPROJECT_UID, ASSOCIATEDEXEC "
            "FROM ALMA.BMMV_OBSPROPOSAL "
            "WHERE regexp_like (CYCLE, '^201[35].[1A]')")
        self._cursor.execute(self._sql_executive)
        self.executive = pd.DataFrame(
            self._cursor.fetchall(), columns=['OBSPROJECT_UID', 'EXEC'])
        # NOTE(review): this query is defined but never executed in this
        # chunk — presumably used elsewhere or kept for reference.
        self._sql_obstatus_exec = str(
            "SELECT obs1.ARCHIVE_UID as SB_UID,"
            "obs1.PRJ_REF as OBSPROJECT_UID, obs1.SB_NAME, "
            "obs1.STATUS as SB_STATE, obs1.EXECUTION_COUNT "
            "FROM ALMA.BMMV_SCHEDBLOCK obs1, ALMA.BMMV_OBSPROJECT obs2 "
            "WHERE obs1.PRJ_REF = obs2.PRJ_ARCHIVE_UID "
            "AND regexp_like (obs2.PRJ_CODE, '^201[35]\..*\.[AST]')"
        )
        # Geometric-mean resolution of each of the 8 array configurations,
        # used by _add_imaging_param to pick the best 12m configuration.
        self._c1 = np.sqrt(self._ares.data[0][1] * self._ares.data[0][2])
        self._c2 = np.sqrt(self._ares.data[1][1] * self._ares.data[1][2])
        self._c3 = np.sqrt(self._ares.data[2][1] * self._ares.data[2][2])
        self._c4 = np.sqrt(self._ares.data[3][1] * self._ares.data[3][2])
        self._c5 = np.sqrt(self._ares.data[4][1] * self._ares.data[4][2])
        self._c6 = np.sqrt(self._ares.data[5][1] * self._ares.data[5][2])
        self._c7 = np.sqrt(self._ares.data[6][1] * self._ares.data[6][2])
        self._c8 = np.sqrt(self._ares.data[7][1] * self._ares.data[7][2])
        self._listconf = [
            self._c1, self._c2, self._c3, self._c4, self._c5, self._c6,
            self._c7, self._c8]
        # Build the full in-memory database.
        self.start_apa()
def start_apa(self, update_arch=False):
"""
Initializes the wtoDatabase dataframes.
The function queries the archive to look for cycle 1 and cycle 2
projects, disregarding any projects with status "Approved",
"Phase1Submitted", "Broken", "Canceled" or "Rejected".
The archive tables used are ALMA.BMMV_OBSPROPOSAL,
ALMA.OBS_PROJECT_STATUS, ALMA.BMMV_OBSPROJECT and
ALMA.XML_OBSPROJECT_ENTITIES.
:param update_arch:
:rtype: bool
:return: None
"""
if update_arch:
self.update_from_archive()
# noinspection PyUnusedLocal
status = self.status
# Query for Projects, from BMMV.
self._cursor.execute(self._sql1)
self._df1 = pd.DataFrame(
self._cursor.fetchall(),
columns=[rec[0] for rec in self._cursor.description])
if self._allc2:
self._df1 = self._df1.query(
'(CYCLE in ["2015.1", "2015.A"]) or '
'(CYCLE in ["2013.1", "2013.A"] and '
' DC_LETTER_GRADE == ["A", "B", "C"])').copy()
else:
self._df1 = self._df1.query(
'(CYCLE in ["2015.1", "2015.A"]) or '
'(CYCLE in ["2013.1", "2013.A"] and '
'DC_LETTER_GRADE == "A")').copy()
self.projects = pd.merge(
self._df1.query('PRJ_STATUS not in @status'), self.executive,
on='OBSPROJECT_UID'
).set_index('CODE', drop=False)
# print(len(self._df1.query('PRJ_STATUS not in @status')))
self.projects = pd.merge(
self._df1.query('PRJ_STATUS not in @status'), self.executive,
on='OBSPROJECT_UID'
).set_index('CODE', drop=False)
self.projects['xmlfile'] = self.projects.apply(
lambda r: r['OBSPROJECT_UID'].replace('://', '___').replace(
'/', '_') + '.xml', axis=1
)
self.projects['phase'] = self.projects.apply(
lambda r: 'I' if r['PRJ_STATUS'] in PHASE_I_STATUS else 'II',
axis=1
)
if not self._loadp1:
self.projects = self.projects.query('phase == "II"').copy()
if self._refresh_apdm:
print("Downloading APDM data for %d projects...\n" %
len(self.projects))
phase1uids = self.projects.query(
'phase == "I"').OBSPROJECT_UID.unique()
get_all_apdm(self._cursor, self._data_path,
self.projects.OBSPROJECT_UID.unique(),
phase1uids)
self._load_obsprojects(path=self._data_path + 'obsproject/')
self._load_sciencegoals()
self._load_sblocks_meta()
self._load_schedblocks()
self._add_imaging_param()
self._create_extrainfo()
    def _add_imaging_param(self):
        """Derive imaging columns and build the final ``self.schedblocks``."""
        # Angular resolution assumed by the OT, scaled by repfreq/100 GHz
        # (the /0.9 presumably undoes a margin factor — TODO confirm).
        self._schedblocks_temp['assumedconf_ar_ot'] = (
            self._schedblocks_temp.minAR_ot / 0.9) * \
            self._schedblocks_temp.repfreq / 100.
        # Best-matching 12m configuration name; "N/A" for non-12m arrays.
        self._schedblocks_temp['OT_BestConf'] = self._schedblocks_temp.apply(
            lambda x: self._ares.array[
                DsaTool.find_array(x['assumedconf_ar_ot'], self._listconf)] if
            x['array'] == "TWELVE-M" else "N/A",
            axis=1)
        # Row-wise AR limits; _get_ar_lim is defined outside this chunk.
        ar = self._schedblocks_temp.apply(lambda x: self._get_ar_lim(x), axis=1)
        # noinspection PyUnresolvedReferences
        self.schedblocks = pd.concat(
            [self._schedblocks_temp, ar], axis=1).set_index(
            'SB_UID', drop=False)
    def _create_extrainfo(self):
        """Build ``self.target_tables`` and convenience SB_UID groupings."""
        # Join ordered targets (science only) with target, science parameter
        # and field-source information into one flat table.
        target_tables_temp = pd.merge(
            self.orderedtar.query('name != "Calibrators"'),
            self.target, on=['SB_UID', 'targetId'])
        target_tables_temp2 = pd.merge(
            target_tables_temp, self.scienceparam, on=['SB_UID', 'paramRef'])
        target_tables_temp3 = pd.merge(
            target_tables_temp2, self.fieldsource[
                ['SB_UID', 'fieldRef', 'name', 'RA', 'DEC', 'isQuery', 'use',
                 'solarSystem', 'isMosaic', 'pointings', 'ephemeris']
            ], on=['SB_UID', 'fieldRef'],
            suffixes=['_target', '_so'])
        self.target_tables = target_tables_temp3.copy().set_index(
            'targetId', drop=False)
        # Per-SB counts: distinct fields/targets/params/specs and the
        # maximum pointing count of any field.
        sb_target_num = self.target_tables.groupby('SB_UID').agg(
            {'fieldRef': pd.Series.nunique, 'pointings': pd.Series.max,
             'targetId': pd.Series.nunique, 'paramRef': pd.Series.nunique,
             'specRef': pd.Series.nunique}).reset_index()
        # SBs with mosaics (multiple pointings) ...
        self.multi_point_su = sb_target_num.query(
            'pointings > 1').SB_UID.unique()
        # ... with more than one field ...
        self.multi_field_su = sb_target_num.query(
            'fieldRef > 1').SB_UID.unique()
        # ... and with solar-system (ephemeris) sources.
        self.ephem_su = self.target_tables.query(
            'solarSystem != "Unspecified"').SB_UID.unique()
def _load_obsprojects(self, path):
"""
:type path: str
"""
projt = []
for r in self.projects.iterrows():
xml = r[1].xmlfile
proj = self._read_obsproject(xml, path)
projt.append(proj)
projt_arr = np.array(projt, dtype=object)
self.obsproject = pd.DataFrame(
projt_arr,
columns=['CODE', 'OBSPROJECT_UID', 'OBSPROPOSAL_UID',
'OBSREVIEW_UID', 'VERSION',
'NOTE', 'IS_CALIBRATION', 'IS_DDT']
).set_index('OBSPROJECT_UID', drop=False)
@staticmethod
def _read_obsproject(xml, path):
"""
:type path: str
:type xml: str
"""
try:
obsparse = ObsProject(xml, path)
except KeyError:
print("Something went wrong while trying to parse %s" % xml)
return 0
return obsparse.get_info()
    def _load_sciencegoals(self):
        """Parse science goals for every project and build the SG dataframes
        (sciencegoals, sg_targets, visits, temp_param, sg_spw, sg_specscan)."""
        sgt = []
        tart = []
        visitt = []
        temp_paramt = []
        sgspwt = []
        sgspsct = []
        for r in self.obsproject.iterrows():
            obsproject_uid = r[1].OBSPROJECT_UID
            obsproposal_uid = r[1].OBSPROPOSAL_UID
            # Skip projects whose XML failed to parse (_read_obsproject
            # returned 0, leaving the UID unset).
            if obsproject_uid is None:
                continue
            xml = obsproposal_uid.replace('://', '___').replace('/', '_')
            xml += '.xml'
            code = r[1].CODE
            obspropparse = self._read_sciencegoal(code, xml, obsproject_uid)
            if obspropparse == 0:
                continue
            sgt.extend(obspropparse.sciencegoals)
            tart.extend(obspropparse.sg_targets)
            # Optional per-project collections.
            if len(obspropparse.sg_specscan) > 0:
                sgspsct.extend(obspropparse.sg_specscan)
            if len(obspropparse.sg_specwindows) > 0:
                sgspwt.extend(obspropparse.sg_specwindows)
            if len(obspropparse.visits) > 0:
                visitt.extend(obspropparse.visits)
            if len(obspropparse.temp_param) > 0:
                temp_paramt.extend(obspropparse.temp_param)
        sgt_arr = np.array(sgt, dtype=object)
        tart_arr = np.array(tart, dtype=object)
        visitt_arr = np.array(visitt, dtype=object)
        temp_paramt_arr = np.array(temp_paramt, dtype=object)
        sgspwt_arr = np.array(sgspwt, dtype=object)
        sgspsct_arr = np.array(sgspsct, dtype=object)
        self.sciencegoals = pd.DataFrame(
            sgt_arr,
            columns=['SG_ID', 'OBSPROJECT_UID', 'OUS_ID', 'sg_name', 'band',
                     'estimatedTime', 'est12Time', 'estACATime',
                     'est7Time', 'eTPTime',
                     'AR', 'LAS', 'ARcor', 'LAScor', 'sensitivity',
                     'useACA', 'useTP', 'isTimeConstrained', 'repFreq',
                     'repFreq_spec', 'singleContFreq', 'isCalSpecial',
                     'isPointSource', 'polarization', 'isSpectralScan', 'type',
                     'hasSB', 'dummy', 'num_targets', 'mode']
        ).set_index('SG_ID', drop=False)
        self.sg_targets = pd.DataFrame(
            tart_arr,
            columns=['TARG_ID', 'OBSPROJECT_UID', 'SG_ID', 'tarType',
                     'solarSystem', 'sourceName', 'RA', 'DEC', 'isMosaic',
                     'centerVel', 'centerVel_units', 'centerVel_refsys',
                     'centerVel_doppler', 'lineWidth']
        ).set_index('TARG_ID', drop=False)
        self.visits = pd.DataFrame(
            visitt_arr,
            columns=['SG_ID', 'sgName', 'OBSPROJECT_UID', 'startTime', 'margin',
                     'margin_unit', 'note', 'avoidConstraint', 'priority',
                     'visit_id', 'prev_visit_id', 'requiredDelay',
                     'requiredDelay_unit', 'fixedStart']
        )
        self.temp_param = pd.DataFrame(
            temp_paramt_arr,
            columns=['SG_ID', 'sgName', 'OBSPROJECT_UID', 'startTime',
                     'endTime', 'margin', 'margin_unit', 'repeats', 'LSTmin',
                     'LSTmax', 'note', 'avoidConstraint', 'priority',
                     'fixedStart']
        )
        self.sg_spw = pd.DataFrame(
            sgspwt_arr,
            columns=['SG_ID', 'SPW_ID', 'transitionName', 'centerFrequency',
                     'bandwidth', 'spectralRes', 'isRepSPW', 'isSkyFreq',
                     'group_index']
        )
        self.sg_specscan = pd.DataFrame(
            sgspsct_arr,
            columns=['SG_ID', 'SSCAN_ID', 'startFrequency', 'endFrequency',
                     'bandwidth', 'spectralRes', 'isSkyFreq']
        )
    def _read_sciencegoal(self, code, xml, obsproject_uid):
        """Parse science goals for one project.

        Phase I projects are read from their proposal XML, Phase II from the
        project XML. Returns the parser object, or 0 when the file is
        unreadable.
        """
        try:
            if self.projects.ix[code, 'phase'] == 'I':
                obspropparse = ObsProposal(
                    xml, obsproject_uid, self._data_path + 'obsproposal/')
                obspropparse.get_sg()
            else:
                # Phase II: rebuild the filename from the project UID
                # (the `xml` argument names the proposal file instead).
                xml = obsproject_uid.replace('://', '___').replace(
                    '/', '_')
                xml += '.xml'
                obspropparse = ObsProject(
                    xml, self._data_path + 'obsproject/')
                obspropparse.get_sg()
            return obspropparse
        except IOError:
            print("Something went wrong while trying to parse %s" % xml)
            return 0
    def _load_sblocks_meta(self):
        """Build ``self.sblocks`` with the OUS hierarchy of every SB."""
        sbt = []
        for r in self.obsproject.iterrows():
            code = r[1].CODE
            phase = self.projects.ix[code, 'phase']
            parse = self._read_sblock_meta(phase, r)
            if parse == 0:
                continue
            sbt.extend(parse.sg_sb)
        sbt_arr = np.array(sbt, dtype=object)
        self.sblocks = pd.DataFrame(
            sbt_arr,
            columns=['SB_UID', 'OBSPROJECT_UID', 'ous_name', 'OUS_ID',
                     'GOUS_ID',
                     'gous_name', 'MOUS_ID', 'mous_name',
                     'array', 'execount']
        ).set_index('SB_UID', drop=False)
        # Extract the science-goal name from "SG OUS (<name>)": strip the
        # prefix, then drop the trailing ')'.
        self.sblocks['sg_name'] = self.sblocks.ous_name.str.replace(
            "SG OUS \(", "")
        self.sblocks['sg_name'] = self.sblocks.sg_name.str.slice(0, -1)
def _read_sblock_meta(self, phase, r):
if phase == 'I':
obsreview_uid = r[1].OBSREVIEW_UID
if obsreview_uid is None:
return 0
xml = obsreview_uid.replace('://', '___').replace('/', '_')
xml += '.xml'
try:
parse = ObsReview(xml, self._data_path + 'obsreview/')
parse.get_sg_sb()
except IOError:
print("Something went wrong while trying to parse %s" % xml)
return 0
else:
obsproject_uid = r[1].OBSPROJECT_UID
xml = obsproject_uid.replace('://', '___').replace('/', '_')
xml += '.xml'
try:
parse = ObsProject(xml, self._data_path + 'obsproject/')
parse.get_sg_sb()
except IOError:
print("Something went wrong while trying to parse %s" % xml)
return 0
return parse
    def _load_schedblocks(self, sb_path='schedblock/'):
        """Parse every SB XML and build the SchedBlock-level dataframes
        (schedblocks temp table, calibration params, targets, fields,
        spectral configuration, basebands and spectral windows).

        :type sb_path: str
        """
        path = self._data_path + sb_path
        # Accumulators for each record type produced by read_schedblocks().
        rst = []
        rft = []
        tart = []
        spwt = []
        bbt = []
        spct = []
        scpart = []
        acpart = []
        bcpart = []
        pcpart = []
        ordtart = []
        sys.stdout.write("Processing Phase II SBs ")
        sys.stdout.flush()
        # Print a progress dot roughly every 10% of the SBs.
        c = 10
        i = 0
        n = len(self.sblocks)
        for sg_sb in self.sblocks.iterrows():
            i += 1
            if (100. * i / n) > c:
                sys.stdout.write('.')
                sys.stdout.flush()
                c += 10
            xmlf = sg_sb[1].SB_UID.replace('://', '___')
            xmlf = xmlf.replace('/', '_') + '.xml'
            sb1 = SchedBlock(
                xmlf, sg_sb[1].SB_UID, sg_sb[1].OBSPROJECT_UID, sg_sb[1].OUS_ID,
                sg_sb[1].sg_name, path)
            rs, rf, tar, spc, bb, spw, scpar, acpar, bcpar, pcpar, ordtar = \
                sb1.read_schedblocks()
            rst.append(rs)
            rft.extend(rf)
            tart.extend(tar)
            spct.extend(spc)
            bbt.extend(bb)
            spwt.extend(spw)
            scpart.extend(scpar)
            acpart.extend(acpar)
            bcpart.extend(bcpar)
            pcpart.extend(pcpar)
            ordtart.extend(ordtar)
        sys.stdout.write("\nDone!\n")
        sys.stdout.flush()
        rst_arr = np.array(rst, dtype=object)
        rft_arr = np.array(rft, dtype=object)
        tart_arr = np.array(tart, dtype=object)
        spct_arr = np.array(spct, dtype=object)
        bbt_arr = np.array(bbt, dtype=object)
        spwt_arr = np.array(spwt, dtype=object)
        scpart_arr = np.array(scpart, dtype=object)
        acpart_arr = np.array(acpart, dtype=object)
        bcpart_arr = np.array(bcpart, dtype=object)
        pcpart_arr = np.array(pcpart, dtype=object)
        ordtart_arr = np.array(ordtart, dtype=object)
        self._schedblocks_temp = pd.DataFrame(
            rst_arr,
            columns=['SB_UID', 'OBSPROJECT_UID', 'SG_ID', 'OUS_ID',
                     'sbName', 'sbNote', 'sbStatusXml', 'repfreq', 'band',
                     'array',
                     'RA', 'DEC', 'minAR_ot', 'maxAR_ot', 'execount',
                     'isPolarization', 'maxPWVC', 'array12mType',
                     'estimatedTime', 'maximumTime'],
        ).set_index('SB_UID', drop=False)
        # Cast numeric columns (parsed from XML as strings).
        tof = ['repfreq', 'RA', 'DEC', 'minAR_ot', 'maxAR_ot', 'maxPWVC']
        self._schedblocks_temp[tof] = self._schedblocks_temp[tof].astype(float)
        self._schedblocks_temp[['execount']] = self._schedblocks_temp[
            ['execount']].astype(int)
        self.scienceparam = pd.DataFrame(
            scpart_arr,
            columns=['paramRef', 'SB_UID', 'parName', 'representative_bw',
                     'sensitivy', 'sensUnit', 'intTime', 'subScanDur']
        ).set_index('paramRef', drop=False)
        self.ampcalparam = pd.DataFrame(
            acpart_arr,
            columns=['paramRef', 'SB_UID', 'parName', 'intTime',
                     'subScanDur']
        ).set_index('paramRef', drop=False)
        self.bbandcalparam = pd.DataFrame(
            bcpart_arr,
            columns=['paramRef', 'SB_UID', 'parName', 'intTime',
                     'subScanDur']
        ).set_index('paramRef', drop=False)
        self.phasecalparam = pd.DataFrame(
            pcpart_arr,
            columns=['paramRef', 'SB_UID', 'parName', 'intTime',
                     'subScanDur']
        ).set_index('paramRef', drop=False)
        self.orderedtar = pd.DataFrame(
            ordtart_arr,
            columns=['targetId', 'SB_UID', 'indObs', 'name']
        ).set_index('targetId', drop=False)
        self.fieldsource = pd.DataFrame(
            rft_arr,
            columns=['fieldRef', 'SB_UID', 'solarSystem', 'sourcename',
                     'name', 'RA', 'DEC', 'isQuery', 'intendedUse', 'qRA',
                     'qDEC', 'use', 'search_radius', 'rad_unit',
                     'ephemeris', 'pointings', 'isMosaic', 'arraySB']
        ).set_index('fieldRef', drop=False)
        self.target = pd.DataFrame(
            tart_arr,
            columns=['targetId', 'SB_UID', 'specRef', 'fieldRef',
                     'paramRef']).set_index('targetId', drop=False)
        self.spectralconf = pd.DataFrame(
            spct_arr,
            columns=['specRef', 'SB_UID', 'Name', 'BaseBands', 'SPWs']
        ).set_index('specRef', drop=False)
        self.spectralconf[['BaseBands', 'SPWs']] = self.spectralconf[
            ['BaseBands', 'SPWs']].astype(int)
        self.baseband = pd.DataFrame(
            bbt_arr,
            columns=['basebandRef', 'specRef', 'SB_UID', 'Name',
                     'CenterFreq', 'FreqSwitching', 'l02Freq',
                     'Weighting', 'useUDB']
        ).set_index('basebandRef', drop=False)
        tof = ['CenterFreq', 'l02Freq', 'Weighting']
        tob = ['FreqSwitching', 'useUDB']
        self.baseband[tof] = self.baseband[tof].astype(float)
        self.baseband[tob] = self.baseband[tob].astype(bool)
        tof = ['CenterFreq', 'EffectiveBandwidth', 'lineRestFreq']
        toi = ['AveragingFactor', 'EffectiveChannels']
        tob = ['Use']
        self.spectralwindow = pd.DataFrame(
            spwt_arr,
            columns=['basebandRef', 'SB_UID', 'Name',
                     'SideBand', 'WindowsFunction',
                     'CenterFreq', 'AveragingFactor',
                     'EffectiveBandwidth', 'EffectiveChannels', 'lineRestFreq',
                     'lineName', 'Use'],
        ).set_index('basebandRef', drop=False)
        self.spectralwindow[tof] = self.spectralwindow[tof].astype(float)
        self.spectralwindow[toi] = self.spectralwindow[toi].astype(int)
        self.spectralwindow[tob] = self.spectralwindow[tob].astype(bool)
    def _update_apdm(self, obsproject_uid):
        """Re-download and re-parse a single project, then refresh every
        derived dataframe for it.

        :type obsproject_uid: str
        """
        # Fetch a fresh copy of the project XML from the archive.
        proj_xmlfile = get_apdm(self._cursor, self._data_path, obsproject_uid)
        proj = [self._read_obsproject(
            proj_xmlfile, self._data_path + 'obsproject/')]
        projt_arr = np.array(proj, dtype=object)
        obsproject = pd.DataFrame(
            projt_arr,
            columns=['CODE', 'OBSPROJECT_UID', 'OBSPROPOSAL_UID',
                     'OBSREVIEW_UID', 'VERSION',
                     'NOTE', 'IS_CALIBRATION', 'IS_DDT']
        ).set_index('OBSPROJECT_UID', drop=False)
        # In-place row update of the cached project table.
        self.obsproject.update(obsproject)
        # NOTE(review): update_from_archive is not defined in this chunk —
        # confirm it exists on this class.
        self.update_from_archive()
        self._update_sciencegoal(obsproject_uid)
        self._update_sblock_meta(obsproject_uid)
        self._update_schedblock(obsproject_uid)
        self._add_imaging_param()
        self._create_extrainfo()
    def _update_sciencegoal(self, obsproject_uid):
        """Re-parse and replace the science-goal rows of one project.

        :type obsproject_uid: str
        """
        obsproposal_uid = self.obsproject.ix[obsproject_uid, 'OBSPROPOSAL_UID']
        prop_xmlfile = obsproposal_uid.replace('://', '___').replace('/', '_')
        prop_xmlfile += '.xml'
        code = self.obsproject.ix[obsproject_uid, 'CODE']
        obspropparse = self._read_sciencegoal(
            code, prop_xmlfile, obsproject_uid)
        # Overwrite SG and target rows in place, keyed by their IDs.
        for sg in obspropparse.sciencegoals:
            self.sciencegoals.ix[sg[0]] = np.array(sg, dtype=object)
        # noinspection PyUnusedLocal
        sg_ids = self.sciencegoals.query(
            'OBSPROJECT_UID in @obsproject_uid').SG_ID.unique()
        for tar in obspropparse.sg_targets:
            self.sg_targets.ix[tar[0]] = np.array(tar, dtype=object)
        # For each list-like table: drop the project's old rows, append the
        # freshly parsed ones.
        self.sg_spw.drop(self.sg_spw.query('SG_ID in @sg_ids').index.values,
                         inplace=True, errors='ignore')
        spw = pd.DataFrame(
            np.array(obspropparse.sg_specwindows, dtype=object),
            columns=['SG_ID', 'SPW_ID', 'transitionName', 'centerFrequency',
                     'bandwidth', 'spectralRes', 'isRepSPW', 'isSkyFreq',
                     'group_index']
        )
        self.sg_spw = self.sg_spw.append(spw, ignore_index=True)
        if len(obspropparse.sg_specscan) > 0:
            self.sg_specscan.drop(
                self.sg_specscan.query('SG_ID in @sg_ids').index.values,
                inplace=True, errors='ignore')
            specscan = pd.DataFrame(
                np.array(obspropparse.sg_specscan, dtype=object),
                columns=['SG_ID', 'SSCAN_ID', 'startFrequency', 'endFrequency',
                         'bandwidth', 'spectralRes', 'isSkyFreq']
            )
            self.sg_specscan = self.sg_specscan.append(
                specscan, ignore_index=True)
        if len(obspropparse.visits) > 0:
            self.visits.drop(
                self.visits.query('SG_ID in @sg_ids').index.values,
                inplace=True, errors='ignore')
            visit = pd.DataFrame(
                np.array(obspropparse.visits, dtype=object),
                columns=[
                    'SG_ID', 'sgName', 'OBSPROJECT_UID', 'startTime', 'margin',
                    'margin_unit', 'note', 'avoidConstraint', 'priority',
                    'visit_id', 'prev_visit_id', 'requiredDelay',
                    'requiredDelay_unit', 'fixedStart']
            )
            self.visits = self.visits.append(visit, ignore_index=True)
        if len(obspropparse.temp_param) > 0:
            self.temp_param.drop(
                self.temp_param.query('SG_ID in @sg_ids').index.values,
                inplace=True, errors='ignore')
            temppar = pd.DataFrame(
                np.array(obspropparse.temp_param, dtype=object),
                columns=[
                    'SG_ID', 'sgName', 'OBSPROJECT_UID', 'startTime',
                    'endTime', 'margin', 'margin_unit', 'repeats', 'LSTmin',
                    'LSTmax', 'note', 'avoidConstraint', 'priority',
                    'fixedStart']
            )
            self.temp_param = self.temp_param.append(temppar, ignore_index=True)
    def _update_sblock_meta(self, obsproject_uid):
        """Re-parse and replace the OUS/SB metadata rows of one project.

        :type obsproject_uid: str
        """
        # Fake the (index, row) tuple that _read_sblock_meta expects from
        # DataFrame.iterrows(); only r[1] is read. Updates always treat the
        # project as Phase II.
        r = [0, None]
        r[1] = self.obsproject.ix[obsproject_uid]
        parse = self._read_sblock_meta('II', r)
        sbt = []
        sbt.extend(parse.sg_sb)
        sbt_arr = np.array(sbt, dtype=object)
        # print sbt_arr, sbt_arr.shape
        sblocks = pd.DataFrame(
            sbt_arr,
            columns=['SB_UID', 'OBSPROJECT_UID', 'ous_name', 'OUS_ID',
                     'GOUS_ID',
                     'gous_name', 'MOUS_ID', 'mous_name',
                     'array', 'execount']
        ).set_index('SB_UID', drop=False)
        # Same "SG OUS (<name>)" extraction used in _load_sblocks_meta.
        sblocks['sg_name'] = sblocks.ous_name.str.replace(
            "SG OUS \(", "")
        sblocks['sg_name'] = sblocks.sg_name.str.slice(0, -1)
        self.sblocks.update(sblocks)
    def _update_schedblock(self, obsproject_uid, sb_path='schedblock/'):
        """Re-parse all SchedBlock XMLs of one ObsProject and refresh tables.

        For every SB of the given project (as listed in ``self.sblocks``)
        this parses the SB XML file found under ``self._data_path + sb_path``
        and replaces the rows matching those SB_UIDs in the SB-related
        DataFrames: _schedblocks_temp, scienceparam, ampcalparam,
        bbandcalparam, phasecalparam, orderedtar, fieldsource, target,
        spectralconf, baseband and spectralwindow.

        :param obsproject_uid: archive UID of the ObsProject to refresh.
        :param sb_path: sub-directory holding the SchedBlock XML files.
        """
        path = self._data_path + sb_path
        # One accumulator list per output table; filled while iterating SBs.
        rst = []
        rft = []
        tart = []
        spwt = []
        bbt = []
        spct = []
        scpart = []
        acpart = []
        bcpart = []
        pcpart = []
        ordtart = []
        print "Updating SBs of %s." % obsproject_uid
        sb_uids = []
        for sg_sb in self.sblocks.query('OBSPROJECT_UID == @obsproject_uid'
                                        ).iterrows():
            sb_uids.append(sg_sb[1].SB_UID)
            # Archive UIDs like "uid://A001/X..." map to file names by
            # replacing the separators: uid___A001_X....xml
            xmlf = sg_sb[1].SB_UID.replace('://', '___')
            xmlf = xmlf.replace('/', '_') + '.xml'
            sb1 = SchedBlock(
                xmlf, sg_sb[1].SB_UID, sg_sb[1].OBSPROJECT_UID, sg_sb[1].OUS_ID,
                sg_sb[1].sg_name, path)
            rs, rf, tar, spc, bb, spw, scpar, acpar, bcpar, pcpar, ordtar = \
                sb1.read_schedblocks()
            rst.append(rs)
            rft.extend(rf)
            tart.extend(tar)
            spct.extend(spc)
            bbt.extend(bb)
            spwt.extend(spw)
            scpart.extend(scpar)
            acpart.extend(acpar)
            bcpart.extend(bcpar)
            pcpart.extend(pcpar)
            ordtart.extend(ordtar)
        rst_arr = np.array(rst, dtype=object)
        rft_arr = np.array(rft, dtype=object)
        tart_arr = np.array(tart, dtype=object)
        spct_arr = np.array(spct, dtype=object)
        bbt_arr = np.array(bbt, dtype=object)
        spwt_arr = np.array(spwt, dtype=object)
        scpart_arr = np.array(scpart, dtype=object)
        acpart_arr = np.array(acpart, dtype=object)
        bcpart_arr = np.array(bcpart, dtype=object)
        pcpart_arr = np.array(pcpart, dtype=object)
        ordtart_arr = np.array(ordtart, dtype=object)
        # For each table: drop the stale rows of the re-parsed SBs, then
        # append the fresh rows and coerce numeric/boolean dtypes.
        self._schedblocks_temp.drop(
            self._schedblocks_temp.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        rst_df = pd.DataFrame(
            rst_arr,
            columns=['SB_UID', 'OBSPROJECT_UID', 'SG_ID', 'OUS_ID',
                     'sbName', 'sbNote', 'sbStatusXml', 'repfreq', 'band',
                     'array',
                     'RA', 'DEC', 'minAR_ot', 'maxAR_ot', 'execount',
                     'isPolarization', 'maxPWVC', 'array12mType',
                     'estimatedTime', 'maximumTime'],
        ).set_index('SB_UID', drop=False)
        self._schedblocks_temp = self._schedblocks_temp.append(rst_df)
        tof = ['repfreq', 'RA', 'DEC', 'minAR_ot', 'maxAR_ot', 'maxPWVC']
        self._schedblocks_temp[tof] = self._schedblocks_temp[tof].astype(float)
        self._schedblocks_temp[['execount']] = self._schedblocks_temp[
            ['execount']].astype(int)
        self.scienceparam.drop(
            self.scienceparam.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        scienceparam = pd.DataFrame(
            scpart_arr,
            columns=['paramRef', 'SB_UID', 'parName', 'representative_bw',
                     'sensitivy', 'sensUnit', 'intTime', 'subScanDur']
        ).set_index('paramRef', drop=False)
        self.scienceparam = self.scienceparam.append(scienceparam)
        self.ampcalparam.drop(
            self.ampcalparam.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        ampcalparam = pd.DataFrame(
            acpart_arr,
            columns=['paramRef', 'SB_UID', 'parName', 'intTime',
                     'subScanDur']
        ).set_index('paramRef', drop=False)
        self.ampcalparam = self.ampcalparam.append(ampcalparam)
        self.bbandcalparam.drop(
            self.bbandcalparam.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        bbandcalparam = pd.DataFrame(
            bcpart_arr,
            columns=['paramRef', 'SB_UID', 'parName', 'intTime',
                     'subScanDur']
        ).set_index('paramRef', drop=False)
        self.bbandcalparam = self.bbandcalparam.append(bbandcalparam)
        self.phasecalparam.drop(
            self.phasecalparam.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        phasecalparam = pd.DataFrame(
            pcpart_arr,
            columns=['paramRef', 'SB_UID', 'parName', 'intTime',
                     'subScanDur']
        ).set_index('paramRef', drop=False)
        self.phasecalparam = self.phasecalparam.append(phasecalparam)
        self.orderedtar.drop(
            self.orderedtar.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        orderedtar = pd.DataFrame(
            ordtart_arr,
            columns=['targetId', 'SB_UID', 'indObs', 'name']
        ).set_index('targetId', drop=False)
        self.orderedtar = self.orderedtar.append(orderedtar)
        self.fieldsource.drop(
            self.fieldsource.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        fieldsource = pd.DataFrame(
            rft_arr,
            columns=['fieldRef', 'SB_UID', 'solarSystem', 'sourcename',
                     'name', 'RA', 'DEC', 'isQuery', 'intendedUse', 'qRA',
                     'qDEC', 'use', 'search_radius', 'rad_unit',
                     'ephemeris', 'pointings', 'isMosaic', 'arraySB']
        ).set_index('fieldRef', drop=False)
        self.fieldsource = self.fieldsource.append(fieldsource)
        self.target.drop(
            self.target.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        target = pd.DataFrame(
            tart_arr,
            columns=['targetId', 'SB_UID', 'specRef', 'fieldRef',
                     'paramRef']).set_index('targetId', drop=False)
        self.target = self.target.append(target)
        self.spectralconf.drop(
            self.spectralconf.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        spectralconf = pd.DataFrame(
            spct_arr,
            columns=['specRef', 'SB_UID', 'Name', 'BaseBands', 'SPWs']
        ).set_index('specRef', drop=False)
        self.spectralconf = self.spectralconf.append(spectralconf)
        self.spectralconf[['BaseBands', 'SPWs']] = self.spectralconf[
            ['BaseBands', 'SPWs']].astype(int)
        self.baseband.drop(
            self.baseband.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        baseband = pd.DataFrame(
            bbt_arr,
            columns=['basebandRef', 'specRef', 'SB_UID', 'Name',
                     'CenterFreq', 'FreqSwitching', 'l02Freq',
                     'Weighting', 'useUDB']
        ).set_index('basebandRef', drop=False)
        self.baseband = self.baseband.append(baseband)
        tof = ['CenterFreq', 'l02Freq', 'Weighting']
        tob = ['FreqSwitching', 'useUDB']
        self.baseband[tof] = self.baseband[tof].astype(float)
        self.baseband[tob] = self.baseband[tob].astype(bool)
        self.spectralwindow.drop(
            self.spectralwindow.query('SB_UID in @sb_uids').index.values,
            inplace=True, errors='ignore')
        spectralwindow = pd.DataFrame(
            spwt_arr,
            columns=['basebandRef', 'SB_UID', 'Name',
                     'SideBand', 'WindowsFunction',
                     'CenterFreq', 'AveragingFactor',
                     'EffectiveBandwidth', 'EffectiveChannels', 'lineRestFreq',
                     'lineName', 'Use'],
        ).set_index('basebandRef', drop=False)
        self.spectralwindow = self.spectralwindow.append(spectralwindow)
        tof = ['CenterFreq', 'EffectiveBandwidth', 'lineRestFreq']
        toi = ['AveragingFactor', 'EffectiveChannels']
        tob = ['Use']
        self.spectralwindow[tof] = self.spectralwindow[tof].astype(float)
        self.spectralwindow[toi] = self.spectralwindow[toi].astype(int)
        self.spectralwindow[tob] = self.spectralwindow[tob].astype(bool)
    def update_from_archive(self):
        """Reload the project list from the archive database.

        Runs the prepared SQL queries, filters projects by cycle and grade
        (grades A/B/C when ``self._allc2`` is set, grade A only otherwise),
        merges in the executive per project, and derives the ``xmlfile``
        and ``phase`` ('I'/'II') columns.  Phase-I projects are dropped
        unless ``self._loadp1`` is set.  The previous project table is kept
        in ``self._old_projects``.
        """
        self._cursor.execute(self._sql1)
        self._df1 = pd.DataFrame(
            self._cursor.fetchall(),
            columns=[rec[0] for rec in self._cursor.description])
        self._cursor.execute(self._sql_executive)
        self.executive = pd.DataFrame(
            self._cursor.fetchall(), columns=['OBSPROJECT_UID', 'EXEC'])
        self._old_projects = self.projects.copy()
        # noinspection PyUnusedLocal
        # `status` is referenced via @status inside the query string below.
        status = self.status
        if self._allc2:
            self._df1 = self._df1.query(
                '(CYCLE in ["2015.1", "2015.A"]) or '
                '(CYCLE in ["2013.1", "2013.A"] and '
                ' DC_LETTER_GRADE == ["A", "B", "C"])').copy()
        else:
            self._df1 = self._df1.query(
                '(CYCLE in ["2015.1", "2015.A"]) or '
                '(CYCLE in ["2013.1", "2013.A"] and '
                'DC_LETTER_GRADE == "A")').copy()
        self.projects = pd.merge(
            self._df1.query('PRJ_STATUS not in @status'), self.executive,
            on='OBSPROJECT_UID'
        ).set_index('CODE', drop=False)
        # uid://A001/X... -> uid___A001_X....xml (file name of the project XML)
        self.projects['xmlfile'] = self.projects.apply(
            lambda r: r['OBSPROJECT_UID'].replace('://', '___').replace(
                '/', '_') + '.xml', axis=1
        )
        self.projects['phase'] = self.projects.apply(
            lambda r: 'I' if r['PRJ_STATUS'] in PHASE_I_STATUS else 'II',
            axis=1
        )
        if not self._loadp1:
            self.projects = self.projects.query('phase == "II"').copy()
    def update_status(self):
        """Refresh execution-block QA0 data and SB state counters.

        Populates ``self.aqua_execblock`` (QA0 exec blocks joined with their
        final comments, plus a 'delta' duration column in hours) and
        ``self.sb_status`` (per-SB state with EXECOUNT as float), all
        indexed by SB_UID.
        """
        self._cursor.execute(self._sqlqa0)
        self.aqua_execblock = pd.DataFrame(
            self._cursor.fetchall(),
            columns=[rec[0] for rec in self._cursor.description])
        self._cursor.execute(self._sqlqa0com)
        self._execblock_comm = pd.DataFrame(
            self._cursor.fetchall(),
            columns=[rec[0] for rec in self._cursor.description]
        ).set_index('FINALCOMMENTID', drop=False)
        self.aqua_execblock = pd.merge(
            self.aqua_execblock, self._execblock_comm, on='FINALCOMMENTID',
            how='left').set_index('SB_UID', drop=False)
        # Execution duration: ENDTIME - STARTTIME converted to hours.
        self.aqua_execblock['delta'] = (self.aqua_execblock.ENDTIME -
                                        self.aqua_execblock.STARTTIME)
        self.aqua_execblock['delta'] = self.aqua_execblock.apply(
            lambda x: x['delta'].total_seconds() / 3600., axis=1
        )
        self._cursor.execute(self._sql_sbstates)
        self.sb_status = pd.DataFrame(
            self._cursor.fetchall(),
            columns=[rec[0] for rec in self._cursor.description]
        ).set_index('SB_UID', drop=False)
        self.sb_status['EXECOUNT'] = self.sb_status.EXECOUNT.astype(float)
    # noinspection PyUnusedLocal
    def _get_ar_lim(self, sbrow):
        """Compute angular-resolution limits and SB counts for one SB row.

        Looks up the SB's science goal, counts sibling SBs per array kind,
        and (for 12-m SBs) runs ``self._ares.run`` to obtain min/max angular
        resolution and the best array configuration.

        :param sbrow: a row of the schedblocks table (Series-like).
        :return: pd.Series with index [minAR, maxAR, BestConf, two_12m,
                 SB_BL_num, SB_7m_num, SB_TP_num].  BestConf is 'N/A' for
                 non-12m arrays, 'E' when no science goal was matched, and
                 'C' when the resolution calculation raised.
        """
        ouid = sbrow['OBSPROJECT_UID']
        sgn = sbrow['SG_ID']
        uid = sbrow['SB_UID']
        ousid = sbrow['OUS_ID']
        sgrow = self.sciencegoals.query('OBSPROJECT_UID == @ouid and '
                                        '(sg_name == @sgn or '
                                        ' OUS_ID == @ousid)')
        # Fallback 1: retry with surrounding whitespace stripped.
        if len(sgrow) == 0:
            sgn = sbrow['SG_ID'].rstrip()
            sgn = sgn.lstrip()
            sgrow = self.sciencegoals.query(
                'OBSPROJECT_UID == @ouid and (sg_name == @sgn or '
                ' OUS_ID == @ousid)')
        # Fallback 2: hard-coded workarounds for science-goal names whose
        # spacing differs between the SB table and the science-goal table.
        if len(sgrow) == 0:
            if sbrow['SG_ID'] == 'CO(4-3), [CI] 1-0 setup':
                sgn = 'CO(4-3), [CI]1-0 setup'
            elif sbrow['SG_ID'] == 'CO(7-6), [CI] 2-1 setup':
                sgn = 'CO(7-6), [CI]2-1 setup'
            elif sbrow['SG_ID'] == 'CRL618: HNC & HCO+ 3-2 + H29a + HC3N 28-27':
                sgn = 'CRL618: HNC &HCO+ 3-2 + H29a + HC3N 28-27'
            elif sbrow['SG_ID'] == 'HCN, H13CN & HC15N J=8-7':
                sgn = 'HCN, H13CN &HC15N J=8-7'
            else:
                sgn = sgn
            sgrow = self.sciencegoals.query(
                'OBSPROJECT_UID == @ouid and (sg_name == @sgn or '
                ' OUS_ID == @ousid)')
        sbs = self._schedblocks_temp.query(
            'OBSPROJECT_UID == @ouid and SG_ID == @sgn and array == "TWELVE-M"')
        isextended = True
        sb_bl_num = len(sbs)
        sb_7m_num = len(self._schedblocks_temp.query(
            'OBSPROJECT_UID == @ouid and SG_ID == @sgn and array == "SEVEN-M"'))
        sb_tp_num = len(self._schedblocks_temp.query(
            'OBSPROJECT_UID == @ouid and SG_ID == @sgn and '
            'array == "TP-Array"'))
        if sbrow['array'] != "TWELVE-M":
            return pd.Series(
                [None, None, 'N/A', 0, sb_bl_num, sb_7m_num, sb_tp_num],
                index=["minAR", "maxAR", "BestConf", "two_12m", "SB_BL_num",
                       "SB_7m_num", "SB_TP_num"])
        if len(sgrow) == 0:
            print "What? %s" % uid
            return pd.Series(
                [0, 0, 'E', 0, sb_bl_num, sb_7m_num, sb_tp_num],
                index=["minAR", "maxAR", "BestConf", "two_12m", "SB_BL_num",
                       "SB_7m_num", "SB_TP_num"])
        else:
            sgrow = sgrow.iloc[0]
        # Two 12-m configurations are requested when any sibling SB's
        # array12mType contains 'Comp' (compact).
        num12 = 1
        if len(sbs) > 1:
            two = sbs[sbs.array12mType.str.contains('Comp')]
            if len(two) > 0:
                num12 = 2
        isextended = True
        # SBs named *_TC are the compact counterpart, not the extended one.
        if sbrow['sbName'].endswith('_TC'):
            isextended = False
        # noinspection PyBroadException
        try:
            minar, maxar, conf1, conf2 = self._ares.run(
                sgrow['ARcor'], sgrow['LAScor'], sbrow['DEC'], sgrow['useACA'],
                num12, sbrow['OT_BestConf'], uid)
        except:
            print "Exception, %s" % uid
            print sgrow['ARcor'], sgrow['LAScor'], sbrow['DEC'], sgrow['useACA']
            return pd.Series(
                [0, 0, 'C', num12, sb_bl_num, sb_7m_num, sb_tp_num],
                index=["minAR", "maxAR", "BestConf", "two_12m", "SB_BL_num",
                       "SB_7m_num", "SB_TP_num"])
        if not isextended:
            return pd.Series(
                [minar[1], maxar[1], conf2, num12, sb_bl_num, sb_7m_num,
                 sb_tp_num],
                index=["minAR", "maxAR", "BestConf", "two_12m", "SB_BL_num",
                       "SB_7m_num", "SB_TP_num"])
        return pd.Series(
            [minar[0], maxar[0], conf1, num12, sb_bl_num, sb_7m_num, sb_tp_num],
            index=["minAR", "maxAR", "BestConf", "two_12m", "SB_BL_num",
                   "SB_7m_num", "SB_TP_num"])
| itoledoc/gWTO3 | DsaDataBase3.py | Python | gpl-2.0 | 44,632 | [
"VisIt"
] | 98fc104629b5652343cfe5cfbbadf9def1166bebafb15cb3d3b5b0be0fa163f4 |
# goal: find an embedding based on RMSD (dis)similarities
# Script: compute pairwise RMSD matrices for two example MD datasets
# (alanine dipeptide and Fs peptide) and save them as grayscale images.
import numpy as np
import pylab as pl
import numpy.random as npr
pl.rcParams['image.cmap'] = 'gray'
# import data
from msmbuilder.example_datasets import AlanineDipeptide
dataset = AlanineDipeptide().get()
ala_trajectories = dataset.trajectories
from msmbuilder.example_datasets import FsPeptide
dataset = FsPeptide().get()
fs_trajectories = dataset.trajectories
# Work on the first trajectory of each dataset only.
t = ala_trajectories[0]
fs_t = fs_trajectories[0]
# compute pairwise RMSD among snapshots of the chosen trajectory
import mdtraj
n = 2000  # number of frames considered from each trajectory
rmsd_fs = np.zeros((n,n))
rmsd_ala = np.zeros((n,n))
# Fill the strict lower triangle: row i holds RMSD of frame i vs frames < i.
for i in range(1,n):
    rmsd_ala[i,:i] = mdtraj.rmsd(t[:i],t[i])
    rmsd_fs[i,:i] = mdtraj.rmsd(fs_t[:i],fs_t[i])
# Symmetrize (diagonal is zero); frame 0 is dropped by the [1:,1:] slice
# since its row was never computed by the loop above.
rmsd_ala = (rmsd_ala + rmsd_ala.T)[1:,1:]
rmsd_fs = (rmsd_fs + rmsd_fs.T)[1:,1:]
# plot figures
pl.rc('font', family='serif')
pl.imshow(rmsd_ala,interpolation='none')
pl.colorbar()
pl.title('Pairwise RMSD: Alanine dipeptide')
pl.savefig('figures/alanine-dipeptide-rmsd-gray.jpg',dpi=300)
pl.close()
pl.imshow(rmsd_fs,interpolation='none')
pl.colorbar()
pl.title('Pairwise RMSD: Fs peptide')
pl.savefig('figures/fs-peptide-rmsd-gray.jpg',dpi=300)
pl.close()
"MDTraj"
] | 057a9d045a3c0a811b97df19e2ba2294b6b56c2c300ada26f1c501b6df2af603 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from ase.units import kJ
from sklearn.metrics import mean_squared_error
from math import sqrt
#rms = sqrt(mean_squared_error(y_actual, y_predicted))
import numpy as np
# Import curve_fit; on SciPy versions lacking it, fall back to a minimal
# re-implementation on top of scipy.optimize.leastsq (adapted from scipy's
# own minpack.py), and to ``None`` when scipy is absent entirely.
try:
    from scipy.optimize import curve_fit
except ImportError:
    try:
        from scipy.optimize import leastsq
        # this part comes from
        # http://projects.scipy.org/scipy/browser/trunk/scipy/optimize/minpack.py
        def _general_function(params, xdata, ydata, function):
            return function(xdata, *params) - ydata
        # end of this part
        def curve_fit(f, x, y, p0):
            func = _general_function
            args = (x, y, f)
            # this part comes from
            # http://projects.scipy.org/scipy/browser/trunk/scipy/optimize/minpack.py
            popt, pcov, infodict, mesg, ier = leastsq(func, p0, args=args,
                                                      full_output=1)
            if ier not in [1, 2, 3, 4]:
                raise RuntimeError("Optimal parameters not found: " + mesg)
            # end of this part
            return popt, pcov
    except ImportError:
        curve_fit = None
# Supported equation-of-state flavours; every entry except 'sj' must match
# the name of a module-level fit function defined below.
eos_names = ['sj', 'taylor', 'murnaghan', 'birch', 'birchmurnaghan',
             'pouriertarantola', 'vinet', 'antonschmidt', 'p3']
def taylor(V, E0, beta, alpha, V0):
    """Third-order Taylor expansion of the energy about the volume V0."""
    dv = V - V0
    quadratic = beta / 2 * dv ** 2 / V0
    cubic = alpha / 6 * dv ** 3 / V0
    return E0 + quadratic + cubic
def murnaghan(V, E0, B0, BP, V0):
    """Murnaghan equation of state, from PRB 28, 5480 (1983)."""
    compression = V0 / V
    bracket = (compression ** BP) / (BP - 1) + 1
    return E0 + B0 * V / BP * bracket - V0 * B0 / (BP - 1)
def birch(V, E0, B0, BP, V0):
    """Birch equation of state (n=0 case).

    From Intermetallic compounds: Principles and Practice, Vol. I,
    chapter 9, pages 195-210 (M. Mehl, B. Klein,
    D. Papaconstantopoulos).
    """
    strain = (V0 / V) ** (2 / 3) - 1
    e_quadratic = 9 / 8 * B0 * V0 * strain ** 2
    e_cubic = 9 / 16 * B0 * V0 * (BP - 4) * strain ** 3
    return E0 + e_quadratic + e_cubic
def birchmurnaghan(V, E0, B0, BP, V0):
    """Birch-Murnaghan equation of state from PRB 70, 224107 (eq. 3).

    Note: the paper contains a typo and uses the inverted expression
    for eta.
    """
    eta_sq = ((V0 / V) ** (1 / 3)) ** 2
    shape_factor = 6 + BP * (eta_sq - 1) - 4 * eta_sq
    return E0 + 9 * B0 * V0 / 16 * (eta_sq - 1) ** 2 * shape_factor
def check_birchmurnaghan():
    """Symbolically verify the birchmurnaghan() expression.

    Developer sanity check (requires sympy, not a runtime dependency):
    prints the symbolic energy, then the bulk modulus V*d2E/dV2 and its
    derived pressure-derivative expression evaluated at V0 — presumably
    these should simplify back to ``b`` and ``bp``; verify when run.
    """
    from sympy import symbols, Rational, diff, simplify
    v, b, bp, v0 = symbols('v b bp v0')
    x = (v0 / v)**Rational(2, 3)
    e = 9 * b * v0 * (x - 1)**2 * (6 + bp * (x - 1) - 4 * x) / 16
    print(e)
    B = diff(e, v, 2) * v
    BP = -v * diff(B, v) / b
    print(simplify(B.subs(v, v0)))
    print(simplify(BP.subs(v, v0)))
def pouriertarantola(V, E0, B0, BP, V0):
    """Pourier-Tarantola (logarithmic) equation of state, PRB 70, 224107."""
    log_strain = -3 * np.log((V / V0) ** (1 / 3))
    return E0 + B0 * V0 * log_strain ** 2 / 6 * (3 + log_strain * (BP - 2))
def vinet(V, E0, B0, BP, V0):
    """Vinet equation of state from PRB 70, 224107."""
    eta = (V / V0) ** (1 / 3)
    prefactor = 2 * B0 * V0 / (BP - 1) ** 2
    polynomial = 5 + 3 * BP * (eta - 1) - 3 * eta
    exponential = np.exp(-3 * (BP - 1) * (eta - 1) / 2)
    return E0 + prefactor * (2 - polynomial * exponential)
def antonschmidt(V, Einf, B, n, V0):
    """Anton-Schmidt equation of state, Intermetallics 11, 23-32 (2003).

    Einf is nominally the energy at infinite separation, but per the paper
    it is not a good estimate of the cohesive energy (the underlying
    empirical P(V) breaks down at large volumes).  n should be about -2.
    This form fits volumetric data less well than the other equations.
    """
    x = V / V0
    envelope = B * V0 / (n + 1) * x ** (n + 1)
    return envelope * (np.log(x) - 1 / (n + 1)) + Einf
def p3(V, c0, c1, c2, c3):
    """Cubic polynomial c0 + c1*V + c2*V**2 + c3*V**3 (Horner form)."""
    return ((c3 * V + c2) * V + c1) * V + c0
def parabola(x, a, b, c):
    """Second-order polynomial a + b*x + c*x**2.

    Used to get initial guesses for the equation-of-state fits; a 2nd-order
    fit is enough and guarantees a single minimum (a 4th-order fit proved
    too wiggly on noisy data).
    """
    return a + x * (b + c * x)
class EquationOfState:
    """Fit equation of state for bulk systems.

    The following equation is used::

        sjeos (default)
            A third order inverse polynomial fit, 10.1103/PhysRevB.67.026103
            E(V) = c0 + c1*t + c2*t**2 + c3*t**3, with t = V**(-1/3)
        taylor
            A third order Taylor series expansion about the minimum volume
        murnaghan
            PRB 28, 5480 (1983)
        birch
            Intermetallic compounds: Principles and Practice,
            Vol I: Principles. pages 195-210
        birchmurnaghan
            PRB 70, 224107
        pouriertarantola
            PRB 70, 224107
        vinet
            PRB 70, 224107
        antonschmidt
            Intermetallics 11, 23-32 (2003)
        p3
            A third order polynomial fit

    Use::

        eos = EquationOfState(volumes, energies, eos='murnaghan')
        v0, e0, B = eos.fit()
        eos.plot()

    NOTE(review): for non-'sj' flavours, fit() returns a 9-tuple (with error
    estimates) rather than the (v0, e0, B) triple — callers must unpack
    accordingly; confirm against main() below, which assumes a 3-tuple.
    """
    def __init__(self, volumes, energies, eos='sj'):
        self.v = np.array(volumes)
        self.e = np.array(energies)
        if eos == 'sjeos':
            eos = 'sj'
        self.eos_string = eos
        # v0 is None until fit()/fit_sjeos() has been run.
        self.v0 = None
    def fit(self):
        """Calculate volume, energy, and bulk modulus.

        For 'sj' this delegates to fit_sjeos() and returns (v0, e0, B).
        For the other flavours it returns
        (e0, e0_err, v0, v0_err, B, B_err, BP, BP_err, fitted_energies),
        or nine 'Warn' strings when the parabola pre-fit has no minimum
        inside the sampled volume range.  ASE's bulk modulus unit is
        eV/Angstrom^3; multiply by 1/kJ * 1.0e24 to get GPa.
        """
        if self.eos_string == 'sj':
            return self.fit_sjeos()
        # Resolve the fit function by name from this module's namespace.
        self.func = globals()[self.eos_string]
        p0 = [min(self.e), 1, 1]
        popt, pcov = curve_fit(parabola, self.v, self.e, p0)
        parabola_parameters = popt
        # Here I just make sure the minimum is bracketed by the volumes
        # this if for the solver
        minvol = min(self.v)
        maxvol = max(self.v)
        # the minimum of the parabola is at dE/dV = 0, or 2 * c V +b =0
        c = parabola_parameters[2]
        b = parabola_parameters[1]
        a = parabola_parameters[0]
        parabola_vmin = -b / 2 / c
        catch_warn = False
        if not (minvol < parabola_vmin and parabola_vmin < maxvol):
            catch_warn=True
            print('Warning the minimum volume of a fitted parabola is not in '
                  'your volumes. You may not have a minimum in your dataset')
        # evaluate the parabola at the minimum to estimate the groundstate
        # energy
        E0 = parabola(parabola_vmin, a, b, c)
        # estimate the bulk modulus from Vo * E''.  E'' = 2 * c
        B0 = 2 * c * parabola_vmin
        if self.eos_string == 'antonschmidt':
            BP = -2
        else:
            BP = 4
        initial_guess = [E0, B0, BP, parabola_vmin]
        # now fit the equation of state
        if not catch_warn:
            p0 = initial_guess
            popt, pcov = curve_fit(self.func, self.v, self.e, p0)
            self.eos_parameters = popt
            # 1-sigma parameter errors from the covariance diagonal.
            self.eos_errors = np.sqrt(np.diag(pcov))
            if self.eos_string == 'p3':
                c0, c1, c2, c3 = self.eos_parameters
                # find minimum E in E = c0 + c1 * V + c2 * V**2 + c3 * V**3
                # dE/dV = c1+ 2 * c2 * V + 3 * c3 * V**2 = 0
                # solve by quadratic formula with the positive root
                a = 3 * c3
                b = 2 * c2
                c = c1
                self.v0 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)
                self.e0 = p3(self.v0, c0, c1, c2, c3)
                self.B = (2 * c2 + 6 * c3 * self.v0) * self.v0
            else:
                # Parameter order for all non-p3 flavours: (E0, B0, BP, V0).
                self.v0 = self.eos_parameters[3]
                self.v0_err = self.eos_errors[3]
                self.e0 = self.eos_parameters[0]
                self.e0_err = self.eos_errors[0]
                self.B = self.eos_parameters[1]
                self.B_err = self.eos_errors[1]
                self.BP = self.eos_parameters[2]
                self.BP_errors = self.eos_errors[2]
            # print (self.func(self.v,self.eos_parameters[0],self.eos_parameters[1],self.eos_parameters[2],self.eos_parameters[3]))
            return self.e0, self.e0_err, self.v0, self.v0_err,\
                self.B, self.B_err, self.BP , self.BP_errors, self.func(self.v,self.eos_parameters[0],self.eos_parameters[1],self.eos_parameters[2],self.eos_parameters[3])
        else:
            return 'Warn', 'Warn', 'Warn', 'Warn', 'Warn', 'Warn', 'Warn', 'Warn', 'Warn'
    def plot(self, filename=None, show=None, ax=None):
        """Plot fitted energy curve.

        Uses Matplotlib to plot the energy curve.  Use *show=True* to
        show the figure and *filename='abc.png'* or
        *filename='abc.eps'* to save the figure to a file."""
        import matplotlib.pyplot as plt
        if self.v0 is None:
            self.fit()
        if filename is None and show is None:
            show = True
        if ax is None:
            ax = plt.gca()
        x = np.linspace(min(self.v), max(self.v), 100)
        if self.eos_string == 'sj':
            y = self.fit0(x**-(1 / 3))
        else:
            y = self.func(x, *self.eos_parameters)
        ax.plot(x, y, '-r')
        ax.plot(self.v, self.e, 'o')
        try:
            ax.set_xlabel(u'volume [Å$^3$]')
            ax.set_ylabel(u'energy [eV]')
            ax.set_title(u'%s: E: %.3f eV, V: %.3f Å$^3$, B: %.3f GPa' %
                         (self.eos_string, self.e0, self.v0,
                          self.B / kJ * 1.e24))
        except ImportError:  # XXX what would cause this error?  LaTeX?
            ax.set_xlabel(u'volume [L(length)^3]')
            ax.set_ylabel(u'energy [E(energy)]')
            ax.set_title(u'%s: E: %.3f E, V: %.3f L^3, B: %.3e E/L^3' %
                         (self.eos_string, self.e0, self.v0, self.B))
        if show:
            plt.show()
        if filename is not None:
            fig = ax.get_figure()
            fig.savefig(filename)
        return ax
    def fit_sjeos(self):
        """Calculate volume, energy, and bulk modulus with the sjeos fit.

        Returns the optimal volume, the minimum energy, and the bulk
        modulus.  Notice that the ASE units for the bulk modulus is
        eV/Angstrom^3 - to get the value in GPa, do this::

            v0, e0, B = eos.fit()
            print(B / kJ * 1.0e24, 'GPa')
        """
        # Cubic polynomial in t = V**(-1/3); its derivative roots locate the
        # energy minimum.
        fit0 = np.poly1d(np.polyfit(self.v**-(1 / 3), self.e, 3))
        fit1 = np.polyder(fit0, 1)
        fit2 = np.polyder(fit1, 1)
        self.v0 = None
        for t in np.roots(fit1):
            # Accept only real, positive roots with positive curvature.
            if isinstance(t, float) and t > 0 and fit2(t) > 0:
                self.v0 = t**-3
                break
        if self.v0 is None:
            raise ValueError('No minimum!')
        self.e0 = fit0(t)
        self.B = t**5 * fit2(t) / 9
        self.fit0 = fit0
        return self.v0, self.e0, self.B
def main():
    """Command-line entry point: fit (or plot) an EOS per trajectory file.

    Reads ASE trajectory files given as arguments (or pickled (v, e) data
    from stdin when the argument is '-', as sent by ase-gui), fits the
    requested EOS flavour, and prints v0/e0/B per file or shows a plot.
    """
    import optparse
    from ase.io import read
    parser = optparse.OptionParser(usage='python -m ase.eos [options] '
                                   'trajectory, ...',
                                   description='Calculate equation of state.')
    parser.add_option('-p', '--plot', action='store_true')
    parser.add_option('-t', '--type', default='sj')
    opts, args = parser.parse_args()
    if not opts.plot:
        print('# filename                '
              'points     volume    energy  bulk modulus')
        print('#                         '
              '          [Ang^3]     [eV]         [GPa]')
    for name in args:
        if name == '-':
            # Special case - used by ase-gui:
            import pickle
            import sys
            if sys.version_info[0] == 2:
                v, e = pickle.load(sys.stdin)
            else:
                v, e = pickle.load(sys.stdin.buffer)
        else:
            if '@' in name:
                index = None
            else:
                index = ':'
            images = read(name, index=index)
            v = [atoms.get_volume() for atoms in images]
            e = [atoms.get_potential_energy() for atoms in images]
        eos = EquationOfState(v, e, opts.type)
        if opts.plot:
            eos.plot()
        else:
            try:
                v0, e0, B = eos.fit()
            except ValueError as ex:
                print('{0:30}{1:2} {2}'.format(name, len(v), ex.message))
            else:
                # Convert B from eV/Angstrom^3 to GPa for display.
                print('{0:30}{1:2} {2:10.3f}{3:10.3f}{4:14.3f}'
                      .format(name, len(v), v0, e0, B / kJ * 1.0e24))
# Allow running this module directly as a command-line script.
if __name__ == '__main__':
    main()
| joshgabriel/dft-crossfilter | CompleteApp/eos.py | Python | mit | 13,301 | [
"ASE"
] | ebd40b85eb23fdeca09c73d42f5bb34d285edd228145c02c762e0f37f10777dd |
"""Gaussian Mixture Model."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import BaseMixture, _check_shape
from ..externals.six.moves import zip
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..utils.extmath import row_norms
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
    """Validate the user-provided mixture weights.

    Parameters
    ----------
    weights : array-like, shape (n_components,)
        The proportions of components of each mixture.

    n_components : int
        Number of components.

    Returns
    -------
    weights : array, shape (n_components,)
    """
    weights = check_array(weights, dtype=[np.float64, np.float32],
                          ensure_2d=False)
    _check_shape(weights, (n_components,), 'weights')

    # every weight must lie inside [0, 1]
    if np.any(weights < 0.) or np.any(weights > 1.):
        raise ValueError("The parameter 'weights' should be in the range "
                         "[0, 1], but got max value %.5f, min value %.5f"
                         % (np.min(weights), np.max(weights)))

    # the weights must sum (numerically) to one
    if not np.allclose(np.abs(1. - np.sum(weights)), 0.):
        raise ValueError("The parameter 'weights' should be normalized, "
                         "but got sum(weights) = %.5f" % np.sum(weights))
    return weights
def _check_means(means, n_components, n_features):
    """Validate the provided component centers.

    Parameters
    ----------
    means : array-like, shape (n_components, n_features)
        The centers of the current components.

    n_components : int
        Number of components.

    n_features : int
        Number of features.

    Returns
    -------
    means : array, (n_components, n_features)
    """
    validated = check_array(means, dtype=[np.float64, np.float32],
                            ensure_2d=False)
    _check_shape(validated, (n_components, n_features), 'means')
    return validated
def _check_precision_positivity(precision, covariance_type):
"""Check a precision vector is positive-definite."""
if np.any(np.less_equal(precision, 0.0)):
raise ValueError("'%s precision' should be "
"positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
"""Check a precision matrix is symmetric and positive-definite."""
if not (np.allclose(precision, precision.T) and
np.all(linalg.eigvalsh(precision) > 0.)):
raise ValueError("'%s precision' should be symmetric, "
"positive-definite" % covariance_type)
def _check_precisions_full(precisions, covariance_type):
    """Check every precision matrix is symmetric and positive-definite.

    Delegates to _check_precision_matrix, which raises ValueError on
    failure and returns nothing — so the previous ``prec = ...``
    assignment (always None) and the unused enumerate index are dropped.
    """
    for prec in precisions:
        _check_precision_matrix(prec, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
    """Validate user provided precisions.

    Parameters
    ----------
    precisions : array-like,
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)

    covariance_type : string

    n_components : int
        Number of components.

    n_features : int
        Number of features.

    Returns
    -------
    precisions : array
    """
    # BUG FIX: the original used ``covariance_type is 'full'`` — an identity
    # comparison against a string literal that only works when CPython
    # happens to intern both strings.  Use equality instead.
    precisions = check_array(precisions, dtype=[np.float64, np.float32],
                             ensure_2d=False,
                             allow_nd=covariance_type == 'full')

    precisions_shape = {'full': (n_components, n_features, n_features),
                        'tied': (n_features, n_features),
                        'diag': (n_components, n_features),
                        'spherical': (n_components,)}
    _check_shape(precisions, precisions_shape[covariance_type],
                 '%s precision' % covariance_type)

    # Renamed from ``_check_precisions`` to avoid shadowing this function's
    # own name inside its body.
    check_functions = {'full': _check_precisions_full,
                       'tied': _check_precision_matrix,
                       'diag': _check_precision_positivity,
                       'spherical': _check_precision_positivity}
    check_functions[covariance_type](precisions, covariance_type)
    return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
n_components, n_features = means.shape
covariances = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariances[k].flat[::n_features + 1] += reg_covar
return covariances
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
"""Estimate the tied covariance matrix.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariance : array, shape (n_features, n_features)
The tied covariance matrix of the components.
"""
n_samples, _ = X.shape
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(nk * means.T, means)
covariance = avg_X2 - avg_means2
covariance /= n_samples
covariance.flat[::len(covariance) + 1] += reg_covar
return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
"""Estimate the diagonal covariance vectors.
Parameters
----------
responsibilities : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features)
The covariance vector of the current components.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means ** 2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
    """Estimate one spherical variance per component.

    The spherical variance is the mean over features of the diagonal
    covariance estimate.

    Parameters
    ----------
    resp : array-like, shape (n_samples, n_components)

    X : array-like, shape (n_samples, n_features)

    nk : array-like, shape (n_components,)

    means : array-like, shape (n_components, n_features)

    reg_covar : float

    Returns
    -------
    variances : array, shape (n_components,)
    """
    per_feature = _estimate_gaussian_covariances_diag(resp, X, nk, means,
                                                      reg_covar)
    return per_feature.mean(axis=1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
    """Estimate the Gaussian mixture parameters for the M-step.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The input data array.

    resp : array-like, shape (n_samples, n_features)
        The responsibilities for each data sample in X.

    reg_covar : float
        The regularization added to the diagonal of the covariance matrices.

    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.

    Returns
    -------
    nk : array-like, shape (n_components,)
        The effective number of samples in each component.

    means : array-like, shape (n_components, n_features)
        The centers of the current components.

    covariances : array-like
        The covariance estimate; its shape depends on covariance_type.
    """
    # Small epsilon keeps nk strictly positive so the divisions below
    # cannot blow up for empty components.
    nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
    means = np.dot(resp.T, X) / nk[:, np.newaxis]
    estimators = {
        "full": _estimate_gaussian_covariances_full,
        "tied": _estimate_gaussian_covariances_tied,
        "diag": _estimate_gaussian_covariances_diag,
        "spherical": _estimate_gaussian_covariances_spherical,
    }
    covariances = estimators[covariance_type](resp, X, nk, means, reg_covar)
    return nk, means, covariances
def _compute_precision_cholesky(covariances, covariance_type):
"""Compute the Cholesky decomposition of the precisions.
Parameters
----------
covariances : array-like
The covariance matrix of the current components.
The shape depends of the covariance_type.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
precisions_cholesky : array-like
The cholesky decomposition of sample precisions of the current
components. The shape depends of the covariance_type.
"""
estimate_precision_error_message = (
"The algorithm has diverged because of too few samples per "
"components. Try to decrease the number of components, "
"or increase reg_covar.")
if covariance_type in 'full':
n_components, n_features, _ = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True).T
elif covariance_type is 'tied':
_, n_features = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features),
lower=True).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1. / np.sqrt(covariances)
return precisions_chol
###############################################################################
# Gaussian mixture probability estimators
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like,
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like, shape (n_components,)
The determinant of the cholesky decomposition.
matrix.
"""
if covariance_type == 'full':
n_components, _, _ = matrix_chol.shape
log_det_chol = (np.sum(np.log(
matrix_chol.reshape(
n_components, -1)[:, ::n_features + 1]), 1))
elif covariance_type == 'tied':
log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))
elif covariance_type == 'diag':
log_det_chol = (np.sum(np.log(matrix_chol), axis=1))
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
    """Estimate the log Gaussian probability.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    means : array-like, shape (n_components, n_features)
    precisions_chol : array-like
        Cholesky decompositions of the precision matrices.
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)
    covariance_type : {'full', 'tied', 'diag', 'spherical'}

    Returns
    -------
    log_prob : array, shape (n_samples, n_components)
    """
    n_samples, n_features = X.shape
    n_components = means.shape[0]
    # det(precision_chol) is half of det(precision)
    log_det = _compute_log_det_cholesky(
        precisions_chol, covariance_type, n_features)
    if covariance_type == 'full':
        log_prob = np.empty((n_samples, n_components))
        for k in range(n_components):
            dev = np.dot(X, precisions_chol[k]) - np.dot(means[k],
                                                         precisions_chol[k])
            log_prob[:, k] = np.sum(dev ** 2, axis=1)
    elif covariance_type == 'tied':
        log_prob = np.empty((n_samples, n_components))
        # The shared factor is the same for every component, so transform
        # the data once outside the loop.
        X_trans = np.dot(X, precisions_chol)
        for k in range(n_components):
            dev = X_trans - np.dot(means[k], precisions_chol)
            log_prob[:, k] = np.sum(dev ** 2, axis=1)
    elif covariance_type == 'diag':
        precisions = precisions_chol ** 2
        # Expand ||sqrt(P) (x - mu)||^2 into three dot products.
        log_prob = (np.sum((means ** 2 * precisions), 1) -
                    2. * np.dot(X, (means * precisions).T) +
                    np.dot(X ** 2, precisions.T))
    elif covariance_type == 'spherical':
        precisions = precisions_chol ** 2
        log_prob = (np.sum(means ** 2, 1) * precisions -
                    2 * np.dot(X, means.T * precisions) +
                    np.outer(row_norms(X, squared=True), precisions))
    return -.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
class GaussianMixture(BaseMixture):
    """Gaussian Mixture.

    Representation of a Gaussian mixture model probability distribution.
    This class allows to estimate the parameters of a Gaussian mixture
    distribution.

    Parameters
    ----------
    n_components : int, defaults to 1.
        The number of mixture components.
    covariance_type : {'full', 'tied', 'diag', 'spherical'},
            defaults to 'full'.
        String describing the type of covariance parameters to use.
        Must be one of::
            'full' (each component has its own general covariance matrix),
            'tied' (all components share the same general covariance matrix),
            'diag' (each component has its own diagonal covariance matrix),
            'spherical' (each component has its own single variance).
    tol : float, defaults to 1e-3.
        The convergence threshold. EM iterations will stop when the
        log_likelihood average gain is below this threshold.
    reg_covar : float, defaults to 1e-6.
        Non-negative regularization added to the diagonal of covariance.
        Allows to assure that the covariance matrices are all positive.
    max_iter : int, defaults to 100.
        The number of EM iterations to perform.
    n_init : int, defaults to 1.
        The number of initializations to perform. The best results are kept.
    init_params : {'kmeans', 'random'}, defaults to 'kmeans'.
        The method used to initialize the weights, the means and the
        precisions.
        Must be one of::
            'kmeans' : responsibilities are initialized using kmeans.
            'random' : responsibilities are initialized randomly.
    weights_init : array-like, shape (n_components, ), optional
        The user-provided initial weights, defaults to None.
        If it None, weights are initialized using the `init_params` method.
    means_init : array-like, shape (n_components, n_features), optional
        The user-provided initial means, defaults to None,
        If it None, means are initialized using the `init_params` method.
    precisions_init : array-like, optional.
        The user-provided initial precisions (inverse of the covariance
        matrices), defaults to None.
        If it None, precisions are initialized using the 'init_params' method.
        The shape depends on 'covariance_type'::
            (n_components,) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    random_state : RandomState or an int seed, defaults to None.
        A random number generator instance.
    warm_start : bool, default to False.
        If 'warm_start' is True, the solution of the last fitting is used as
        initialization for the next call of fit(). This can speed up
        convergence when fit is called several time on similar problems.
    verbose : int, default to 0.
        Enable verbose output. If 1 then it prints the current
        initialization and each iteration step. If greater than 1 then
        it prints also the log probability and the time needed
        for each step.

    Attributes
    ----------
    weights_ : array-like, shape (n_components,)
        The weights of each mixture components.
    means_ : array-like, shape (n_components, n_features)
        The mean of each mixture component.
    covariances_ : array-like
        The covariance of each mixture component.
        The shape depends on `covariance_type`::
            (n_components,) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    precisions_ : array-like
        The precision matrices for each component in the mixture. A precision
        matrix is the inverse of a covariance matrix. A covariance matrix is
        symmetric positive definite so the mixture of Gaussian can be
        equivalently parameterized by the precision matrices. Storing the
        precision matrices instead of the covariance matrices makes it more
        efficient to compute the log-likelihood of new samples at test time.
        The shape depends on `covariance_type`::
            (n_components,) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    precisions_cholesky_ : array-like
        The cholesky decomposition of the precision matrices of each mixture
        component. A precision matrix is the inverse of a covariance matrix.
        A covariance matrix is symmetric positive definite so the mixture of
        Gaussian can be equivalently parameterized by the precision matrices.
        Storing the precision matrices instead of the covariance matrices makes
        it more efficient to compute the log-likelihood of new samples at test
        time. The shape depends on `covariance_type`::
            (n_components,) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    converged_ : bool
        True when convergence was reached in fit(), False otherwise.
    n_iter_ : int
        Number of step used by the best fit of EM to reach the convergence.
    lower_bound_ : float
        Log-likelihood of the best fit of EM.
    """

    def __init__(self, n_components=1, covariance_type='full', tol=1e-3,
                 reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
                 weights_init=None, means_init=None, precisions_init=None,
                 random_state=None, warm_start=False,
                 verbose=0, verbose_interval=10):
        super(GaussianMixture, self).__init__(
            n_components=n_components, tol=tol, reg_covar=reg_covar,
            max_iter=max_iter, n_init=n_init, init_params=init_params,
            random_state=random_state, warm_start=warm_start,
            verbose=verbose, verbose_interval=verbose_interval)
        self.covariance_type = covariance_type
        self.weights_init = weights_init
        self.means_init = means_init
        self.precisions_init = precisions_init

    def _check_parameters(self, X):
        """Check the Gaussian mixture parameters are well defined."""
        _, n_features = X.shape
        if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError("Invalid value for 'covariance_type': %s "
                             "'covariance_type' should be in "
                             "['spherical', 'tied', 'diag', 'full']"
                             % self.covariance_type)

        if self.weights_init is not None:
            self.weights_init = _check_weights(self.weights_init,
                                               self.n_components)

        if self.means_init is not None:
            self.means_init = _check_means(self.means_init,
                                           self.n_components, n_features)

        if self.precisions_init is not None:
            self.precisions_init = _check_precisions(self.precisions_init,
                                                     self.covariance_type,
                                                     self.n_components,
                                                     n_features)

    def _initialize(self, X, resp):
        """Initialization of the Gaussian mixture parameters.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        resp : array-like, shape (n_samples, n_components)
        """
        n_samples, _ = X.shape

        weights, means, covariances = _estimate_gaussian_parameters(
            X, resp, self.reg_covar, self.covariance_type)
        # _estimate_gaussian_parameters returns unnormalized component
        # responsibilities; normalize them into mixture weights.
        weights /= n_samples

        self.weights_ = (weights if self.weights_init is None
                         else self.weights_init)
        self.means_ = means if self.means_init is None else self.means_init

        if self.precisions_init is None:
            self.covariances_ = covariances
            self.precisions_cholesky_ = _compute_precision_cholesky(
                covariances, self.covariance_type)
        # NOTE: these comparisons used "is 'full'"/"is 'tied'", which relies
        # on CPython string interning; '==' is the correct comparison.
        elif self.covariance_type == 'full':
            self.precisions_cholesky_ = np.array(
                [linalg.cholesky(prec_init, lower=True)
                 for prec_init in self.precisions_init])
        elif self.covariance_type == 'tied':
            self.precisions_cholesky_ = linalg.cholesky(self.precisions_init,
                                                        lower=True)
        else:
            self.precisions_cholesky_ = self.precisions_init

    def _e_step(self, X):
        # Returns the mean log-likelihood and the (dense) responsibilities.
        log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
        return np.mean(log_prob_norm), np.exp(log_resp)

    def _m_step(self, X, resp):
        n_samples, _ = X.shape
        self.weights_, self.means_, self.covariances_ = (
            _estimate_gaussian_parameters(X, resp, self.reg_covar,
                                          self.covariance_type))
        self.weights_ /= n_samples
        self.precisions_cholesky_ = _compute_precision_cholesky(
            self.covariances_, self.covariance_type)

    def _estimate_log_prob(self, X):
        return _estimate_log_gaussian_prob(
            X, self.means_, self.precisions_cholesky_, self.covariance_type)

    def _estimate_log_weights(self):
        return np.log(self.weights_)

    def _compute_lower_bound(self, _, log_prob_norm):
        return log_prob_norm

    def _check_is_fitted(self):
        check_is_fitted(self, ['weights_', 'means_', 'precisions_cholesky_'])

    def _get_parameters(self):
        return (self.weights_, self.means_, self.covariances_,
                self.precisions_cholesky_)

    def _set_parameters(self, params):
        (self.weights_, self.means_, self.covariances_,
         self.precisions_cholesky_) = params

        # Attributes computation: recover precisions from their Cholesky
        # factors (precision = L L^T for 'full'/'tied', elementwise square
        # for 'diag'/'spherical').
        if self.covariance_type == 'full':
            self.precisions_ = np.empty(self.precisions_cholesky_.shape)
            for k, prec_chol in enumerate(self.precisions_cholesky_):
                self.precisions_[k] = np.dot(prec_chol, prec_chol.T)

        elif self.covariance_type == 'tied':
            self.precisions_ = np.dot(self.precisions_cholesky_,
                                      self.precisions_cholesky_.T)
        else:
            self.precisions_ = self.precisions_cholesky_ ** 2

    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        _, n_features = self.means_.shape
        if self.covariance_type == 'full':
            cov_params = self.n_components * n_features * (n_features + 1) / 2.
        elif self.covariance_type == 'diag':
            cov_params = self.n_components * n_features
        elif self.covariance_type == 'tied':
            cov_params = n_features * (n_features + 1) / 2.
        elif self.covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = n_features * self.n_components
        # n_components - 1 free weights (they sum to one).
        return int(cov_params + mean_params + self.n_components - 1)

    def bic(self, X):
        """Bayesian information criterion for the current model on the input X.

        Parameters
        ----------
        X : array of shape (n_samples, n_dimensions)

        Returns
        -------
        bic : float
            The lower the better.
        """
        return (-2 * self.score(X) * X.shape[0] +
                self._n_parameters() * np.log(X.shape[0]))

    def aic(self, X):
        """Akaike information criterion for the current model on the input X.

        Parameters
        ----------
        X : array of shape (n_samples, n_dimensions)

        Returns
        -------
        aic : float
            The lower the better.
        """
        return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
| sonnyhu/scikit-learn | sklearn/mixture/gaussian_mixture.py | Python | bsd-3-clause | 26,908 | [
"Gaussian"
] | b8648531e9f58979c64011a11200f37d3f7421940504b24325ea3007ace78b6a |
# -*- coding: utf-8 -*-
r"""Tests lattice math
"""
import numpy as np
import pytest
from neutronpy.crystal import lattice
unitcell = lattice.Lattice(4, 4, 4, 90, 90, 90)
def test_get_angle_between_planes():
    """Tests get angle between planes defined by two vectors
    """
    # Use abs(): without it the assertion also passes for any result far
    # *below* the reference value, making the test vacuous in one direction.
    assert (abs(unitcell.get_angle_between_planes([1, 0, 0], [1, 1, 1]) - 54.73561031724535) < 1e-6)
def test_get_d_spacing():
    """d-spacing of the (111) reflection of the cubic fixture cell."""
    expected = unitcell.a / np.sqrt(3)
    assert (unitcell.get_d_spacing([1, 1, 1]) == expected)
def test_get_q():
    """|Q| of the (111) reflection of the cubic fixture cell."""
    expected = 2 * np.pi / unitcell.b * np.sqrt(3)
    assert (abs(unitcell.get_q([1, 1, 1]) - expected) < 1e-12)
def test_get_two_theta():
    """Scattering angle 2-theta of (111) at a wavelength of 2."""
    two_theta = unitcell.get_two_theta([1, 1, 1], 2)
    assert (two_theta == 51.317812546510552)
def test_constants():
    """Check that the lattice getters expose consistent direct- and
    reciprocal-space constants (expressions kept exact for == comparisons).
    """
    angles = np.array([unitcell.alpha, unitcell.beta, unitcell.gamma])
    lengths = np.array([unitcell.a, unitcell.b, unitcell.c])
    angles_rad = np.array([unitcell.alpha_rad, unitcell.beta_rad, unitcell.gamma_rad])
    star_angles_rad = np.array([unitcell.alphastar_rad, unitcell.betastar_rad, unitcell.gammastar_rad])
    star_angles = np.array([unitcell.alphastar, unitcell.betastar, unitcell.gammastar])
    star_lengths = np.array([unitcell.astar, unitcell.bstar, unitcell.cstar])

    assert (np.all(unitcell.abc == lengths))
    assert (np.all(unitcell.abg == angles))
    assert (np.all(unitcell.abg_rad == angles_rad))
    assert (np.all(unitcell.abg_rad == np.deg2rad(angles)))
    assert (np.round(unitcell.volume, 12) == 4 ** 3)
    assert (np.round(unitcell.reciprocal_volume, 12) == np.round(8 * np.pi ** 3 / (4 ** 3), 12))
    # Standard reciprocal-lattice definitions: a* = 2 pi b c sin(alpha) / V, etc.
    assert (unitcell.astar == unitcell.b * unitcell.c * np.sin(unitcell.alpha_rad) / unitcell.volume * 2 * np.pi)
    assert (unitcell.bstar == unitcell.a * unitcell.c * np.sin(unitcell.beta_rad) / unitcell.volume * 2 * np.pi)
    assert (unitcell.cstar == unitcell.a * unitcell.b * np.sin(unitcell.gamma_rad) / unitcell.volume * 2 * np.pi)
    assert (np.all(star_angles_rad == np.deg2rad(star_angles)))
    assert (np.all(unitcell.reciprocal_abc == star_lengths))
    assert (np.all(unitcell.reciprocal_abg == star_angles))
    assert (np.all(unitcell.reciprocal_abg_rad == star_angles_rad))
    assert (np.all(np.round(unitcell.Bmatrix * unitcell.Bmatrix.T, 12) == np.round(unitcell.Gstar, 12)))
def test_lattice_type():
    """Lattice-type determination for each crystal system, table-driven."""
    cases = [((4, 4, 4, 90, 90, 90), 'cubic'),
             ((1, 1, 2, 90, 90, 90), 'tetragonal'),
             ((1, 2, 3, 90, 90, 90), 'orthorhombic'),
             ((1, 2, 3, 90, 89, 90), 'monoclinic'),
             ((1, 1, 1, 39, 39, 39), 'rhombohedral'),
             ((1, 1, 2, 90, 90, 120), 'hexagonal'),
             ((1, 2, 3, 30, 60, 120), 'triclinic')]
    for params, expected in cases:
        assert (lattice.Lattice(*params).lattice_type == expected)

    # A cell matching no crystal system should raise on attribute access.
    bad_cell = lattice.Lattice(1, 1, 2, 90, 90, 150)
    with pytest.raises(ValueError):
        getattr(bad_cell, 'lattice_type')
def test_goniometer_constants():
    """Test constants
    """
    # TODO: goniometer constants are not yet exercised; this placeholder
    # documents the intended coverage.
    pass
if __name__ == "__main__":
    # Allow running this module directly; delegates discovery to pytest.
    pytest.main()
| granrothge/neutronpy | tests/test_lattice.py | Python | mit | 3,524 | [
"CRYSTAL"
] | 2155b72dc70050cea492476796c6cbc5c9af9ba507d41e6098d8e652d2f97791 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################
The calculation of molecular topological indices based on its topological
structure. You can get 25 molecular topological descriptors. You can freely
use and distribute it. If you hava any problem, you could contact with us timely!
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: gadsby@163.com and oriental-cds@163.com
##############################################################################
"""
# Standard library
from collections import Counter
# Third party modules
import numpy
import scipy
from rdkit import Chem
from rdkit.Chem import GraphDescriptors as GD
from rdkit.Chem import rdchem
periodicTable = rdchem.GetPeriodicTable()
Version = 1.0
################################################################
def _GetPrincipleQuantumNumber(atNum):
"""
#################################################################
*Internal Use Only*
Get the principle quantum number of atom with atomic
number equal to atNum
#################################################################
"""
if atNum <= 2:
return 1
elif atNum <= 10:
return 2
elif atNum <= 18:
return 3
elif atNum <= 36:
return 4
elif atNum <= 54:
return 5
elif atNum <= 86:
return 6
else:
return 7
def CalculateWeiner(mol):
    """
    #################################################################
    Calculation of Weiner number in a molecule
    ---->W
    Usage:
        result=CalculateWeiner(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    # Half the grand sum of the topological distance matrix (each pair of
    # atoms appears twice in the symmetric matrix).
    return Chem.GetDistanceMatrix(mol).sum() / 2.0
def CalculateMeanWeiner(mol):
    """
    #################################################################
    Calculation of Mean Weiner number in a molecule
    ---->AW
    Usage:
        result=CalculateMeanWeiner(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    n_atoms = mol.GetNumAtoms()
    # Average the Wiener number over all unordered atom pairs.
    return 2.0 * CalculateWeiner(mol) / (n_atoms * (n_atoms - 1))
def CalculateBalaban(mol):
    """
    #################################################################
    Calculation of Balaban index in a molecule
    ---->J
    Usage:
        result=CalculateBalaban(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    adjacency = Chem.GetAdjacencyMatrix(mol)
    distance = Chem.GetDistanceMatrix(mol)
    n_bonds = mol.GetNumBonds()
    n_atoms = mol.GetNumAtoms()
    # Row sums of the distance matrix (distance degree of each atom).
    dist_sums = numpy.sum(distance, axis=1)
    # Cyclomatic number (number of independent rings).
    n_cycles = n_bonds - n_atoms + 1
    accum = 0.0
    size = len(distance)
    for i in range(size):
        for j in range(i, size):
            if adjacency[i, j] == 1:
                accum += 1.0 / numpy.sqrt(dist_sums[i] * dist_sums[j])
    if n_cycles + 1 == 0:
        return 0
    return float(n_bonds) / float(n_cycles + 1) * accum
def CalculateGraphDistance(mol):
    """
    #################################################################
    Calculation of graph distance index
    ---->Tigdi(log value)
    Usage:
        result=CalculateGraphDistance(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    distance = Chem.GetDistanceMatrix(mol)
    # Count how often each topological distance occurs; the matrix is
    # symmetric so every count is halved before squaring.
    counts = Counter(distance.flatten())
    total = sum((c * 0.5) ** 2 for c in counts.values())
    return numpy.log10(total)
def CalculateDiameter(mol):
    """
    #################################################################
    Calculation of diameter, which is the largest value
    in the distance matrix [Petitjean 1992].
    ---->diametert
    Usage:
        result=CalculateDiameter(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    return Chem.GetDistanceMatrix(mol).max()
def CalculateRadius(mol):
    """
    #################################################################
    Calculation of radius based on topology.
    If ri is the largest matrix entry in row i of the distance
    matrix D, then the radius is defined as the smallest of the ri
    [Petitjean 1992].
    ---->radiust
    Usage:
        result=CalculateRadius(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    distance = Chem.GetDistanceMatrix(mol)
    # Smallest per-row maximum (atom eccentricity).
    return min(row.max() for row in distance)
def CalculatePetitjean(mol):
    """
    #################################################################
    Calculation of Petitjean shape index based on topology.
    Value of (diameter - radius) / diameter as defined in [Petitjean 1992].
    ---->petitjeant
    Usage:
        result=CalculatePetitjean(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    diam = CalculateDiameter(mol)
    rad = CalculateRadius(mol)
    return 1 - rad / float(diam)
def CalculateXuIndex(mol):
    """
    #################################################################
    Calculation of Xu index
    ---->Xu
    Usage:
        result=CalculateXuIndex(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    nAT = mol.GetNumAtoms()
    deltas = [x.GetDegree() for x in mol.GetAtoms()]
    Distance = Chem.GetDistanceMatrix(mol)
    # numpy.sum instead of the deprecated scipy.sum alias (removed in
    # SciPy >= 1.12); the value is identical.
    sigma = numpy.sum(Distance, axis=1)
    temp1 = 0.0
    temp2 = 0.0
    for i in range(nAT):
        temp1 = temp1 + deltas[i] * ((sigma[i]) ** 2)
        temp2 = temp2 + deltas[i] * (sigma[i])
    Xu = numpy.sqrt(nAT) * numpy.log(temp1 / temp2)
    return Xu
def CalculateGutmanTopo(mol):
    """
    #################################################################
    Calculation of Gutman molecular topological index based on
    simple vertex degree
    ---->GMTI(log value)
    Usage:
        result=CalculateGutmanTopo(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    deltas = numpy.array([x.GetDegree() for x in mol.GetAtoms()], "d")
    Distance = Chem.GetDistanceMatrix(mol)
    # sum_{i<j} d_i * d_j * D_ij.  D is symmetric with a zero diagonal, so
    # halving the full degree-weighted sum replaces the original O(n^2)
    # Python double loop with vectorized numpy operations.
    res = 0.5 * numpy.sum(numpy.outer(deltas, deltas) * Distance)
    return numpy.log10(res)
def CalculatePolarityNumber(mol):
    """
    #################################################################
    Calculation of Polarity number.
    It is the number of pairs of vertexes at
    distance matrix equal to 3
    ---->Pol
    Usage:
        result=CalculatePolarityNumber(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    distance = Chem.GetDistanceMatrix(mol)
    # Each unordered pair appears twice in the symmetric matrix.
    return numpy.count_nonzero(distance == 3) / 2.0
def CalculatePoglianiIndex(mol):
    """
    #################################################################
    Calculation of Pogliani index
    The Pogliani index (Dz) is the sum over all non-hydrogen atoms
    of a modified vertex degree calculated as the ratio
    of the number of valence electrons over the principal
    quantum number of an atom [L. Pogliani, J.Phys.Chem.
    1996, 100, 18065-18077].
    ---->DZ
    Usage:
        result=CalculatePoglianiIndex(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    total = 0.0
    for atom in mol.GetAtoms():
        atomic_num = atom.GetAtomicNum()
        valence_electrons = periodicTable.GetNOuterElecs(atomic_num)
        total += float(valence_electrons) / _GetPrincipleQuantumNumber(atomic_num)
    return total
def CalculateIpc(mol):
    """
    #################################################################
    This returns the information content of the coefficients of the
    characteristic polynomial of the adjacency matrix of a
    hydrogen-suppressed graph of a molecule.
    From D. Bonchev & N. Trinajstic, J. Chem. Phys. vol 67,
    4517-4533 (1977)
    ---->Ipc(log value)
    Usage:
        result=CalculateIpc(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    ipc = GD.Ipc(mol)
    # Non-positive values cannot be log-transformed; flag them as "NaN".
    return numpy.log10(ipc) if ipc > 0 else "NaN"
def CalculateBertzCT(mol):
    """
    #################################################################
    A topological index meant to quantify "complexity" of molecules.
    Consists of a sum of two terms, one representing the complexity
    of the bonding, the other representing the complexity of the
    distribution of heteroatoms.
    From S. H. Bertz, J. Am. Chem. Soc., vol 103, 3599-3601 (1981)
    ---->BertzCT(log value)
    Usage:
        result=CalculateBertzCT(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    ct = GD.BertzCT(mol)
    # Non-positive values cannot be log-transformed; flag them as "NaN".
    return numpy.log10(ct) if ct > 0 else "NaN"
def CalculateHarary(mol):
    """
    #################################################################
    Calculation of Harary number
    ---->Thara
    Usage:
        result=CalculateHarary(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    distance = numpy.array(Chem.GetDistanceMatrix(mol), "d")
    # Half-sum of reciprocal distances over all off-diagonal entries.
    nonzero = distance[distance != 0]
    return 1.0 / 2 * sum(1.0 / nonzero)
def CalculateSchiultz(mol):
    """
    #################################################################
    Calculation of Schiultz number
    ---->Tsch(log value)
    Usage:
        result=CalculateSchiultz(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    Distance = numpy.array(Chem.GetDistanceMatrix(mol), "d")
    Adjacent = numpy.array(Chem.GetAdjacencyMatrix(mol), "d")
    VertexDegree = sum(Adjacent)
    # numpy.dot instead of the deprecated scipy.dot alias (removed in
    # SciPy >= 1.12); the value is identical.
    return sum(numpy.dot((Distance + Adjacent), VertexDegree))
def CalculateZagreb1(mol):
    """
    #################################################################
    Calculation of Zagreb index with order 1 in a molecule
    ---->ZM1
    Usage:
        result=CalculateZagreb1(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    degrees = numpy.array([atom.GetDegree() for atom in mol.GetAtoms()])
    return sum(degrees ** 2)
def CalculateZagreb2(mol):
    """
    #################################################################
    Calculation of Zagreb index with order 2 in a molecule
    ---->ZM2
    Usage:
        result=CalculateZagreb2(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    # Product of the degrees of the two endpoints, summed over all bonds.
    products = [bond.GetBeginAtom().GetDegree() * bond.GetEndAtom().GetDegree()
                for bond in mol.GetBonds()]
    return sum(products)
def CalculateMZagreb1(mol):
    """
    #################################################################
    Calculation of Modified Zagreb index with order 1 in a molecule
    ---->MZM1
    Usage:
        result=CalculateMZagreb1(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    # Drop zero-degree (isolated) atoms in a single pass; the original
    # repeated list.remove() scan was O(n**2).
    deltas = numpy.array([x.GetDegree() for x in mol.GetAtoms()
                          if x.GetDegree() != 0], "d")
    return sum((1.0 / deltas) ** 2)
def CalculateMZagreb2(mol):
    """
    #################################################################
    Calculation of Modified Zagreb index with order 2 in a molecule
    ---->MZM2
    Usage:
        result=CalculateMZagreb2(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    cc = numpy.array([x.GetBeginAtom().GetDegree() * x.GetEndAtom().GetDegree()
                      for x in mol.GetBonds()], "d")
    # Drop zero products in a single pass; the original repeated
    # list.remove() scan was O(n**2).
    cc = cc[cc != 0]
    return sum((1.0 / cc) ** 2)
def CalculateQuadratic(mol):
    """
    #################################################################
    Calculation of Quadratic index in a molecule
    ---->Qindex
    Usage:
        result=CalculateQuadratic(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    zagreb1 = CalculateZagreb1(mol)
    n_atoms = mol.GetNumAtoms()
    return 3 - 2 * n_atoms + zagreb1 / 2.0
def CalculatePlatt(mol):
    """
    #################################################################
    Calculation of Platt number in a molecule
    ---->Platt
    Usage:
        result=CalculatePlatt(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    # Edge degree of each bond: endpoint degrees minus the bond itself.
    edge_degrees = [bond.GetBeginAtom().GetDegree() + bond.GetEndAtom().GetDegree() - 2
                    for bond in mol.GetBonds()]
    return sum(edge_degrees)
def CalculateSimpleTopoIndex(mol):
    """
    #################################################################
    Calculation of the logarithm of the simple topological index by Narumi,
    which is defined as the product of the vertex degrees.
    ---->Sito
    Usage:
        result=CalculateSimpleTopoIndex(mol)
        Input: mol is a molecule object
        Output: result is a numeric value ("NaN" if the product is <= 0)
    #################################################################
    """
    # Drop zero-degree (isolated) atoms in a single pass; the original
    # repeated list.remove() scan was O(n**2).
    deltas = numpy.array([x.GetDegree() for x in mol.GetAtoms()
                          if x.GetDegree() != 0], "d")
    res = numpy.prod(deltas)
    if res > 0:
        return numpy.log10(res)
    return "NaN"
def CalculateHarmonicTopoIndex(mol):
    """
    #################################################################
    Calculation of harmonic topological index proposed by Narumi.
    ---->Hato
    Usage:
        result=CalculateHarmonicTopoIndex(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    # Drop zero-degree (isolated) atoms in a single pass; the original
    # repeated list.remove() scan was O(n**2).
    deltas = numpy.array([x.GetDegree() for x in mol.GetAtoms()
                          if x.GetDegree() != 0], "d")
    nAtoms = mol.GetNumAtoms()
    return nAtoms / sum(1.0 / deltas)
def CalculateGeometricTopoIndex(mol):
    """
    #################################################################
    Geometric topological index by Narumi
    ---->Geto
    Usage:
        result=CalculateGeometricTopoIndex(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    nAtoms = mol.GetNumAtoms()
    # Drop zero-degree (isolated) atoms in a single pass; the original
    # repeated list.remove() scan was O(n**2).
    deltas = numpy.array([x.GetDegree() for x in mol.GetAtoms()
                          if x.GetDegree() != 0], "d")
    temp = numpy.prod(deltas)
    return numpy.power(temp, 1.0 / nAtoms)
def CalculateArithmeticTopoIndex(mol):
    """
    #################################################################
    Arithmetic topological index by Narumi
    (mean vertex degree: twice the bond count over the atom count).
    ---->Arto
    Usage:
        result=CalculateArithmeticTopoIndex(mol)
        Input: mol is a molecule object
        Output: result is a numeric value
    #################################################################
    """
    return 2.0 * mol.GetNumBonds() / mol.GetNumAtoms()
# Mapping from descriptor label to its calculator function.  Dict insertion
# order fixes the key order of the result returned by GetTopology().
_Topology = {
    "W": CalculateWeiner,
    "AW": CalculateMeanWeiner,
    "J": CalculateBalaban,
    "Tigdi": CalculateGraphDistance,
    "Xu": CalculateXuIndex,
    "GMTI": CalculateGutmanTopo,
    "Pol": CalculatePolarityNumber,
    "DZ": CalculatePoglianiIndex,
    "Ipc": CalculateIpc,
    "BertzCT": CalculateBertzCT,
    "Thara": CalculateHarary,
    "Tsch": CalculateSchiultz,
    "ZM1": CalculateZagreb1,
    "ZM2": CalculateZagreb2,
    "MZM1": CalculateMZagreb1,
    "MZM2": CalculateMZagreb2,
    "Qindex": CalculateQuadratic,
    "Platt": CalculatePlatt,
    "diametert": CalculateDiameter,
    "radiust": CalculateRadius,
    "petitjeant": CalculatePetitjean,
    "Sito": CalculateSimpleTopoIndex,
    "Hato": CalculateHarmonicTopoIndex,
    "Geto": CalculateGeometricTopoIndex,
    "Arto": CalculateArithmeticTopoIndex,
}
def GetTopology(mol):
    """
    #################################################################
    Get the dictionary of all topological descriptors for the given
    molecule mol.
    Usage:
        result=GetTopology(mol)
        Input: mol is a molecule object
        Output: result is a dict form containing all topological indices.
    #################################################################
    """
    result = {}
    for label, calculator in _Topology.items():
        value = calculator(mol)
        # Ipc, BertzCT and Sito return the string "NaN" for non-positive
        # values; round() would raise TypeError on those, so only round
        # numeric results.
        if isinstance(value, (int, float, numpy.number)):
            result[label] = round(value, 3)
        else:
            result[label] = value
    return result
def _GetHTMLDoc():
    """
    #################################################################
    Write HTML documentation for this module (creates topology.html
    in the current working directory via pydoc).
    #################################################################
    """
    import pydoc as _pydoc
    _pydoc.writedoc("topology")
################################################################################
#####################################################################
if __name__ == "__main__":
    # Smoke test: compute and print all topological indices for a handful
    # of small SMILES strings (butane .. alanine and its anion).
    smis = ["CCCC", "CCCCC", "CCCCCC", "CC(N)C(=O)O", "CC(N)C(=O)[O-]"]
    for index, smi in enumerate(smis):
        m = Chem.MolFromSmiles(smi)
        print(index + 1)
        print(smi)
        print("\t", GetTopology(m))
        print("\t", len(GetTopology(m)))
| gadsbyfly/PyBioMed | PyBioMed/PyMolecule/topology.py | Python | bsd-3-clause | 19,317 | [
"RDKit"
] | 159752873753e0d712e3b93ac56b22914e6e120c8f4eec90f0eec96927b00691 |
""" Utility for loading plotting types.
Works both for Accounting and Monitoring.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from DIRAC.Core.Utilities.Plotting.ObjectLoader import loadObjects
from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
from DIRAC.MonitoringSystem.Client.Types.BaseType import BaseType
__RCSID__ = "$Id$"
########################################################################
class TypeLoader(object):
  """
  .. class:: TypeLoader

  Loads and caches the plotting type classes for either the Accounting
  or the Monitoring system.

  :param dict loaded: it stores the loaded classes
  :param str path: The location of the classes
  :param parentCls: parent class the loaded types must derive from
  :param regexp: regular expression used to select candidate modules
  """

  ########################################################################
  def __init__(self, plottingFamily='Accounting'):
    """c'tor

    :param str plottingFamily: 'Accounting' or 'Monitoring'
    :raises ValueError: for an unknown plotting family. (Previously an
        unknown family silently left ``__path``/``__parentCls`` unset and
        the failure surfaced later as a confusing AttributeError.)
    """
    self.__loaded = {}
    if plottingFamily == 'Accounting':
      self.__path = "AccountingSystem/Client/Types"
      self.__parentCls = BaseAccountingType
    elif plottingFamily == 'Monitoring':
      self.__path = "MonitoringSystem/Client/Types"
      self.__parentCls = BaseType
    else:
      raise ValueError("Unknown plotting family: %r" % plottingFamily)
    # Only module files whose name ends in a lowercase letter or digit
    # (skips e.g. __init__.py).
    self.__reFilter = re.compile(r".*[a-z1-9]\.py$")

  ########################################################################
  def getTypes(self):
    """
    It returns all monitoring classes, loading them lazily on first call
    and caching the result.
    """
    if not self.__loaded:
      self.__loaded = loadObjects(self.__path, self.__reFilter, self.__parentCls)
    return self.__loaded
| yujikato/DIRAC | src/DIRAC/Core/Utilities/Plotting/TypeLoader.py | Python | gpl-3.0 | 1,594 | [
"DIRAC"
] | c6edac440a5f3c3d633924985802f78d3b4602be60f54bd974a790093693c9c5 |
# proxy module
from __future__ import absolute_import
from mayavi.components.ui.actor import *
| enthought/etsproxy | enthought/mayavi/components/ui/actor.py | Python | bsd-3-clause | 95 | [
"Mayavi"
] | e2d30cd6bb45593c2f4aaa4a3578bc09d02a0827978859f6dab64a6721cd3bbc |
import numpy as np, SimPEG as simpeg, vtk, sys, os, time
import vtk.util.numpy_support as npsup
# import polydata, extraction
def findZofXYOnPolydata(points, vtkPolydata):
    """Project (x, y) locations vertically onto a vtkPolydata surface.

    For each point, casts a vertical line spanning the polydata's z-extent
    and records where it intersects the surface.

    Parameters
    ----------
    points : array-like
        Iterable of locations; only columns 0 and 1 (x, y) are used.
    vtkPolydata : vtk.vtkPolyData
        Surface to intersect.

    Returns
    -------
    numpy.ndarray
        n-by-3 array of intersection points. NOTE(review): the return code
        of IntersectWithLine is not checked, so locations with no
        intersection contribute the untouched default [0, 0, 0] — preserved
        from the original behaviour.
    """
    # Build a cell locator for fast line/surface intersection queries.
    cellLocator = vtk.vtkCellLocator()
    cellLocator.SetDataSet(vtkPolydata)
    cellLocator.BuildLocator()
    # z-extent of the polydata: bounds are (xmin, xmax, ymin, ymax, zmin, zmax).
    lbot, ltop = np.array(vtkPolydata.GetBounds())[4::]
    intersectList = []
    nr = -1  # fix: defined even if interrupted before the first iteration
    try:
        for nr, loc in enumerate(points):
            # Vertical probe line through (x, y) from top to bottom.
            p1 = np.hstack((loc[0:2], ltop))
            p2 = np.hstack((loc[0:2], lbot))
            # Pre-declare output arguments, C++-style.
            t = vtk.mutable(0)
            pIntSect = [0.0, 0.0, 0.0]
            pcoords = [0.0, 0.0, 0.0]
            sub_id = vtk.mutable(0)
            cellLocator.IntersectWithLine(p1, p2, 1e-6, t, pIntSect, pcoords, sub_id)
            intersectList.append(pIntSect)
    except KeyboardInterrupt as k:
        # print() form works on both Python 2 and 3 (was a py2-only statement).
        print('Stopped at iteration {:d} in the for loop.'.format(nr))
        raise k
    return np.array(intersectList)
"VTK"
] | 57d5132dc1146c03ad3c23054c45e77ded92c132ebb83fb7d70ed5ed5581cb17 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views import defaults as default_views
from course.views import CourseManageView, CourseCreateView
urlpatterns = [
    # Course management landing page.
    url(r'^$', CourseManageView.as_view(), name='manage'),
    # NOTE(review): 'createCoure' looks like a typo for 'createCourse', but
    # renaming would break any existing reverse()/{% url %} lookups — verify
    # usages before fixing.
    url(r'^createCourse/', CourseCreateView.as_view(), name='createCoure'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^api/', include('api.urls')),
    url(r'^http/', include('course.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        # Mount django-debug-toolbar only when it is actually installed.
        import debug_toolbar
        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
| Wetrain/mmu-course-api | config/urls.py | Python | mit | 1,491 | [
"VisIt"
] | 00e5505ad876ac3f9192221234b5b5c6a0cb35b53c30127c4d54b0d2f41a44b1 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt, cstr
import os
import sys
import subprocess
import getpass
import logging
import json
from distutils.spawn import find_executable
class SiteMaster(Document):
    """Controller for the 'Site Master' doctype (one row per tenant site)."""
    def on_update(self):
        # No post-save behaviour; site provisioning is driven by multitenanct().
        pass
def multitenanct(from_test=False):
    """Provision the next pending tenant site (Site Master rows with flag=0).

    Picks one unprovisioned site, creates it via the testenv.sh bench script,
    rewrites the nginx config and /etc/hosts, emails the new administrator the
    default credentials, marks the row provisioned (flag=1, expiry one month
    out) and pushes user-count / validity settings to the new site's REST API
    based on the purchased Item.

    NOTE(review): paths, passwords and the master host are hard-coded, and
    several SQL statements are built by string concatenation — presumably this
    only ever runs on a trusted single-admin master; verify before reuse.
    """
    frappe.errprint("creation site -------------------------------- ")
    res=frappe.db.sql("""select name,client_name,email_id__if_administrator from `tabSite Master` where flag=0 limit 1 """)
    if res:
        sites=''
        sites = frappe.db.sql("""select sites from `tabUser` where name='administrator'""")
        print sites
        print 'gangadharkadam'
        auto_commit = not from_test
        ste=res[0][0]
        from frappe.utils import cstr
        import os
        import sys
        import subprocess
        import getpass
        import logging
        import json
        from distutils.spawn import find_executable
        from frappe.utils.email_lib import sendmail
        # Bench directory containing the site-creation shell script.
        cwd='/home/gangadhar/Documents/gnkuper/frappe-bench/'
        print cwd
        cmd="./testenv.sh "+ste
        print cwd
        qr="select email_id__if_administrator,client_name from `tabSite Master` where name='"+ste+"'"
        rs=frappe.db.sql(qr)
        frappe.errprint("hello gangadhar")
        from frappe.utils.email_lib import sendmail
        # Welcome mail with the default administrator credentials.
        etemp=frappe.db.sql("select subject,message from `tabTemplate Types` where name='Successful first purchase'")
        #frappe.errprint(etemp)
        msg=etemp[0][1].replace('first_name',rs[0][1]).replace('user_name','administrator').replace('password','admin').replace('click here',ste)
        sendmail(rs[0][0], subject=etemp[0][0], msg = msg)
        #msg1="Hello "+res[0][1]+", <br> Welcome to TailorPad! <br> Thank you for showing interest. You can use the following link and credentials for trying TailorPad:<br> 'http://"+ste+"' <br> user name :-administrator<br> password :- admin<br>In case you need any more information about our product, please visit FAQ page or write to us on support@tailorpad.com, we will be glad to assist you.<br>Best Regards,<br>Team TailorPad"
        #sendmail(rs[0][0], subject='Welcome to Tailorpad', msg = msg1)
        import subprocess
        frappe.errprint(cmd)
        #subprocess.call(['cd /home/indictrans/webapps/tailorpad/'])
        #pass
        # Track every provisioned site on the master administrator record.
        sites=cstr(sites[0][0])+' '+ste
        frappe.db.sql("update `tabUser` set sites= %s where name='administrator'",sites)
        try:
            subprocess.check_call(cmd, cwd=cwd, shell=True)
        except subprocess.CalledProcessError, e:
            print "Error:", e.output
            raise
        print 'creating nginx'
        nginx="""
    upstream frappe {
        server 127.0.0.1:8000 fail_timeout=0;
    }
    server {
        listen 80 ;
        client_max_body_size 4G;
        server_name stich1.tailorpad.com %s;
        keepalive_timeout 5;
        sendfile on;
        root /home/gangadhar/Documents/gnkuper/frappe-bench/sites;
        location /private/ {
            internal;
            try_files /$uri =424;
        }
        location /assets {
            try_files $uri =404;
        }
        location / {
            try_files /stich1.tailorpad.com/public/$uri @magic;
        }
        location @magic {
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $host;
            proxy_set_header X-Use-X-Accel-Redirect True;
            proxy_read_timeout 120;
            proxy_redirect off;
            proxy_pass http://frappe;
        }
    }"""%(sites)
        print nginx
        with open("/home/gangadhar/Documents/gnkuper/frappe-bench/config/nginx.conf","w") as conf_file:
            conf_file.write(nginx)
        cwd='/home/'
        cmd='echo indictrans | sudo service nginx reload'
        print 'nginx reloading'
        try:
            subprocess.check_call(cmd, cwd=cwd, shell=True)
        except subprocess.CalledProcessError, e:
            print "Error:", e.output
            raise
        print "nginx reloaded"
        host="""
    127.0.0.1 localhost
    127.0.1.1 gangadhar-OptiPlex-360
    127.0.0.1 %s
    # The following lines are desirable for IPv6 capable hosts
    ::1 ip6-localhost ip6-loopback
    fe00::0 ip6-localnet
    ff00::0 ip6-mcastprefix
    ff02::1 ip6-allnodes
    ff02::2 ip6-allrouters
    """%(sites)
        print host
        with open("/home/gangadhar/Documents/gnkuper/frappe-bench/config/hosts","w") as hosts_file:
            hosts_file.write(host)
        print 'written hosts nin setup'
        os.system('echo indictrans | sudo -S cp /home/gangadhar/Documents/gnkuper/frappe-bench/config/hosts /etc/hosts')
        print "reloaded hosts hosts"
        from frappe.utils import nowdate,add_months,cint
        en_dt=add_months(nowdate(),1)
        from frappe.utils import nowdate,add_months,cint
        en_dt=add_months(nowdate(),1)
        # Mark the row provisioned; trial expires one month from today.
        qry="update `tabSite Master` set flag=1 ,expiry_date='"+en_dt+"' where name='"+cstr(res[0][0])+"'"
        #frappe.errprint(qry)
        frappe.db.sql(qry, auto_commit=auto_commit)
        import requests
        import json
        qry="select site_name,email_id__if_administrator,country from `tabSite Master` where name='"+cstr(ste)+"'"
        frappe.errprint(qry)
        pr1 = frappe.db.sql(qry)
        st=pr1 and pr1[0][0] or ''
        eml=pr1 and pr1[0][1] or ''
        cnt=pr1 and pr1[0][2] or ''
        # Register the new site's admin under the master Admin Details doc.
        frappe.get_doc({
            "doctype":"SubAdmin Info",
            "parent": "SUB0001",
            "parentfield": "subadmins_information",
            "parenttype":"Admin Details",
            "admin": eml,
            "site_name":ste
        }).insert()
        headers = {'content-type': 'application/x-www-form-urlencoded'}
        sup={'usr':'administrator','pwd':'admin'}
        url = 'http://'+st+'/api/method/login'
        response = requests.get(url, data=sup, headers=headers)
        # Tenant database name: site name up to the first dot, max 16 chars.
        if st.find('.')!= -1:
            db=st.split('.')[0][:16]
        else:
            db=st[:16]
        vldt={}
        vldt['country']=cnt
        vldt['email_id_admin']=eml
        url = 'http://'+st+'/api/resource/User/Administrator'
        frappe.errprint(url)
        frappe.errprint('data='+json.dumps(vldt))
        response = requests.put(url, data='data='+json.dumps(vldt), headers=headers)
        # Push purchased user-count / validity from the sales invoice items.
        item_code = frappe.db.sql("""select b.item_code from `tabSales Invoice` a, `tabSales Invoice Item` b where a.name=b.parent and a.customer=%s """, res[0][1])
        for ic in item_code:
            qr="select no_of_users,validity from `tabItem` where name = '"+cstr(ic[0])+"'"
            pro = frappe.db.sql(qr)
            frappe.errprint(pro)
            if (pro [0][0]== 0) and (pro[0][1]>0):
                frappe.errprint("0 and >0")
                vldt={}
                vldt['validity']=pro[0][1]
                vldt['country']=cnt
                vldt['email_id_admin']=eml
                url = 'http://'+st+'/api/resource/User/Administrator'
                frappe.errprint(url)
                frappe.errprint('data='+json.dumps(vldt))
                response = requests.put(url, data='data='+json.dumps(vldt), headers=headers)
                frappe.errprint("responce")
                frappe.errprint(response.text)
            elif (pro [0][0]>0 ) and (pro[0][1]==0):
                frappe.errprint(">0 and 0")
                vldtt={}
                vldtt['no_of_users']=pro[0][0]
                vldtt['country']=cnt
                vldtt['email_id_admin']=eml
                url = 'http://'+st+'/api/resource/User/Administrator'
                frappe.errprint(url)
                frappe.errprint('data='+json.dumps(vldtt))
                response = requests.put(url, data='data='+json.dumps(vldtt), headers=headers)
                frappe.errprint("responce")
                frappe.errprint(response.text)
            elif (pro [0][0]> 0) and (pro[0][1]>0):
                frappe.errprint(" >0 and >0")
                user_val={}
                user_val['validity']=pro [0][1]
                user_val['user_name']=pro [0][0]
                user_val['flag']='false'
                url = 'http://'+st+'/api/resource/User Validity'
                frappe.errprint(url)
                frappe.errprint('data='+json.dumps(user_val))
                response = requests.post(url, data='data='+json.dumps(user_val), headers=headers)
                frappe.errprint("responce")
                frappe.errprint(response.text)
            else:
                frappe.errprint("0 and 0")
def assign_support():
    """Assign every open, unassigned Support Ticket on the master site.

    Tickets are routed via `tabAssing Master` (raised_by -> assignee);
    anything without a routing entry falls back to 'Administrator'.
    Runs only on the master site (stich1.tailorpad.com).
    """
    frappe.errprint("assign suppoert tickets")
    from frappe.utils import get_url, cstr
    if get_url() == 'http://stich1.tailorpad.com':
        check_entry = frappe.db.sql("""select name,raised_by from `tabSupport Ticket` where assign_to is null and raised_by is not null and status<>'Closed'""")
        frappe.errprint(check_entry)
        for name, raised_by in check_entry:
            frappe.errprint([name, raised_by])
            assign_to = frappe.db.sql("""select assign_to from `tabAssing Master` where name= %s""", raised_by)
            if assign_to:
                # Parameterized update: the previous string concatenation was
                # vulnerable to SQL injection via ticket names.
                frappe.db.sql("""update `tabSupport Ticket` set assign_to=%s where name = %s""",
                              (assign_to[0][0], name))
            else:
                frappe.db.sql("""update `tabSupport Ticket` set assign_to='Administrator' where name = %s""", name)
def create_support():
    """Mirror new Support Tickets from every tenant site to the master site.

    For each tenant (from `tabSubAdmin Info`), finds tickets with
    flag='false' in the tenant database, re-creates them on the master via
    the REST API, then flips the tenant ticket's flag to 'True'.
    """
    frappe.errprint("creating suppoert tickets")
    import requests
    import json
    pr2 = frappe.db.sql("""select site_name from `tabSubAdmin Info` """)
    for site_name in pr2:
        # Tenant database name: site name up to the first dot, max 16 chars.
        db_name=cstr(site_name[0]).split('.')[0]
        db_name=db_name[:16]
        abx="select name from `"+cstr(db_name)+"`.`tabSupport Ticket` where flag='false'"
        #frappe.errprint(abx)
        pr3 = frappe.db.sql(abx)
        #frappe.errprint(pr3)
        for sn in pr3:
            login_details = {'usr': 'Administrator', 'pwd': 'admin'}
            url = "http://"+cstr(site_name[0])+"/api/method/login"
            headers = {'content-type': 'application/x-www-form-urlencoded'}
            response = requests.post(url, data='data='+json.dumps(login_details), headers=headers)
            #frappe.errprint("login in site")
            frappe.errprint(response.text)
            test = {}
            url="http://"+cstr(site_name[0])+"/api/resource/Support Ticket/"+cstr(sn[0])
            #frappe.errprint("fetching suppoert ticket")
            response = requests.get(url)
            #frappe.errprint(response.text)
            # WARNING(review): eval() on an HTTP response body executes
            # arbitrary code if the remote site is compromised —
            # json.loads(response.text) would be the safe equivalent.
            support_ticket = eval(response.text).get('data')
            # Strip identity/audit fields so the master assigns fresh ones.
            del support_ticket['name']
            del support_ticket['creation']
            del support_ticket['modified']
            del support_ticket['company']
            url = "http://stich1.tailorpad.com/api/method/login"
            #frappe.errprint("login in master for ticket creation")
            response = requests.post(url, data='data='+json.dumps(login_details), headers=headers)
            #frappe.errprint(response.text)
            url = 'http://stich1.tailorpad.com/api/resource/Support Ticket'
            response = requests.post(url, data='data='+json.dumps(support_ticket), headers=headers)
            #frappe.errprint("create support ticket")
            #frappe.errprint(response.text)
            # Mark the tenant-side ticket as already mirrored.
            url="http://"+cstr(site_name[0])+"/api/resource/Support Ticket/"+cstr(sn[0])
            support_ticket={}
            #frappe.errprint()
            support_ticket['flag']='True'
            #frappe.errprint('data='+json.dumps(support_ticket))
            response = requests.put(url, data='data='+json.dumps(support_ticket), headers=headers)
            frappe.errprint("updated flag")
def create_feedback():
    """Mirror new 'Feed Back' documents from every tenant site to the master.

    Same flow as create_support(): pull flag='false' rows from each tenant
    database, re-create them on the master over REST, then mark the tenant
    copy flag='True'.
    """
    frappe.errprint("creating feed back")
    import requests
    import json
    pr2 = frappe.db.sql("""select site_name from `tabSubAdmin Info`""")
    for site_name in pr2:
        #frappe.errprint(site_name)
        # Tenant database name: site name up to the first dot, max 16 chars.
        db_name=cstr(site_name[0]).split('.')[0]
        db_name=db_name[:16]
        abx="select name from `"+cstr(db_name)+"`.`tabFeed Back` where flag='false'"
        #frappe.errprint(abx)
        pr3 = frappe.db.sql(abx)
        #frappe.errprint(pr3)
        for sn in pr3:
            login_details = {'usr': 'Administrator', 'pwd': 'admin'}
            url = "http://"+cstr(site_name[0])+"/api/method/login"
            headers = {'content-type': 'application/x-www-form-urlencoded'}
            response = requests.post(url, data='data='+json.dumps(login_details), headers=headers)
            #frappe.errprint(response.text)
            test = {}
            url="http://"+cstr(site_name[0])+"/api/resource/Feed Back/"+cstr(sn[0])
            response = requests.get(url)
            #frappe.errprint(response.text)
            # WARNING(review): eval() on an HTTP response body executes
            # arbitrary code if the remote site is compromised —
            # json.loads(response.text) would be the safe equivalent.
            support_ticket = eval(response.text).get('data')
            # Strip identity/audit fields so the master assigns fresh ones.
            del support_ticket['name']
            del support_ticket['creation']
            del support_ticket['modified']
            #del support_ticket['company']
            url = "http://stich1.tailorpad.com/api/method/login"
            response = requests.post(url, data='data='+json.dumps(login_details), headers=headers)
            frappe.errprint(response.text)
            url = 'http://stich1.tailorpad.com/api/resource/Feed Back'
            response = requests.post(url, data='data='+json.dumps(support_ticket), headers=headers)
            frappe.errprint("create support ticket")
            # Mark the tenant-side document as already mirrored.
            url="http://"+cstr(site_name[0])+"/api/resource/Feed Back/"+cstr(sn[0])
            support_ticket={}
            support_ticket['flag']='True'
            frappe.errprint('data='+json.dumps(support_ticket))
            response = requests.put(url, data='data='+json.dumps(support_ticket), headers=headers)
def add_validity():
    """Extend per-user validity on every site whose administrator carries a
    pending `validity` value (in months), then reset that counter to 0.

    Users already expired restart their window from today; active users are
    extended from their current end date. Updates go through each site's
    REST API as the default administrator.
    """
    frappe.errprint("in add validity function")
    import requests
    import json
    from frappe.utils import nowdate, cstr, cint, getdate, add_months
    sites = frappe.db.sql("""select site_name from `tabSite Master` """)
    for site_row in sites:
        site_name = site_row[0]
        # Tenant database name: site name up to the first dot, max 16 chars.
        if '.' in site_name:
            db = site_name.split('.')[0][:16]
        else:
            db = site_name[:16]
        qry = "select validity from `" + cstr(db) + "`.`tabUser` where name='administrator' and validity>0 "
        frappe.errprint(qry)
        months = frappe.db.sql(qry)
        if months:
            headers = {'content-type': 'application/x-www-form-urlencoded'}
            sup = {'usr': 'administrator', 'pwd': 'admin'}
            response = requests.get('http://' + site_name + '/api/method/login', data=sup, headers=headers)
            # Expired users: restart their validity window from today.
            expired = frappe.db.sql("select name from `" + cstr(db) + "`.`tabUser` where validity_end_date <CURDATE()")
            for user in expired:
                end_date = add_months(getdate(nowdate()), cint(months[0][0]))
                payload = {'validity_start_date': cstr(nowdate()),
                           'validity_end_date': cstr(end_date)}
                # BUG FIX: the original referenced an undefined `name` here;
                # the row's user id (user[0]) is what must go into the URL.
                url = 'http://' + site_name + '/api/resource/User/' + cstr(user[0])
                response = requests.put(url, data='data=' + json.dumps(payload), headers=headers)
            # Active users: extend from their current end date.
            active = frappe.db.sql("select name,validity_end_date from `" + cstr(db) + "`.`tabUser` where validity_end_date >=CURDATE()")
            for user_name, validity_end_date in active:
                end_date = add_months(getdate(validity_end_date), cint(months[0][0]))
                payload = {'validity_end_date': cstr(end_date)}
                url = 'http://' + site_name + '/api/resource/User/' + cstr(user_name)
                response = requests.put(url, data='data=' + json.dumps(payload), headers=headers)
            # Clear the pending validity on the administrator record.
            payload = {'validity': '0'}
            url = 'http://' + site_name + '/api/resource/User/administrator'
            response = requests.put(url, data='data=' + json.dumps(payload), headers=headers)
def disable_user():
    """Disable (enabled=0) every tenant user whose validity has ended.

    Scans each tenant database listed in `tabSubAdmin Info` for users with
    validity_end_date on or before today, and disables them via the tenant
    site's REST API as the default administrator.
    """
    frappe.errprint("in disable user ")
    import requests
    import json
    pr2 = frappe.db.sql("""select site_name from `tabSubAdmin Info`""")
    for site_name in pr2:
        # Tenant database name: site name up to the first dot, max 16 chars.
        db_name=cstr(site_name[0]).split('.')[0]
        db_name=db_name[:16]
        abx="select name from `"+cstr(db_name)+"`.`tabUser` where validity_end_date<=CURDATE()"
        pr3 = frappe.db.sql(abx)
        for sn in pr3:
            headers = {'content-type': 'application/x-www-form-urlencoded'}
            sup={'usr':'administrator','pwd':'admin'}
            url = 'http://'+cstr(site_name[0])+'/api/method/login'
            response = requests.get(url, data=sup, headers=headers)
            url="http://"+cstr(site_name[0])+"/api/resource/User/"+cstr(sn[0])
            support_ticket={}
            support_ticket['enabled']=0
            response = requests.put(url, data='data='+json.dumps(support_ticket), headers=headers)
def lead_sales_followup():
    """On Mondays, mail the 'Sales follow up' template to every lead created
    within the last 64 days that has not yet become a customer.

    Template placeholders: 'first_name' -> lead name, 'act_date' -> the
    lead's creation date.
    """
    from frappe.utils.email_lib import sendmail
    template = frappe.db.sql("select subject,message from `tabTemplate Types` where name='Sales follow up'")
    leads = frappe.db.sql(
        "select lead_name,email_id,date(creation) from `tabLead` "
        "where DATEDIFF(curdate(),creation) <=64 and WEEKDAY(curdate())=0 and customer is null")
    for lead_name, email_id, created_on in leads:
        body = template[0][1].replace('first_name', lead_name).replace('act_date', cstr(created_on))
        sendmail(email_id, subject=template[0][0], msg=body)
def promotional_follow():
    """On Mondays, mail the 'Promotional Followup' template to every lead
    older than 64 days that has not yet become a customer.

    Template placeholders: 'first_name' -> lead name, 'current_date+7' ->
    the date one week from today.
    """
    from frappe.utils.email_lib import sendmail
    template = frappe.db.sql("select subject,message from `tabTemplate Types` where name='Promotional Followup'")
    leads = frappe.db.sql(
        "select lead_name,email_id,DATE_ADD(curdate(),INTERVAL 7 DAY) from `tabLead` "
        "where DATEDIFF(curdate(),creation) >=64 and WEEKDAY(curdate())=0 and customer is null")
    for lead_name, email_id, next_week in leads:
        body = template[0][1].replace('first_name', lead_name).replace('current_date+7', cstr(next_week))
        sendmail(email_id, subject=template[0][0], msg=body)
def success_renewal(doc,method):
    """Disabled doc-event hook: would mail the 'Successful renewal' template
    to the site administrator; the implementation below is commented out."""
    pass
    # from frappe.utils.email_lib import sendmail
    # #frappe.errprint("in success_renewal")
    # etemp=frappe.db.sql("select subject,message from `tabTemplate Types` where name='Successful renewal'")
    # email_qry="select email_id__if_administrator from `tabSite Master` where client_name='"+doc.customer+"'"
    # #frappe.errprint(email_qry)
    # res=frappe.db.sql(email_qry)
    # #frappe.errprint(res[0][0])
    # date_query=frappe.db.sql("""select DATE_ADD(curdate(),INTERVAL 1 year)""")
    # #frappe.errprint(res)
    # #frappe.errprint(date_query[0][0])
    # if res:
    # msg=etemp[0][1].replace('first_name',doc.customer).replace('sub_end_date',cstr(date_query[0][0]))
    # #frappe.errprint(msg)
    # sendmail(res[0][0], subject=etemp[0][0], msg = msg)
def ticket_submission(doc, method):
    """Notify the raiser that their Support Ticket was submitted.

    Fills the 'Contact us, Feedback & Ticket submission' template with the
    customer name and ticket id, then mails it to ``doc.raised_by``.
    """
    from frappe.utils.email_lib import sendmail
    frappe.errprint("ticket submission")
    template = frappe.db.sql("select subject,message from `tabTemplate Types` where name='Contact us, Feedback & Ticket submission'")
    subject, body = template[0][0], template[0][1]
    body = body.replace('first_name', doc.customer)
    body = body.replace('ticket_number', doc.name)
    sendmail(doc.raised_by, subject=subject, msg=body)
def feedback_submission(doc, method):
    """Acknowledge a submitted feedback document by email.

    Looks up the client name for the raiser's site and mails the
    'Contact us, Feedback & Ticket submission' template to ``doc.raised_by``.
    """
    from frappe.utils.email_lib import sendmail
    etemp = frappe.db.sql("select subject,message from `tabTemplate Types` where name='Contact us, Feedback & Ticket submission'")
    # Parameterized lookup: raised_by is user-supplied, so the previous
    # string-concatenated SQL was injection-prone.
    res = frappe.db.sql("select client_name from `tabSite Master` where email_id__if_administrator=%s", doc.raised_by)
    msg = etemp[0][1].replace('first_name', res[0][0]).replace('ticket_number', doc.name)
    sendmail(doc.raised_by, subject=etemp[0][0], msg=msg)
def before15_renewal_date():
    """Email each client whose subscription renews in exactly 15 days.

    Uses the '15 days before renewal date' template; placeholders
    'first_name' and 'renewal date' are substituted.
    """
    from frappe.utils.email_lib import sendmail
    etemp = frappe.db.sql("select subject,message from `tabTemplate Types` where name='15 days before renewal date'")
    sites = frappe.db.sql("select client_name,expiry_date,email_id__if_administrator from `tabSite Master`")
    for client_name, expiry_date, email_id in sites:
        # Parameterized datediff (the old version built the SQL by string
        # concatenation of the date value).
        diff = frappe.db.sql("select datediff(%s, curdate())", (expiry_date,))
        if diff[0][0] == 15:
            msg = etemp[0][1].replace('first_name', client_name).replace('renewal date', cstr(expiry_date))
            sendmail(email_id, subject=etemp[0][0], msg=msg)
def before1_renewal_date():
    """Email each client whose subscription renews tomorrow.

    Uses the '1 Working day before renewal date' template; placeholders
    'first_name' and 'renewal date' are substituted.
    """
    from frappe.utils.email_lib import sendmail
    etemp = frappe.db.sql("select subject,message from `tabTemplate Types` where name='1 Working day before renewal date'")
    sites = frappe.db.sql("select client_name,expiry_date,email_id__if_administrator from `tabSite Master`")
    for client_name, expiry_date, email_id in sites:
        # Parameterized datediff (the old version built the SQL by string
        # concatenation of the date value).
        diff = frappe.db.sql("select datediff(%s, curdate())", (expiry_date,))
        if diff[0][0] == 1:
            msg = etemp[0][1].replace('first_name', client_name).replace('renewal date', cstr(expiry_date))
            sendmail(email_id, subject=etemp[0][0], msg=msg)
def on_renewal_date():
    """Email each client whose subscription renews today.

    Uses the 'On renewal date' template; placeholders 'first_name' and
    'Date' are substituted.
    """
    from frappe.utils.email_lib import sendmail
    etemp = frappe.db.sql("select subject,message from `tabTemplate Types` where name='On renewal date'")
    sites = frappe.db.sql("select client_name,expiry_date,email_id__if_administrator from `tabSite Master`")
    for client_name, expiry_date, email_id in sites:
        # Parameterized datediff (the old version built the SQL by string
        # concatenation of the date value).
        diff = frappe.db.sql("select datediff(%s, curdate())", (expiry_date,))
        if diff[0][0] == 0:
            msg = etemp[0][1].replace('first_name', client_name).replace('Date', cstr(expiry_date))
            sendmail(email_id, subject=etemp[0][0], msg=msg)
def after1_exp_date():
    """Email clients matched by the '1 working day after expiry date' template.

    NOTE(review): despite the name, datediff(curdate(), expiry) == -1 selects
    sites expiring *tomorrow*, not yesterday — behaviour preserved as-is.
    """
    from frappe.utils.email_lib import sendmail
    etemp = frappe.db.sql("select subject,message from `tabTemplate Types` where name='1 working day after expiry date'")
    sites = frappe.db.sql("select client_name,expiry_date,email_id__if_administrator from `tabSite Master`")
    for client_name, expiry_date, email_id in sites:
        # Parameterized datediff (the old version built the SQL by string
        # concatenation of the date value).
        diff = frappe.db.sql("select datediff(curdate(), %s)", (expiry_date,))
        if diff[0][0] == (-1):
            msg = etemp[0][1].replace('first_name', client_name)
            sendmail(email_id, subject=etemp[0][0], msg=msg)
def on_grace_date():
    """Email clients matched by the 'On 7th grace day' template.

    NOTE(review): datediff(curdate(), expiry) == -7 selects sites whose
    expiry date is 7 days in the future — behaviour preserved as-is.
    """
    from frappe.utils.email_lib import sendmail
    etemp = frappe.db.sql("select subject,message from `tabTemplate Types` where name='On 7th grace day'")
    sites = frappe.db.sql("select client_name,expiry_date,email_id__if_administrator from `tabSite Master`")
    for client_name, expiry_date, email_id in sites:
        # Parameterized datediff (the old version built the SQL by string
        # concatenation of the date value).
        diff = frappe.db.sql("select datediff(curdate(), %s)", (expiry_date,))
        if diff[0][0] == (-7):
            msg = etemp[0][1].replace('first_name', client_name)
            sendmail(email_id, subject=etemp[0][0], msg=msg)
def after_grace_date():
    """Email clients matched by the 'On grace period expiry' template
    (datediff(curdate(), expiry) == -8, i.e. expiry 8 days ahead —
    behaviour preserved as-is)."""
    from frappe.utils.email_lib import sendmail
    etemp = frappe.db.sql("select subject,message from `tabTemplate Types` where name='On grace period expiry'")
    sites = frappe.db.sql("select client_name,expiry_date,email_id__if_administrator from `tabSite Master`")
    for client_name, expiry_date, email_id in sites:
        # Parameterized datediff (the old version built the SQL by string
        # concatenation of the date value).
        diff = frappe.db.sql("select datediff(curdate(), %s)", (expiry_date,))
        if diff[0][0] == (-8):
            msg = etemp[0][1].replace('first_name', client_name)
            sendmail(email_id, subject=etemp[0][0], msg=msg)
def after_deactivation():
    """Email clients matched by the '1 working day after deactivation'
    template (datediff(curdate(), expiry) == -9 — behaviour preserved
    as-is). Placeholders 'first_name' and 'Expiry date' are substituted."""
    from frappe.utils.email_lib import sendmail
    etemp = frappe.db.sql("select subject,message from `tabTemplate Types` where name='1 working day after deactivation'")
    sites = frappe.db.sql("select client_name,expiry_date,email_id__if_administrator from `tabSite Master`")
    for client_name, expiry_date, email_id in sites:
        # Parameterized datediff (the old version built the SQL by string
        # concatenation of the date value).
        diff = frappe.db.sql("select datediff(curdate(), %s)", (expiry_date,))
        if diff[0][0] == (-9):
            msg = etemp[0][1].replace('first_name', client_name).replace('Expiry date', cstr(expiry_date))
            sendmail(email_id, subject=etemp[0][0], msg=msg)
def on_success_renewal(doc,method):
    """Disabled doc-event hook: would mail the 'On successful renewal'
    template when a customer's invoice exists; implementation below is
    commented out."""
    pass
    # from frappe.utils.email_lib import sendmail
    # #frappe.errprint("in on success_renewal no 2")
    # etemp=frappe.db.sql("select subject,message from `tabTemplate Types` where name='On successful renewal'")
    # query="select name from `tabSales Invoice` where customer='"+doc.customer+"'"
    # res=frappe.db.sql(query)
    # if(res[0][0]):
    # email_qry="select email_id__if_administrator from `tabSite Master` where client_name='"+doc.customer+"'"
    # email_result=frappe.db.sql(email_qry)
    # date=frappe.db.sql("select DATE_ADD(curdate(),INTERVAL 1 year)")
    # msg=etemp[0][1].replace('first_name',doc.customer).replace('expiry_date',cstr(date[0][0]))
    # if email_result:
    # sendmail(email_result[0][0], subject=etemp[0][0], msg = msg)
| gangadhar-kadam/laganerp | erpnext/setup/doctype/site_master/site_master.py | Python | agpl-3.0 | 27,892 | [
"VisIt"
] | 164ad1e04afb9daf8d201fed4822a0695ad5be0f9930486eff838cf8a65595d8 |
import os
import sys
from setuptools import setup, find_packages
from tethys_apps.app_installation import custom_develop_command, custom_install_command
### Apps Definition ###
# Tethys app package name and derived install metadata.
app_package = 'ssw_downloader'
release_package = 'tethysapp-' + app_package
app_class = 'ssw_downloader.app:SswDownloader'
# Absolute path to the app package inside the source tree.
app_package_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tethysapp', app_package)

### Python Dependencies ###
dependencies = []

setup(
    name=release_package,
    version='0.0.0',
    description="Downloads and aggregates netCDF files from NASA's Simple Subset Wizard",
    long_description='',
    keywords='',
    author='Scott Christensen',
    author_email='sdc50@byu.net',
    url='',
    license='BSD2',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    namespace_packages=['tethysapp', 'tethysapp.' + app_package],
    include_package_data=True,
    zip_safe=False,
    install_requires=dependencies,
    # Tethys-provided commands that install the app package into the
    # Tethys environment during `install` / `develop`.
    cmdclass={
        'install': custom_install_command(app_package, app_package_dir, dependencies),
        'develop': custom_develop_command(app_package, app_package_dir, dependencies)
    }
)
| CI-WATER/tethysapp-ssw_downloader | setup.py | Python | bsd-2-clause | 1,153 | [
"NetCDF"
] | e806381e09f60ba3184f79e07028f816e31cf2cf3595fa3c60502f8bee29e3f1 |
'''Grid cell network neuron definitions.
.. currentmodule:: grid_cell_model.models.gc_neurons
A helper module to set up parameters of E and I populations.
Functions
---------
.. autosummary::
getENeuronParams
getINeuronParams
'''
from __future__ import absolute_import, print_function, division
import numpy as np
__all__ = ['getENeuronParams', 'getINeuronParams']
def getENeuronParams(no):
    '''
    Return a dictionary of E neuron parameters, built from the attributes of
    `no` (a neuron options object). Membrane capacitance is derived as
    taum_e * gL_e; the theta drive phase is fixed at -pi/2.
    '''
    params = dict(
        V_m=no.EL_e,
        C_m=no.taum_e * no.gL_e,
        t_ref=no.t_ref_e,
        V_peak=no.V_peak_e,
        V_reset=no.Vr_e,
        E_L=no.EL_e,
        g_L=no.gL_e,
        Delta_T=no.deltaT_e,
        V_th=no.Vt_e,
        E_AMPA=no.E_AMPA,
        E_GABA_A=no.E_GABA_A,
        tau_AMPA_fall=no.tau_AMPA,
        tau_NMDA_fall=no.tau_NMDA_fall,
        tau_GABA_A_fall=no.tau_GABA_A_fall,
        tau_AHP=no.tau_AHP_e,
        E_AHP=no.E_AHP_e,
        g_AHP_max=no.g_AHP_e_max,
        g_AHP_ad=False,
        I_const=no.Iext_e_const,
        I_ac_amp=no.Iext_e_theta,
        I_ac_freq=no.theta_freq,
        I_ac_phase=-np.pi / 2,
        I_ac_start_t=no.theta_start_t,
        I_noise_std=no.noise_sigma,
        V_clamp=no.Vclamp,
        C_Mg=no.C_Mg,
        rat_pos_x=[],
        rat_pos_y=[],
    )
    return params
def getINeuronParams(no):
    '''
    Return a dictionary of I neuron parameters, built from the attributes of
    `no` (a neuron options object). For interneurons the AHP mechanism acts
    as spike adaptation (g_AHP_ad=True, E_AHP at the resting potential).
    '''
    params = dict(
        V_m=no.EL_i,
        C_m=no.taum_i * no.gL_i,
        t_ref=no.t_ref_i,
        V_peak=no.V_peak_i,
        V_reset=no.Vr_i,
        E_L=no.EL_i,
        g_L=no.gL_i,
        Delta_T=no.deltaT_i,
        V_th=no.Vt_i,
        E_AMPA=no.E_AMPA,
        E_GABA_A=no.E_GABA_A,
        tau_AMPA_fall=no.tau_AMPA,
        tau_NMDA_fall=no.tau_NMDA_fall,
        tau_GABA_A_fall=no.tau_GABA_A_fall,
        tau_AHP=no.ad_tau_i_mean,
        E_AHP=no.EL_i,  # AHP has a role of adaptation here
        g_AHP_max=no.ad_i_g_inc,
        g_AHP_ad=True,
        I_const=no.Iext_i_const,
        I_ac_amp=no.Iext_i_theta,
        I_ac_freq=no.theta_freq,
        I_ac_phase=-np.pi / 2,
        I_ac_start_t=no.theta_start_t,
        I_noise_std=no.noise_sigma,
        V_clamp=no.Vclamp,
        g_NMDA_fraction=no.NMDA_amount,
        C_Mg=no.C_Mg,
        rat_pos_x=[],
        rat_pos_y=[],
    )
    return params
| MattNolanLab/ei-attractor | grid_cell_model/models/gc_neurons.py | Python | gpl-3.0 | 3,301 | [
"NEURON"
] | 035e4a0090d27cd965ddbadfdc2c2672bf431b6c93052e15d797b3a53970e6b2 |
#### PATTERN | GRAPH ###############################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import os
import sys
from math import sqrt, pow
from math import sin, cos, atan2, degrees, radians, pi
from random import random
from heapq import heappush, heappop
from warnings import warn
from codecs import open
from shutil import rmtree
# Directory of this module on disk; falls back to "" when __file__ is
# unavailable (e.g. in an embedded or frozen interpreter).
try:
    MODULE = os.path.dirname(os.path.realpath(__file__))
except:
    MODULE = ""

if sys.version > "3":
    long = int  # Python 3 has no separate long type.

# float("inf") doesn't work on windows.
INFINITE = 1e20
#--- LIST FUNCTIONS --------------------------------------------------------------------------------
def unique(iterable):
    """ Returns a list copy in which each item occurs only once (in-order).
    """
    seen = set()
    ordered = []
    for item in iterable:
        if item not in seen:
            seen.add(item)
            ordered.append(item)
    return ordered
#--- DRAWING FUNCTIONS -----------------------------------------------------------------------------
# This module is standalone (i.e., it is not a graph rendering package).
# If you want to call Graph.draw() then line(), ellipse() and Text.draw() must be implemented.
def line(x1, y1, x2, y2, stroke=(0,0,0,1), strokewidth=1):
    """ Draws a line from (x1, y1) to (x2, y2) using the given stroke color and stroke width.
    """
    # No-op placeholder: per the drawing-functions note above, a graphics
    # package must implement line() for Graph.draw() to render anything.
    pass
def ellipse(x, y, width, height, fill=(0,0,0,1), stroke=None, strokewidth=1):
    """ Draws an ellipse at (x, y) with given fill and stroke color and stroke width.
    """
    # No-op placeholder: per the drawing-functions note above, a graphics
    # package must implement ellipse() for Graph.draw() to render anything.
    pass
class Text(object):

    def __init__(self, string, **kwargs):
        """ Draws the node label.
            Optional properties include width, fill, font, fontsize, fontweight.
        """
        self.string = string
        self.__dict__.update(kwargs)

    def copy(self):
        """ Returns a new Text with the same string and styling properties. """
        properties = dict(
            (key, value) for key, value in self.__dict__.items() if key != "string")
        return Text(self.string, **properties)

    def draw(self):
        """ Rendering hook: no-op unless implemented by a graphics package. """
        pass
class Vector(object):
    def __init__(self, x=0, y=0):
        """ A simple 2D point with mutable x and y, used to store node forces. """
        self.x, self.y = x, y
def coordinates(x, y, distance, angle):
    """ Returns the point at the given distance and angle (in degrees) from (x, y).
    """
    a = radians(angle)
    return (x + distance * cos(a),
            y + distance * sin(a))
#--- DEEPCOPY --------------------------------------------------------------------------------------
def deepcopy(o):
    """ Returns a deep (recursive) copy of the given object.
    """
    if o is None:
        return o
    if hasattr(o, "copy"):
        # Anything with a copy() method (Node, Edge, Graph, ...) delegates to it.
        # NOTE(review): builtin dict/set also define copy(), so they take this
        # branch and are copied *shallowly*, never reaching the recursive
        # branches below — confirm this is intended.
        return o.copy()
    if isinstance(o, (basestring, bool, int, float, long, complex)):
        # Immutable primitives can be returned as-is.
        # NOTE(review): basestring exists on Python 2 only; under Python 3 this
        # line raises NameError — confirm the intended target version.
        return o
    if isinstance(o, (list, tuple, set)):
        # Rebuild the container with deep copies of its items.
        return o.__class__(deepcopy(v) for v in o)
    if isinstance(o, dict):
        # Deep-copy both keys and values.
        return dict((deepcopy(k), deepcopy(v)) for k,v in o.items())
    raise Exception("don't know how to copy %s" % o.__class__.__name__)
#### NODE ##########################################################################################
#--- NODE ------------------------------------------------------------------------------------------
class Node(object):
    def __init__(self, id="", radius=5, **kwargs):
        """ A node with a unique id in the graph.
            Node.id is drawn as a text label, unless optional parameter text=False.
            Optional parameters include: fill, stroke, strokewidth, text, font, fontsize, fontweight.
        """
        self.graph = None
        self.links = Links()
        self.id = id
        self._x = 0.0 # Calculated by Graph.layout.update().
        self._y = 0.0 # Calculated by Graph.layout.update().
        self.force = Vector(0.0, 0.0)
        self.radius = radius
        self.fixed = kwargs.pop("fixed", False)
        self.stroke = kwargs.pop("stroke", (0,0,0,1))
        self.strokewidth = kwargs.pop("strokewidth", 1)
        self.fill = kwargs.pop("fill", None)
        # The "text" keyword doubles as an on/off switch (text=False suppresses
        # the label) and as the label fill color (text=(r,g,b,a)).
        # NOTE(review): unicode / str.decode is Python 2 only — confirm target version.
        self.text = kwargs.get("text", True) and \
            Text(isinstance(id, unicode) and id or str(id).decode("utf-8", "ignore"),
                   width = 85,
                    fill = kwargs.pop("text", (0,0,0,1)),
                fontsize = kwargs.pop("fontsize", 11), **kwargs) or None
        self._weight = None # Calculated by Graph.eigenvector_centrality().
        self._centrality = None # Calculated by Graph.betweenness_centrality().
    @property
    def _distance(self):
        # Graph.distance controls the (x,y) spacing between nodes.
        return self.graph and float(self.graph.distance) or 1.0
    def _get_x(self):
        return self._x * self._distance
    def _get_y(self):
        return self._y * self._distance
    def _set_x(self, v):
        self._x = v / self._distance
    def _set_y(self, v):
        self._y = v / self._distance
    # Node.x and Node.y scale the internal layout coordinates by Graph.distance.
    x = property(_get_x, _set_x)
    y = property(_get_y, _set_y)
    @property
    def edges(self):
        """ Yields a list of edges from/to the node.
        """
        return self.graph is not None \
           and [e for e in self.graph.edges if self.id in (e.node1.id, e.node2.id)] \
            or []
    def edge(self, node, reverse=False):
        """ Yields the Edge from this node to the given node (or node id), or None.
            With reverse=True, yields the Edge from the given node to this node.
        """
        # Bug fix: this method was decorated with @property, which made it
        # impossible to call (properties take no arguments); accessing
        # Node.edge raised TypeError. It is a plain method.
        if not isinstance(node, Node):
            node = self.graph and self.graph.get(node) or node
        if reverse:
            return node.links.edge(self)
        return self.links.edge(node)
    @property
    def weight(self):
        """ Yields eigenvector centrality as a number between 0.0-1.0.
        """
        # Lazily computed for the whole graph on first access.
        if self.graph and self._weight is None:
            self.graph.eigenvector_centrality()
        return self._weight
    @property
    def centrality(self):
        """ Yields betweenness centrality as a number between 0.0-1.0.
        """
        # Lazily computed for the whole graph on first access.
        if self.graph and self._centrality is None:
            self.graph.betweenness_centrality()
        return self._centrality
    # Convenience aliases for the two centrality measures.
    eigenvector = eigenvector_centrality = weight
    betweenness = betweenness_centrality = centrality
    @property
    def degree(self):
        """ Yields degree centrality as a number between 0.0-1.0.
        """
        return self.graph and (1.0 * len(self.links) / len(self.graph)) or 0.0
    def flatten(self, depth=1, traversable=lambda node, edge: True, _visited=None):
        """ Recursively lists the node and nodes linked to it.
            Depth 0 returns a list with the node.
            Depth 1 returns a list with the node and all the directly linked nodes.
            Depth 2 includes the linked nodes' links, and so on.
        """
        _visited = _visited or {}
        _visited[self.id] = (self, depth)
        if depth >= 1:
            for n in self.links:
                # Revisit a node if we can now go deeper from it.
                if n.id not in _visited or _visited[n.id][1] < depth-1:
                    if traversable(self, self.links.edges[n.id]):
                        n.flatten(depth-1, traversable, _visited)
        return [n for n,d in _visited.values()] # Fast, but not order-preserving.
    def draw(self, weighted=False):
        """ Draws the node as a circle with the given radius, fill, stroke and strokewidth.
            Draws the node centrality as a shadow effect when weighted=True.
            Draws the node text label.
            Override this method in a subclass for custom drawing.
        """
        # Draw the node weight as a shadow (based on node betweenness centrality).
        if weighted is not False and self.centrality > (weighted==True and -1 or weighted):
            w = self.centrality * 35
            ellipse(
                self.x,
                self.y,
                self.radius*2 + w,
                self.radius*2 + w, fill=(0,0,0,0.2), stroke=None)
        # Draw the node.
        ellipse(
            self.x,
            self.y,
            self.radius*2,
            self.radius*2, fill=self.fill, stroke=self.stroke, strokewidth=self.strokewidth)
        # Draw the node text label.
        if self.text:
            self.text.draw(
                self.x + self.radius,
                self.y + self.radius)
    def contains(self, x, y):
        """ Returns True if the given coordinates (x, y) are inside the node radius.
        """
        # Uses a square bounding box, not a true circle test.
        return abs(self.x - x) < self.radius*2 and \
               abs(self.y - y) < self.radius*2
    def __repr__(self):
        return "%s(id=%s)" % (self.__class__.__name__, repr(self.id))
    def __eq__(self, node):
        # Nodes compare equal by id, across graphs.
        return isinstance(node, Node) and self.id == node.id
    def __ne__(self, node):
        return not self.__eq__(node)
#--- NODE LINKS ------------------------------------------------------------------------------------
class Links(list):
    def __init__(self):
        """ A list of nodes, where each appended node is mapped to its edge.
            Links.edge(node) returns the edge for a given node (or node id).
        """
        self.edges = {}
    def append(self, node, edge=None):
        # First registration wins: a node already in Links.edges is neither
        # re-appended nor has its stored edge overwritten.
        if node.id not in self.edges:
            list.append(self, node)
            self.edges[node.id] = edge
    def remove(self, node):
        list.remove(self, node)
        if node.id in self.edges:
            del self.edges[node.id]
    def edge(self, node):
        """ Returns the edge for the given node (or node id), or None. """
        # Note: uses and/or, so a Node with a falsy id falls back to the Node itself.
        return self.edges.get(isinstance(node, Node) and node.id or node)
#### EDGE ##########################################################################################
class Edge(object):
    def __init__(self, node1, node2, weight=0.0, length=1.0, type=None, stroke=(0,0,0,1), strokewidth=1):
        """ A connection between two nodes.
            Its weight indicates the importance (not the cost) of the connection.
            Its type is useful in a semantic network (e.g. "is-a", "is-part-of", ...)
        """
        self.node1, self.node2 = node1, node2
        self._weight = weight
        self.length = length
        self.type = type
        self.stroke = stroke
        self.strokewidth = strokewidth
    def _get_weight(self):
        return self._weight
    def _set_weight(self, v):
        self._weight = v
        # Edge weights are baked into the cached adjacency map, so invalidate it.
        for node in (self.node1, self.node2):
            if node.graph is not None:
                node.graph._adjacency = None
    weight = property(_get_weight, _set_weight)
    def draw(self, weighted=False, directed=False):
        """ Draws the edge as a line with the given stroke and strokewidth (increased with Edge.weight).
            Override this method in a subclass for custom drawing.
        """
        w = weighted and self.weight or 0
        line(self.node1.x, self.node1.y,
             self.node2.x, self.node2.y,
             stroke=self.stroke, strokewidth=self.strokewidth + w)
        if directed:
            self.draw_arrow(stroke=self.stroke, strokewidth=self.strokewidth + w)
    def draw_arrow(self, **kwargs):
        """ Draws the direction of the edge as an arrow on the rim of the receiving node.
        """
        x0, y0 = self.node1.x, self.node1.y
        x1, y1 = self.node2.x, self.node2.y
        # Angle of the edge, node1 -> node2.
        angle = degrees(atan2(y1 - y0, x1 - x0))
        # Place the arrow tip on node2's rim instead of its center.
        d = sqrt(pow(x1 - x0, 2) + pow(y1 - y0, 2))
        tx, ty = coordinates(x0, y0, d - self.node2.radius - 1, angle)
        # The two base corners of the arrowhead, behind the tip.
        size = max(kwargs.get("strokewidth", 1) * 3, 6)
        bx1, by1 = coordinates(tx, ty, -size, angle - 20)
        bx2, by2 = coordinates(tx, ty, -size, angle + 20)
        line(tx, ty, bx1, by1, **kwargs)
        line(tx, ty, bx2, by2, **kwargs)
        line(bx1, by1, bx2, by2, **kwargs)
    def __repr__(self):
        return "%s(id1=%s, id2=%s)" % (self.__class__.__name__, repr(self.node1.id), repr(self.node2.id))
#### GRAPH #########################################################################################
#--- GRAPH NODE DICTIONARY -------------------------------------------------------------------------
class nodedict(dict):
    def __init__(self, graph, *args, **kwargs):
        """ Graph.shortest_paths() and Graph.eigenvector_centrality() return a nodedict,
            where dictionary values can be accessed by Node as well as by node id.
        """
        dict.__init__(self, *args, **kwargs)
        self.graph = graph
    def __contains__(self, node):
        # Resolve a node id to its Node via the graph, then test membership.
        return dict.__contains__(self, self.graph.get(node, node))
    def __getitem__(self, node):
        key = node if isinstance(node, Node) else self.graph[node]
        return dict.__getitem__(self, key)
    def get(self, node, default=None):
        return dict.get(self, self.graph.get(node, node), default)
#--- GRAPH -----------------------------------------------------------------------------------------
# Graph layouts:
# Graph layouts:
SPRING = "spring" # Force-based layout (GraphSpringLayout).
# Graph node centrality:
EIGENVECTOR = "eigenvector" # Importance by (indirect) incoming traffic (Node.weight).
BETWEENNESS = "betweenness" # Importance by passing traffic (Node.centrality).
DEGREE = "degree" # Importance by number of connections (Node.degree).
# Graph node sort order:
WEIGHT, CENTRALITY = "weight", "centrality"
ALL = "all" # Used by Graph.copy() to copy all nodes.
class Graph(dict):
    # The graph maps node id => Node object (dict interface),
    # and keeps ordered Graph.nodes and Graph.edges lists alongside.
    def __init__(self, layout=SPRING, distance=10.0):
        """ A network of nodes connected by edges that can be drawn with a given layout.
        """
        self.nodes = [] # List of Node objects.
        self.edges = [] # List of Edge objects.
        self.root = None
        self._adjacency = None # Cached adjacency() dict.
        self.layout = layout == SPRING and GraphSpringLayout(self) or GraphLayout(self)
        self.distance = distance
    def __getitem__(self, id):
        try:
            return dict.__getitem__(self, id)
        except KeyError:
            # Re-raise with a friendlier message including the missing id.
            raise KeyError("no node with id '%s' in graph" % id)
    def append(self, base, *args, **kwargs):
        """ Appends a Node or Edge to the graph: Graph.append(Node, id="rabbit").
        """
        kwargs["base"] = base
        if issubclass(base, Node):
            return self.add_node(*args, **kwargs)
        if issubclass(base, Edge):
            return self.add_edge(*args, **kwargs)
    def add_node(self, id, *args, **kwargs):
        """ Appends a new Node to the graph.
            An optional base parameter can be used to pass a subclass of Node.
        """
        n = kwargs.pop("base", Node)
        # Reuse an existing node with the same id, if any.
        n = isinstance(id, Node) and id or self.get(id) or n(id, *args, **kwargs)
        if n.id not in self:
            self.nodes.append(n)
            self[n.id] = n; n.graph = self
        self.root = kwargs.get("root", False) and n or self.root
        # Clear adjacency cache.
        self._adjacency = None
        return n
    def add_edge(self, id1, id2, *args, **kwargs):
        """ Appends a new Edge to the graph.
            An optional base parameter can be used to pass a subclass of Edge:
            Graph.add_edge("cold", "winter", base=IsPropertyOf)
        """
        # Create nodes that are not yet part of the graph.
        n1 = self.add_node(id1)
        n2 = self.add_node(id2)
        # Creates an Edge instance.
        # If an edge (in the same direction) already exists, yields that edge instead.
        e1 = n1.links.edge(n2)
        if e1 and e1.node1 == n1 and e1.node2 == n2:
            return e1
        e2 = kwargs.pop("base", Edge)
        e2 = e2(n1, n2, *args, **kwargs)
        self.edges.append(e2)
        # Synchronizes Node.links:
        # A.links.edge(B) yields edge A->B
        # B.links.edge(A) yields edge B->A
        n1.links.append(n2, edge=e2)
        n2.links.append(n1, edge=e1 or e2)
        # Clear adjacency cache.
        self._adjacency = None
        return e2
    def remove(self, x):
        """ Removes the given Node (and all its edges) or Edge from the graph.
            Note: removing Edge a->b does not remove Edge b->a.
        """
        if isinstance(x, Node) and x.id in self:
            self.pop(x.id)
            self.nodes.remove(x); x.graph = None
            # Remove all edges involving the given node.
            for e in list(self.edges):
                if x in (e.node1, e.node2):
                    if x in e.node1.links: e.node1.links.remove(x)
                    if x in e.node2.links: e.node2.links.remove(x)
                    self.edges.remove(e)
        if isinstance(x, Edge):
            self.edges.remove(x)
        # Clear adjacency cache.
        self._adjacency = None
    def node(self, id):
        """ Returns the node in the graph with the given id.
        """
        if isinstance(id, Node) and id.graph == self:
            return id
        return self.get(id, None)
    def edge(self, id1, id2):
        """ Returns the edge between the nodes with given id1 and id2.
        """
        # Accept Node objects belonging to this graph as well as raw ids.
        if isinstance(id1, Node) and id1.graph == self:
            id1 = id1.id
        if isinstance(id2, Node) and id2.graph == self:
            id2 = id2.id
        return id1 in self and id2 in self and self[id1].links.edge(id2) or None
    def paths(self, node1, node2, length=4, path=[]):
        """ Returns a list of paths (shorter than or equal to given length) connecting the two nodes.
        """
        if not isinstance(node1, Node):
            node1 = self[node1]
        if not isinstance(node2, Node):
            node2 = self[node2]
        # Delegates to the module-level brute-force paths() on node ids.
        return [[self[id] for id in p] for p in paths(self, node1.id, node2.id, length, path)]
    def shortest_path(self, node1, node2, heuristic=None, directed=False):
        """ Returns a list of nodes connecting the two nodes.
        """
        if not isinstance(node1, Node):
            node1 = self[node1]
        if not isinstance(node2, Node):
            node2 = self[node2]
        try:
            p = dijkstra_shortest_path(self, node1.id, node2.id, heuristic, directed)
            p = [self[id] for id in p]
            return p
        except IndexError:
            # Raised by dijkstra_shortest_path() when the nodes are unconnected.
            return None
    def shortest_paths(self, node, heuristic=None, directed=False):
        """ Returns a dictionary of nodes, each linked to a list of nodes (shortest path).
        """
        if not isinstance(node, Node):
            node = self[node]
        p = nodedict(self)
        for id, path in dijkstra_shortest_paths(self, node.id, heuristic, directed).items():
            # Unreachable nodes map to None.
            p[self[id]] = path and [self[id] for id in path] or None
        return p
    def eigenvector_centrality(self, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
        """ Calculates eigenvector centrality and returns a node => weight dictionary.
            Node.weight is updated in the process.
            Node.weight is higher for nodes with a lot of (indirect) incoming traffic.
        """
        ec = eigenvector_centrality(self, normalized, reversed, rating, iterations, tolerance)
        ec = nodedict(self, ((self[id], w) for id, w in ec.items()))
        # Cache the score on each node so Node.weight is cheap afterwards.
        for n, w in ec.items():
            n._weight = w
        return ec
    def betweenness_centrality(self, normalized=True, directed=False):
        """ Calculates betweenness centrality and returns a node => weight dictionary.
            Node.centrality is updated in the process.
            Node.centrality is higher for nodes with a lot of passing traffic.
        """
        bc = brandes_betweenness_centrality(self, normalized, directed)
        bc = nodedict(self, ((self[id], w) for id, w in bc.items()))
        # Cache the score on each node so Node.centrality is cheap afterwards.
        for n, w in bc.items():
            n._centrality = w
        return bc
    def sorted(self, order=WEIGHT, threshold=0.0):
        """ Returns a list of nodes sorted by WEIGHT or CENTRALITY.
            Nodes with a lot of traffic will be at the start of the list.
        """
        o = lambda node: getattr(node, order)
        # NOTE(review): ties in the score fall back to comparing Node objects,
        # which Python 3 cannot order (Node defines no __lt__) — TODO confirm.
        nodes = ((o(n), n) for n in self.nodes if o(n) >= threshold)
        nodes = reversed(sorted(nodes))
        return [n for w, n in nodes]
    def prune(self, depth=0):
        """ Removes all nodes with less or equal links than depth.
        """
        for n in (n for n in self.nodes if len(n.links) <= depth):
            self.remove(n)
    def fringe(self, depth=0, traversable=lambda node, edge: True):
        """ For depth=0, returns the list of leaf nodes (nodes with only one connection).
            For depth=1, returns the list of leaf nodes and their connected nodes, and so on.
        """
        u = []; [u.extend(n.flatten(depth, traversable)) for n in self.nodes if len(n.links) == 1]
        return unique(u)
    @property
    def density(self):
        """ Yields the number of edges vs. the maximum number of possible edges.
            For example, <0.35 => sparse, >0.65 => dense, 1.0 => complete.
        """
        # NOTE(review): raises ZeroDivisionError for graphs with < 2 nodes.
        return 2.0*len(self.edges) / (len(self.nodes) * (len(self.nodes)-1))
    @property
    def is_complete(self):
        return self.density == 1.0
    @property
    def is_dense(self):
        return self.density > 0.65
    @property
    def is_sparse(self):
        return self.density < 0.35
    def split(self):
        """ Returns the list of unconnected subgraphs.
        """
        return partition(self)
    def update(self, iterations=10, **kwargs):
        """ Graph.layout.update() is called the given number of iterations.
        """
        for i in range(iterations):
            self.layout.update(**kwargs)
    def draw(self, weighted=False, directed=False):
        """ Draws all nodes and edges.
        """
        for e in self.edges:
            e.draw(weighted, directed)
        for n in reversed(self.nodes): # New nodes (with Node._weight=None) first.
            n.draw(weighted)
    def node_at(self, x, y):
        """ Returns the node at (x,y) or None.
        """
        for n in self.nodes:
            if n.contains(x, y): return n
    def _add_node_copy(self, n, **kwargs):
        # Magical fairy dust to copy subclasses of Node.
        # We assume that the subclass constructor takes an optional "text" parameter
        # (Text objects in NodeBox for OpenGL's implementation are expensive).
        try:
            new = self.add_node(n.id, root=kwargs.get("root",False), text=False)
        except TypeError:
            # Subclass constructor without a "text" parameter.
            new = self.add_node(n.id, root=kwargs.get("root",False))
        new.__class__ = n.__class__
        # Copy all state except graph-specific/layout-specific attributes.
        new.__dict__.update((k, deepcopy(v)) for k,v in n.__dict__.items()
            if k not in ("graph", "links", "_x", "_y", "force", "_weight", "_centrality"))
    def _add_edge_copy(self, e, **kwargs):
        # Skip edges whose endpoints are not (both) in this graph.
        if kwargs.get("node1", e.node1).id not in self \
        or kwargs.get("node2", e.node2).id not in self:
            return
        new = self.add_edge(
            kwargs.get("node1", self[e.node1.id]),
            kwargs.get("node2", self[e.node2.id]))
        new.__class__ = e.__class__
        new.__dict__.update((k, deepcopy(v)) for k,v in e.__dict__.items()
            if k not in ("node1", "node2"))
    def copy(self, nodes=ALL):
        """ Returns a copy of the graph with the given list of nodes (and connecting edges).
            The layout will be reset.
        """
        g = Graph(layout=None, distance=self.distance)
        g.layout = self.layout.copy(graph=g)
        for n in (nodes==ALL and self.nodes or (isinstance(n, Node) and n or self[n] for n in nodes)):
            g._add_node_copy(n, root=self.root==n)
        for e in self.edges:
            g._add_edge_copy(e)
        return g
    def export(self, *args, **kwargs):
        # Delegates to the module-level export() (defined elsewhere in the file).
        export(self, *args, **kwargs)
    def write(self, *args, **kwargs):
        write(self, *args, **kwargs)
    def serialize(self, *args, **kwargs):
        return render(self, *args, **kwargs)
#--- GRAPH LAYOUT ----------------------------------------------------------------------------------
# Graph drawing or graph layout, as a branch of graph theory,
# applies topology and geometry to derive two-dimensional representations of graphs.
class GraphLayout(object):
    def __init__(self, graph):
        """ Calculates node positions iteratively when GraphLayout.update() is called.
        """
        self.graph = graph
        self.iterations = 0
    def update(self):
        """ Advances the layout one step; subclasses move the nodes here. """
        self.iterations += 1
    def reset(self):
        """ Resets the iteration count and all node positions and forces. """
        self.iterations = 0
        for n in self.graph.nodes:
            n._x = 0.0
            n._y = 0.0
            n.force = Vector(0.0, 0.0)
    @property
    def bounds(self):
        """ Returns a (x, y, width, height)-tuple of the approximate layout dimensions.
        """
        x0, y0 = +INFINITE, +INFINITE
        x1, y1 = -INFINITE, -INFINITE
        for n in self.graph.nodes:
            if (n.x < x0): x0 = n.x
            if (n.y < y0): y0 = n.y
            if (n.x > x1): x1 = n.x
            if (n.y > y1): y1 = n.y
        return (x0, y0, x1-x0, y1-y0)
    def copy(self, graph):
        """ Returns a fresh (iteration 0) layout for the given graph.
        """
        # Bug fix: this called GraphLayout(self, graph), passing two arguments
        # to a one-argument constructor, which raised TypeError.
        return GraphLayout(graph)
#--- GRAPH LAYOUT: FORCE-BASED ---------------------------------------------------------------------
class GraphSpringLayout(GraphLayout):
    def __init__(self, graph):
        """ A force-based layout in which edges are regarded as springs.
            The forces are applied to the nodes, pulling them closer or pushing them apart.
        """
        # Based on: http://snipplr.com/view/1950/graph-javascript-framework-version-001/
        GraphLayout.__init__(self, graph)
        self.k = 4.0 # Force constant.
        self.force = 0.01 # Force multiplier.
        self.repulsion = 50 # Maximum repulsive force radius.
    def _distance(self, node1, node2):
        # Yields a tuple with distances (dx, dy, d, d**2).
        # Ensures that the distance is never zero (which deadlocks the animation).
        dx = node2._x - node1._x
        dy = node2._y - node1._y
        d2 = dx * dx + dy * dy
        if d2 < 0.01:
            # Nudge coincident nodes apart with a small random offset.
            dx = random() * 0.1 + 0.1
            dy = random() * 0.1 + 0.1
            d2 = dx * dx + dy * dy
        return dx, dy, sqrt(d2), d2
    def _repulse(self, node1, node2):
        # Updates Node.force with the repulsive force.
        # Repulsion falls off as 1/d**2 and only applies within self.repulsion radius.
        dx, dy, d, d2 = self._distance(node1, node2)
        if d < self.repulsion:
            f = self.k ** 2 / d2
            node2.force.x += f * dx
            node2.force.y += f * dy
            node1.force.x -= f * dx
            node1.force.y -= f * dy
    def _attract(self, node1, node2, weight=0, length=1.0):
        # Updates Node.force with the attractive edge force.
        # Heavier edges pull harder; longer edge lengths weaken the pull.
        dx, dy, d, d2 = self._distance(node1, node2)
        d = min(d, self.repulsion)
        f = (d2 - self.k ** 2) / self.k * length
        f *= weight * 0.5 + 1
        f /= d
        node2.force.x -= f * dx
        node2.force.y -= f * dy
        node1.force.x += f * dx
        node1.force.y += f * dy
    def update(self, weight=10.0, limit=0.5):
        """ Updates the position of nodes in the graph.
            The weight parameter determines the impact of edge weight.
            The limit parameter determines the maximum movement each update().
        """
        GraphLayout.update(self)
        # Forces on all nodes due to node-node repulsions.
        # Note: O(n**2) over all node pairs.
        for i, n1 in enumerate(self.graph.nodes):
            for j, n2 in enumerate(self.graph.nodes[i+1:]):
                self._repulse(n1, n2)
        # Forces on nodes due to edge attractions.
        for e in self.graph.edges:
            self._attract(e.node1, e.node2, weight * e.weight, 1.0 / (e.length or 0.01))
        # Move nodes by given force.
        for n in self.graph.nodes:
            if not n.fixed:
                # Clamp each axis to [-limit, +limit] per update.
                n._x += max(-limit, min(self.force * n.force.x, limit))
                n._y += max(-limit, min(self.force * n.force.y, limit))
            n.force.x = 0
            n.force.y = 0
    def copy(self, graph):
        # Copies the tuning constants, but not node positions or iterations.
        g = GraphSpringLayout(graph)
        g.k, g.force, g.repulsion = self.k, self.force, self.repulsion
        return g
#### GRAPH ANALYSIS ################################################################################
#--- GRAPH SEARCH ----------------------------------------------------------------------------------
def depth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True, _visited=None):
    """ Visits all the nodes connected to the given root node, depth-first.
        The visit function is called on each node.
        Recursion will stop if it returns True, and subsequently dfs() will return True.
        The traversable function takes the current node and edge,
        and returns True if we are allowed to follow this connection to the next node.
        For example, the traversable for directed edges is follows:
        lambda node, edge: node == edge.node1
    """
    stop = visit(node)
    _visited = _visited or {}
    _visited[node.id] = True
    for neighbor in node.links:
        if stop:
            return True
        edge = node.links.edge(neighbor)
        if traversable(node, edge) is False:
            continue
        if neighbor.id not in _visited:
            stop = depth_first_search(neighbor, visit, traversable, _visited)
    return stop

dfs = depth_first_search
def breadth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True):
    """ Visits all the nodes connected to the given root node, breadth-first.
        The visit function is called on each node; traversal stops (and True is
        returned) as soon as it returns True.
    """
    queue = [node]
    _visited = {}
    while queue:
        current = queue.pop(0)
        if current.id not in _visited:
            if visit(current):
                return True
            for neighbor in current.links:
                if traversable(current, current.links.edge(neighbor)) is not False:
                    queue.append(neighbor)
            _visited[current.id] = True
    return False

bfs = breadth_first_search
def paths(graph, id1, id2, length=4, path=None, _root=True):
    """ Returns a list of paths from node with id1 to node with id2.
        Only paths shorter than or equal to the given length are included.
        Uses a brute-force DFS approach (performance drops exponentially for longer paths).
        The _root parameter is internal: the top-level call sorts results by length.
    """
    # Avoid a mutable default argument; None stands in for an empty path.
    path = path or []
    if len(path) >= length:
        return []
    if id1 not in graph:
        return []
    if id1 == id2:
        return [path + [id1]]
    path = path + [id1]
    p = []
    s = set(path) # Set membership is faster than list membership (5% speedup).
    for node in graph[id1].links:
        if node.id not in s:
            p.extend(paths(graph, node.id, id2, length, path, False))
    return _root and sorted(p, key=len) or p
def edges(path):
    """ Returns an iterator of Edge objects for the given list of nodes.
        It yields None where two successive nodes are not connected.
    """
    # For example, the distance (i.e., edge weight sum) of a path:
    # sum(e.weight for e in edges(path))
    if len(path) <= 1:
        return iter(())
    return (node.links.edge(path[i + 1]) for i, node in enumerate(path[:-1]))
#--- GRAPH ADJACENCY -------------------------------------------------------------------------------
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
    """ Returns a dictionary indexed by node id1's,
        in which each value is a dictionary of connected node id2's linking to the edge weight.
        If directed=True, edges go from id1 to id2, but not the other way.
        If stochastic=True, all the weights for the neighbors of a given node sum to 1.
        A heuristic function can be given that takes two node id's and returns
        an additional cost for movement between the two nodes.
        Note: the "reversed" parameter shadows the builtin (kept for API compatibility).
    """
    # Caching a heuristic from a method won't work.
    # Bound method objects are transient,
    # i.e., id(object.method) returns a new value each time.
    # The underlying code object is stable; __code__ works on Python 2.6+ and 3
    # (the old func_code attribute was removed in Python 3).
    if graph._adjacency is not None and \
       graph._adjacency[1:] == (directed, reversed, stochastic, heuristic and heuristic.__code__):
        return graph._adjacency[0]
    map = {}
    for n in graph.nodes:
        map[n.id] = {}
    for e in graph.edges:
        id1, id2 = not reversed and (e.node1.id, e.node2.id) or (e.node2.id, e.node1.id)
        # Heavier edges (weight 0.0-1.0) yield a lower traversal cost (base 1.0).
        map[id1][id2] = 1.0 - 0.5 * e.weight
        if heuristic:
            map[id1][id2] += heuristic(id1, id2)
        if not directed:
            map[id2][id1] = map[id1][id2]
    if stochastic:
        # Normalize each node's outgoing weights so they sum to 1.
        for id1 in map:
            n = sum(map[id1].values())
            for id2 in map[id1]:
                map[id1][id2] /= n
    # Cache the adjacency map: this makes dijkstra_shortest_path() 2x faster in repeated use.
    graph._adjacency = (map, directed, reversed, stochastic, heuristic and heuristic.__code__)
    return map
def dijkstra_shortest_path(graph, id1, id2, heuristic=None, directed=False):
    """ Dijkstra algorithm for finding the shortest path between two nodes.
        Returns a list of node id's, starting with id1 and ending with id2.
        Raises an IndexError between nodes on unconnected graphs.
    """
    # Based on: Connelly Barnes, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
    def flatten(list):
        # Flattens a linked list of the form [0,[1,[2,[]]]]
        # (note: the parameter shadows the builtin list inside this helper).
        while len(list) > 0:
            yield list[0]; list=list[1]
    G = adjacency(graph, directed=directed, heuristic=heuristic)
    q = [(0, id1, ())] # Heap of (cost, path_head, path_rest).
    visited = set() # Visited nodes.
    while True:
        # heappop() on an exhausted heap raises IndexError => unconnected nodes.
        (cost1, n1, path) = heappop(q)
        if n1 not in visited:
            visited.add(n1)
            if n1 == id2:
                # The path was built head-first, so reverse it.
                return list(flatten(path))[::-1] + [n1]
            path = (n1, path)
            for (n2, cost2) in G[n1].items():
                if n2 not in visited:
                    heappush(q, (cost1 + cost2, n2, path))
def dijkstra_shortest_paths(graph, id, heuristic=None, directed=False):
    """ Dijkstra algorithm for finding the shortest paths from the given node to all other nodes.
        Returns a dictionary of node id's, each linking to a list of node id's (i.e., the path).
        Unreachable nodes link to None.
    """
    # Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
    # Based on: NetworkX 1.4.1: Aric Hagberg, Dan Schult and Pieter Swart.
    # This is 5x faster than:
    # for n in g: dijkstra_shortest_path(g, id, n.id)
    W = adjacency(graph, directed=directed, heuristic=heuristic)
    Q = [] # Use Q as a heap with (distance, node id)-tuples.
    D = {} # Dictionary of final distances.
    P = {} # Dictionary of paths.
    P[id] = [id]
    seen = {id: 0} # Best tentative distance seen so far per node.
    heappush(Q, (0, id))
    while Q:
        (dist, v) = heappop(Q)
        if v in D: continue # Already finalized.
        D[v] = dist
        for w in W[v].keys():
            vw_dist = D[v] + W[v][w]
            # Relax the edge if it improves on the best tentative distance.
            if w not in D and (w not in seen or vw_dist < seen[w]):
                seen[w] = vw_dist
                heappush(Q, (vw_dist, w))
                P[w] = P[v] + [w]
    # Nodes never reached get a None path.
    for n in graph:
        if n not in P: P[n]=None
    return P
def floyd_warshall_all_pairs_distance(graph, heuristic=None, directed=False):
    """ Floyd-Warshall's algorithm for finding the path length for all pairs for nodes.
        Returns a dictionary of node id's,
        each linking to a dictionary of node id's linking to path length.
        The returned dict also exposes a .predecessors attribute for predecessor_path().
    """
    from collections import defaultdict # Requires Python 2.5+.
    g = graph.keys() # Node ids (Graph is a dict of id => Node).
    d = defaultdict(lambda: defaultdict(lambda: 1e30)) # float('inf')
    p = defaultdict(dict) # Predecessors.
    # Initialize direct-edge distances (weight lowers the cost, see adjacency()).
    for e in graph.edges:
        u = e.node1.id
        v = e.node2.id
        w = 1.0 - 0.5 * e.weight
        w = heuristic and heuristic(u, v) + w or w
        d[u][v] = min(w, d[u][v])
        d[u][u] = 0
        p[u][v] = u
        if not directed:
            d[v][u] = min(w, d[v][u])
            p[v][u] = v
    # Relax all pairs through each intermediate node w.
    for w in g:
        dw = d[w]
        for u in g:
            du, duw = d[u], d[u][w]
            for v in g:
                # Performance optimization, assumes d[w][v] > 0.
                #if du[v] > duw + dw[v]:
                if du[v] > duw and du[v] > duw + dw[v]:
                    d[u][v] = duw + dw[v]
                    p[u][v] = p[w][v]
    class pdict(dict):
        # A dict subclass that carries the predecessor table along.
        def __init__(self, predecessors, *args, **kwargs):
            dict.__init__(self, *args, **kwargs)
            self.predecessors = predecessors
    # Drop unreachable pairs (still at the 1e30 sentinel).
    return pdict(p, ((u, dict((v, w) for v,w in d[u].items() if w < 1e30)) for u in d))
def predecessor_path(tree, u, v):
    """ Returns the path between node u and node v as a list of node id's.
        The given tree is the return value of floyd_warshall_all_pairs_distance().predecessors.
    """
    def expand(a, b):
        # Recursively inserts the intermediate hops between a and b.
        w = tree[a][b]
        if w == a:
            return []
        return expand(a, w) + [w] + expand(w, b)
    return [u] + expand(u, v) + [v]
#--- GRAPH CENTRALITY ------------------------------------------------------------------------------
def brandes_betweenness_centrality(graph, normalized=True, directed=False):
    """ Betweenness centrality for nodes in the graph.
        Betweenness centrality is a measure of the number of shortests paths that pass through a node.
        Nodes in high-density areas will get a good score.
        Returns a dictionary of node id => score (0.0-1.0 when normalized).
    """
    # Ulrik Brandes, A Faster Algorithm for Betweenness Centrality,
    # Journal of Mathematical Sociology 25(2):163-177, 2001,
    # http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
    # Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
    # Based on: NetworkX 1.0.1: Aric Hagberg, Dan Schult and Pieter Swart.
    # http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
    W = adjacency(graph, directed=directed)
    b = dict.fromkeys(graph, 0.0)
    # One weighted single-source shortest-path pass per node.
    for id in graph:
        Q = [] # Use Q as a heap with (distance, node id)-tuples.
        D = {} # Dictionary of final distances.
        P = {} # Dictionary of paths.
        for n in graph: P[n]=[]
        seen = {id: 0}
        heappush(Q, (0, id, id))
        S = [] # Nodes in order of non-decreasing distance from id.
        E = dict.fromkeys(graph, 0) # sigma (number of shortest paths).
        E[id] = 1.0
        while Q:
            (dist, pred, v) = heappop(Q)
            if v in D:
                continue
            D[v] = dist
            S.append(v)
            E[v] += E[pred]
            for w in W[v]:
                vw_dist = D[v] + W[v][w]
                if w not in D and (w not in seen or vw_dist < seen[w]):
                    # A strictly shorter path to w: restart its predecessor list.
                    seen[w] = vw_dist
                    heappush(Q, (vw_dist, v, w))
                    P[w] = [v]
                    E[w] = 0.0
                elif vw_dist == seen[w]: # Handle equal paths.
                    P[w].append(v)
                    E[w] += E[v]
        # Accumulate dependencies back from the leaves of the shortest-path DAG.
        d = dict.fromkeys(graph, 0.0)
        for w in reversed(S):
            for v in P[w]:
                d[v] += (1.0 + d[w]) * E[v] / E[w]
            if w != id:
                b[w] += d[w]
    # Normalize between 0.0 and 1.0.
    m = normalized and max(b.values()) or 1
    b = dict((id, w/m) for id, w in b.items())
    return b
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
    """ Eigenvector centrality for nodes in the graph (cfr. Google's PageRank).
        Eigenvector centrality is a measure of the importance of a node in a directed network.
        It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
        Nodes with no incoming connections have a score of zero.
        If you want to measure outgoing connections, reversed should be False.
        Returns a dictionary of node id => weight (0.0-1.0 when normalized).
    """
    # Based on: NetworkX, Aric Hagberg (hagberg@lanl.gov)
    # http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
    # Note: much faster than betweenness centrality (which grows exponentially).
    def normalize(vector):
        # Scales the vector so its values sum to 1 (in-place).
        w = 1.0 / (sum(vector.values()) or 1)
        for node in vector:
            vector[node] *= w
        return vector
    G = adjacency(graph, directed=True, reversed=reversed)
    v = normalize(dict([(n, random()) for n in graph])) # Node ID => weight vector.
    # Eigenvector calculation using the power iteration method: y = Ax.
    # It has no guarantee of convergence.
    for i in range(iterations):
        v0 = v
        v = dict.fromkeys(v0.keys(), 0)
        for n1 in v:
            for n2 in G[n1]:
                # The 0.01 damping keeps isolated chains from collapsing to zero.
                v[n1] += 0.01 + v0[n2] * G[n1][n2] * rating.get(n1, 1)
        normalize(v)
        e = sum([abs(v[n]-v0[n]) for n in v]) # Check for convergence.
        if e < len(G) * tolerance:
            # Normalize between 0.0 and 1.0.
            m = normalized and max(v.values()) or 1
            v = dict((id, w/m) for id, w in v.items())
            return v
    warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
    return dict((n, 0) for n in G)
#--- GRAPH PARTITIONING ----------------------------------------------------------------------------
# a | b => all elements from a and all the elements from b.
# a & b => elements that appear in a as well as in b.
# a - b => elements that appear in a but not in b.
def union(a, b):
    """ Returns a list with all elements that appear in a or in b. """
    return list(set(a).union(b))
def intersection(a, b):
    """ Returns a list with the elements that appear in both a and b. """
    return list(set(a).intersection(b))
def difference(a, b):
    """ Returns a list with the elements that appear in a but not in b. """
    return list(set(a).difference(b))
def partition(graph):
    """ Returns a list of unconnected subgraphs.
    """
    # Creates clusters of nodes and directly connected nodes.
    # Iteratively merges two clusters if they overlap.
    g = []
    for n in graph.nodes:
        # Each cluster starts as a node plus everything reachable from it.
        g.append(dict.fromkeys((n.id for n in n.flatten()), True))
    # Merge clusters that share at least one node id.
    # After a merge, g[i] becomes a list of ids; union()/intersection()
    # accept any iterable, so later passes still work.
    for i in reversed(range(len(g))):
        for j in reversed(range(i+1, len(g))):
            if g[i] and g[j] and len(intersection(g[i], g[j])) > 0:
                g[i] = union(g[i], g[j])
                g[j] = []
    # Build a subgraph for each remaining non-empty cluster,
    # sorted largest subgraph first (Python 2 cmp-style sort).
    g = [graph.copy(nodes=[graph[id] for id in n]) for n in g if n]
    g.sort(lambda a, b: len(b) - len(a))
    return g
def is_clique(graph):
    """ A clique is a set of nodes in which each node is connected to all other nodes.
    """
    #for n1 in graph.nodes:
    #    for n2 in graph.nodes:
    #        if n1 != n2 and graph.edge(n1.id, n2.id) is None:
    #            return False
    # A density of 1.0 means the graph has the maximum number of edges,
    # which is equivalent to the pairwise check commented out above.
    return graph.density == 1.0
def clique(graph, id):
    """ Returns the largest possible clique for the node with given id.
    """
    # Greedy approach: grow the clique with each node that is connected to
    # every node collected so far (result depends on node iteration order).
    if isinstance(id, Node):
        id = id.id
    a = [id]
    for n in graph.nodes:
        try:
            # Raises StopIteration if all nodes in the clique are connected to n:
            # the generator yields ids for which n is the node itself or is
            # unconnected, so an empty generator means "connected to all".
            next(id for id in a if n.id==id or graph.edge(n.id, id) is None)
        except StopIteration:
            a.append(n.id)
    return a
def cliques(graph, threshold=3):
    """ Returns all cliques in the graph with at least the given number of nodes.
    """
    found = []
    for node in graph.nodes:
        # Greedy clique around each node; sorted so duplicates compare equal.
        members = sorted(clique(graph, node.id))
        if len(members) >= threshold and members not in found:
            found.append(members)
    return found
#### GRAPH UTILITY FUNCTIONS #######################################################################
# Utility functions for safely linking and unlinking of nodes,
# with respect for the surrounding nodes.
def unlink(graph, node1, node2=None):
    """ Removes the edges between node1 and node2.
        If only node1 is given, removes all edges to and from it.
        This does not remove node1 from the graph.
    """
    if not isinstance(node1, Node):
        node1 = graph[node1]
    if not isinstance(node2, Node) and node2 is not None:
        node2 = graph[node2]
    # Iterate over a copy so edges can be removed during traversal.
    for e in list(graph.edges):
        if node1 in (e.node1, e.node2) and node2 in (e.node1, e.node2, None):
            graph.edges.remove(e)
    try:
        node1.links.remove(node2)
        node2.links.remove(node1)
    # Narrowed from a bare except, which would also have hidden real bugs:
    # - AttributeError: node2 is None ('NoneType' object has no attribute 'links').
    # - ValueError: the nodes were not in each other's links to begin with.
    except (AttributeError, ValueError):
        pass
def redirect(graph, node1, node2):
    """ Connects all of node1's edges to node2 and unlinks node1.
    """
    if not isinstance(node1, Node):
        node1 = graph[node1]
    if not isinstance(node2, Node):
        node2 = graph[node2]
    # Copy each edge that touches node1, substituting node2 for node1
    # (edges that already end at node2 are skipped to avoid self-loops).
    for e in graph.edges:
        if node1 in (e.node1, e.node2):
            if e.node1 == node1 and e.node2 != node2:
                graph._add_edge_copy(e, node1=node2, node2=e.node2)
            if e.node2 == node1 and e.node1 != node2:
                graph._add_edge_copy(e, node1=e.node1, node2=node2)
    # Remove node1's original edges.
    unlink(graph, node1)
def cut(graph, node):
    """ Unlinks the given node, but keeps edges intact by connecting the surrounding nodes.
        If A, B, C, D are nodes and A->B, B->C, B->D, if we then cut B: A->C, A->D.
    """
    if not isinstance(node, Node):
        node = graph[node]
    # For each edge through the node, bridge each surrounding node
    # to the other endpoint (skipping would-be self-loops).
    for e in graph.edges:
        if node in (e.node1, e.node2):
            for n in node.links:
                if e.node1 == node and e.node2 != n:
                    graph._add_edge_copy(e, node1=n, node2=e.node2)
                if e.node2 == node and e.node1 != n:
                    graph._add_edge_copy(e, node1=e.node1, node2=n)
    # Remove the node's own edges.
    unlink(graph, node)
def insert(graph, node, a, b):
    """ Inserts the given node between node a and node b.
        If A, B, C are nodes and A->B, if we then insert C: A->C, C->B.
    """
    if not isinstance(node, Node):
        node = graph[node]
    if not isinstance(a, Node):
        a = graph[a]
    if not isinstance(b, Node):
        b = graph[b]
    # Split each direct edge between a and b into two edges through the node,
    # preserving the direction of the original edge.
    for e in graph.edges:
        if e.node1 == a and e.node2 == b:
            graph._add_edge_copy(e, node1=a, node2=node)
            graph._add_edge_copy(e, node1=node, node2=b)
        if e.node1 == b and e.node2 == a:
            graph._add_edge_copy(e, node1=b, node2=node)
            graph._add_edge_copy(e, node1=node, node2=a)
    # Remove the direct edges between a and b.
    unlink(graph, a, b)
#### GRAPH EXPORT ##################################################################################
class GraphRenderer(object):
    """ Abstract base class for graph exporters.
        Subclasses override serialize() to return the graph as a string,
        and export() to write the graph to the given path.
    """
    def __init__(self, graph):
        self.graph = graph # The Graph object to render.
    def serialize(self, *args, **kwargs):
        # Returns the rendered graph as a string.
        pass
    def export(self, path, *args, **kwargs):
        # Writes the rendered graph to a file or folder at the given path.
        pass
#--- GRAPH EXPORT: HTML5 <CANVAS> ELEMENT ---------------------------------------------------------
# Exports graphs to interactive web pages using graph.js.
def minify(js):
    """ Returns a compressed Javascript string with comments and whitespace removed.
    """
    import re
    # Each (pattern, replacement) pair is applied in order.
    # (The unused W tuple of character classes that used to be defined here
    # has been removed; the classes are spelled out in the patterns below.)
    for a, b in (
      (re.compile(r"\/\*.*?\*\/", re.S), ""),                   # multi-line comments /**/
      (re.compile(r"\/\/.*"), ""),                              # single line comments //
      (re.compile(r";\n"), "; "),                               # statements (correctly) terminated with ;
      (re.compile(r"[ \t]+"), " "),                             # spacing and indentation
      (re.compile(r"[ \t]([\(\[\{\,\;\=\-\+\*\/])"), "\\1"),    # spacing before operators
      (re.compile(r"([\)\]\}\,\;\=\-\+\*\/])[ \t]"), "\\1"),    # spacing after operators
      (re.compile(r"\s+\n"), "\n"),                             # trailing whitespace
      (re.compile(r"\n+"), "\n")):                              # blank lines
        js = a.sub(b, js)
    return js.strip()
# Stylesheet modes for HTMLCanvasRenderer (inline <style> vs. external style.css).
DEFAULT, INLINE = "default", "inline"
# Serialization types accepted by serialize() and HTMLCanvasRenderer.serialize().
HTML, CANVAS, STYLE, CSS, SCRIPT, DATA = \
    "html", "canvas", "style", "css", "script", "data"
class HTMLCanvasRenderer(GraphRenderer):
    """ Exports the graph as an interactive web page, rendered with the
        HTML5 <canvas> element and animated by canvas.js + graph.js.
        Rendering options (size, colors, physics, centrality weights, ...)
        are set as instance attributes or passed as keyword arguments.
    """
    def __init__(self, graph, **kwargs):
        self.graph = graph
        # Template for the exported index.html; the placeholders are filled
        # in by the html property (title, css, js paths, id, size, script).
        self._source = \
            "<!doctype html>\n" \
            "<html>\n" \
            "<head>\n" \
            "\t<title>%s</title>\n" \
            "\t<meta charset=\"utf-8\">\n" \
            "\t%s\n" \
            "\t<script type=\"text/javascript\" src=\"%scanvas.js\"></script>\n" \
            "\t<script type=\"text/javascript\" src=\"%sgraph.js\"></script>\n" \
            "</head>\n" \
            "<body>\n" \
            "\t<div id=\"%s\" style=\"width:%spx; height:%spx;\">\n" \
            "\t\t<script type=\"text/canvas\">\n" \
            "\t\t%s\n" \
            "\t\t</script>\n" \
            "\t</div>\n" \
            "</body>\n" \
            "</html>"
        # HTML
        self.title = "Graph" # <title>Graph</title>
        self.javascript = None # Path to canvas.js + graph.js.
        self.stylesheet = INLINE # Either None, INLINE, DEFAULT (style.css) or a custom path.
        self.id = "graph" # <div id="graph">
        self.ctx = "canvas.element"
        self.width = 700 # Canvas width in pixels.
        self.height = 500 # Canvas height in pixels.
        # JS Graph
        self.frames = 500 # Number of frames of animation.
        self.fps = 30 # Frames per second.
        self.ipf = 2 # Iterations per frame.
        self.weighted = False # Indicate betweenness centrality as a shadow?
        self.directed = False # Indicate edge direction with an arrow?
        self.prune = None # None or int, calls Graph.prune() in Javascript.
        self.pack = True # Shortens leaf edges, adds eigenvector weight to node radius.
        # JS GraphLayout
        self.distance = graph.distance # Node spacing.
        self.k = graph.layout.k # Force constant.
        self.force = graph.layout.force # Force dampener.
        self.repulsion = graph.layout.repulsion # Repulsive force radius.
        # Data
        self.weight = [DEGREE, WEIGHT, CENTRALITY]
        self.href = {} # Dictionary of Node.id => URL.
        self.css = {} # Dictionary of Node.id => CSS classname.
        # Default options.
        # If a Node or Edge has one of these settings,
        # it is not passed to Javascript to save bandwidth.
        self.default = {
            "radius": 5,
            "fixed": False,
            "fill": None,
            "stroke": (0,0,0,1),
            "strokewidth": 1,
            "text": (0,0,0,1),
            "fontsize": 11,
        }
        # Override settings from keyword arguments.
        self.default.update(kwargs.pop("default", {}))
        for k, v in kwargs.items():
            setattr(self, k, v)
    def _escape(self, s):
        # Quotes strings and escapes embedded double quotes for Javascript.
        if isinstance(s, basestring):
            return "\"%s\"" % s.replace("\"", "\\\"")
        return s
    def _rgba(self, clr):
        # Color or tuple to a CSS "rgba(255,255,255,1.0)" string.
        return "\"rgba(%s,%s,%s,%.2f)\"" % (int(clr[0]*255), int(clr[1]*255), int(clr[2]*255), clr[3])
    @property
    def data(self):
        """ Yields a string of Javascript code that loads the nodes and edges into variable g,
            which is a Javascript Graph object (see graph.js).
            This can be the response of an XMLHttpRequest, after which you move g into your own variable.
        """
        return "".join(self._data())
    def _data(self):
        # Returns the Javascript data statements as a list of strings.
        s = []
        s.append("g = new Graph(%s, %s);\n" % (self.ctx, self.distance))
        s.append("var n = {")
        if len(self.graph.nodes) > 0:
            s.append("\n")
        # Translate node properties to Javascript dictionary (var n).
        # Properties equal to self.default are omitted to save bandwidth.
        for n in self.graph.nodes:
            p = []
            if n._x != 0:
                p.append("x:%i" % n._x) # 0
            if n._y != 0:
                p.append("y:%i" % n._y) # 0
            if n.radius != self.default["radius"]:
                p.append("radius:%.1f" % n.radius) # 5.0
            if n.fixed != self.default["fixed"]:
                p.append("fixed:%s" % repr(n.fixed).lower()) # false
            if n.fill != self.default["fill"]:
                p.append("fill:%s" % self._rgba(n.fill)) # [0,0,0,1.0]
            if n.stroke != self.default["stroke"]:
                p.append("stroke:%s" % self._rgba(n.stroke)) # [0,0,0,1.0]
            if n.strokewidth != self.default["strokewidth"]:
                p.append("strokewidth:%.1f" % n.strokewidth) # 0.5
            if n.text is None:
                p.append("text:false")
            if n.text and n.text.fill != self.default["text"]:
                p.append("text:%s" % self._rgba(n.text.fill)) # [0,0,0,1.0]
            if n.text and "font" in n.text.__dict__:
                p.append("font:\"%s\"" % n.text.__dict__["font"]) # "sans-serif"
            if n.text and n.text.__dict__.get("fontsize", self.default["fontsize"]) != self.default["fontsize"]:
                p.append("fontsize:%i" % int(max(1, n.text.fontsize)))
            if n.text and "fontweight" in n.text.__dict__: # "bold"
                p.append("fontweight:\"%s\"" % n.text.__dict__["fontweight"])
            if n.text and n.text.string != n.id:
                p.append("label:\"%s\"" % n.text.string)
            if n.id in self.href:
                p.append("href:\"%s\"" % self.href[n.id])
            if n.id in self.css:
                p.append("css:\"%s\"" % self.css[n.id])
            s.append("\t%s: {%s},\n" % (self._escape(n.id), ", ".join(p)))
        s[-1] = s[-1].rstrip(",\n") # Trailing comma breaks in IE.
        s.append("\n};\n")
        s.append("var e = [")
        if len(self.graph.edges) > 0:
            s.append("\n")
        # Translate edge properties to Javascript dictionary (var e).
        for e in self.graph.edges:
            id1, id2 = self._escape(e.node1.id), self._escape(e.node2.id)
            p = []
            if e.weight != 0:
                p.append("weight:%.2f" % e.weight) # 0.00
            if e.length != 1:
                p.append("length:%.2f" % e.length) # 1.00
            if e.type is not None:
                p.append("type:\"%s\"" % e.type) # "is-part-of"
            if e.stroke != self.default["stroke"]:
                p.append("stroke:%s" % self._rgba(e.stroke)) # [0,0,0,1.0]
            if e.strokewidth != self.default["strokewidth"]:
                p.append("strokewidth:%.2f" % e.strokewidth) # 0.5
            s.append("\t[%s, %s, {%s}],\n" % (id1, id2, ", ".join(p)))
        s[-1] = s[-1].rstrip(",\n") # Trailing comma breaks in IE.
        s.append("\n];\n")
        # Append the nodes to graph g.
        s.append("for (var id in n) {\n"
                 "\tg.addNode(id, n[id]);\n"
                 "}\n")
        # Append the edges to graph g.
        s.append("for (var i=0; i < e.length; i++) {\n"
                 "\tvar n1 = g.nodeset[e[i][0]];\n"
                 "\tvar n2 = g.nodeset[e[i][1]];\n"
                 "\tg.addEdge(n1, n2, e[i][2]);\n"
                 "}")
        return s
    @property
    def script(self):
        """ Yields a string of canvas.js code.
            A setup() function loads the nodes and edges into variable g (Graph),
            A draw() function starts the animation and updates the layout of g.
        """
        return "".join(self._script())
    def _script(self):
        # Returns the canvas.js setup() and draw() code as a list of strings.
        s = [];
        s.append("function setup(canvas) {\n")
        s.append(    "\tcanvas.size(%s, %s);\n" % (self.width, self.height))
        s.append(    "\tcanvas.fps = %s;\n" % (self.fps))
        s.append(    "\t" + "".join(self._data()).replace("\n", "\n\t"))
        s.append(    "\n")
        # Apply the layout settings.
        s.append(    "\tg.layout.k = %s; // Force constant (= edge length).\n"
                     "\tg.layout.force = %s; // Repulsive strength.\n"
                     "\tg.layout.repulsion = %s; // Repulsive radius.\n" % (
                        self.k,
                        self.force,
                        self.repulsion))
        # Apply eigenvector, betweenness and degree centrality.
        if self.weight is True: s.append(
                     "\tg.eigenvectorCentrality();\n"
                     "\tg.betweennessCentrality();\n"
                     "\tg.degreeCentrality();\n")
        if isinstance(self.weight, (list, tuple)):
            if WEIGHT in self.weight: s.append(
                     "\tg.eigenvectorCentrality();\n")
            if CENTRALITY in self.weight: s.append(
                     "\tg.betweennessCentrality();\n")
            if DEGREE in self.weight: s.append(
                     "\tg.degreeCentrality();\n")
        # Apply node weight to node radius.
        if self.pack: s.append(
                     "\t// Apply Node.weight to Node.radius.\n"
                     "\tfor (var i=0; i < g.nodes.length; i++) {\n"
                     "\t\tvar n = g.nodes[i];\n"
                     "\t\tn.radius = n.radius + n.radius * n.weight;\n"
                     "\t}\n")
        # Apply edge length (leaves get shorter edges).
        if self.pack: s.append(
                     "\t// Apply Edge.length (leaves get shorter edges).\n"
                     "\tfor (var i=0; i < g.nodes.length; i++) {\n"
                     "\t\tvar e = g.nodes[i].edges();\n"
                     "\t\tif (e.length == 1) {\n"
                     "\t\t\te[0].length *= 0.2;\n"
                     "\t\t}\n"
                     "\t}\n")
        # Apply pruning.
        if self.prune is not None: s.append(
                     "\tg.prune(%s);\n" % self.prune)
        # Implement <canvas> draw().
        s.append("}\n")
        s.append("function draw(canvas) {\n"
                 "\tif (g.layout.iterations <= %s) {\n"
                 "\t\tcanvas.clear();\n"
                 "\t\t//shadow();\n"
                 "\t\tstroke(0);\n"
                 "\t\tfill(0,0);\n"
                 "\t\tg.update(%s);\n"
                 "\t\tg.draw(%s, %s);\n"
                 "\t}\n"
                 "\tg.drag(canvas.mouse);\n"
                 "}" % (
                    int(self.frames),
                    int(self.ipf),
                    str(self.weighted).lower(),
                    str(self.directed).lower()))
        return s
    @property
    def canvas(self):
        """ Yields a string of HTML with a <div id="graph"> containing a <script type="text/canvas">.
            The <div id="graph"> wrapper is required as a container for the node labels.
        """
        s = [
            "<div id=\"%s\" style=\"width:%spx; height:%spx;\">\n" % (self.id, self.width, self.height),
            "\t<script type=\"text/canvas\">\n",
            "\t\t%s\n" % self.script.replace("\n", "\n\t\t"),
            "\t</script>\n",
            "</div>"
        ]
        return "".join(s)
    @property
    def style(self):
        """ Yields a string of CSS for <div id="graph">.
        """
        return \
            "body { font: 11px sans-serif; }\n" \
            "a { color: dodgerblue; }\n" \
            "#%s canvas { }\n" \
            "#%s .node-label { font-size: 11px; }\n" \
            "#%s {\n" \
            "\tdisplay: inline-block;\n" \
            "\tposition: relative;\n" \
            "\toverflow: hidden;\n" \
            "\tborder: 1px solid #ccc;\n" \
            "}" % (self.id, self.id, self.id)
    @property
    def html(self):
        """ Yields a string of HTML to visualize the graph using a force-based spring layout.
            The js parameter sets the path to graph.js and canvas.js.
        """
        js = self.javascript or ""
        if self.stylesheet == INLINE:
            css = self.style.replace("\n","\n\t\t").rstrip("\t")
            css = "<style type=\"text/css\">\n\t\t%s\n\t</style>" % css
        elif self.stylesheet == DEFAULT:
            css = "<link rel=\"stylesheet\" href=\"style.css\" type=\"text/css\" media=\"screen\" />"
        elif self.stylesheet is not None:
            css = "<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\" media=\"screen\" />" % self.stylesheet
        else:
            css = ""
        # Re-indent the script so it lines up inside the HTML template.
        s = self._script()
        s = "".join(s)
        s = "\t" + s.replace("\n", "\n\t\t\t")
        s = s.rstrip()
        s = self._source % (
            self.title,
            css,
            js,
            js,
            self.id,
            self.width,
            self.height,
            s)
        return s
    def serialize(self, type=HTML):
        # Returns the graph as a string of the given type:
        # full HTML page, <canvas> element, CSS, Javascript code or raw data.
        if type == HTML:
            return self.html
        if type == CANVAS:
            return self.canvas
        if type in (STYLE, CSS):
            return self.style
        if type == SCRIPT:
            return self.script
        if type == DATA:
            return self.data
    # Backwards compatibility.
    render = serialize
    def export(self, path, encoding="utf-8"):
        """ Generates a folder at the given path containing an index.html
            that visualizes the graph using the HTML5 <canvas> tag.
        """
        if os.path.exists(path):
            rmtree(path)
        os.mkdir(path)
        # Copy compressed graph.js + canvas.js (unless a custom path is given.)
        if self.javascript is None:
            for p, f in (("..", "canvas.js"), (".", "graph.js")):
                # NOTE(review): file handle `a` is never explicitly closed.
                a = open(os.path.join(MODULE, p, f), "r")
                b = open(os.path.join(path, f), "w")
                b.write(minify(a.read()))
                b.close()
        # Create style.css.
        if self.stylesheet == DEFAULT:
            f = open(os.path.join(path, "style.css"), "w")
            f.write(self.style)
            f.close()
        # Create index.html.
        # (The encoding argument assumes codecs.open is in scope -- the
        # Python 2 builtin open() does not accept encoding; TODO confirm.)
        f = open(os.path.join(path, "index.html"), "w", encoding=encoding)
        f.write(self.html)
        f.close()
#--- GRAPH EXPORT: GRAPHML ------------------------------------------------------------------------
# Exports graphs as GraphML XML, which can be read by Gephi (https://gephi.org).
# Author: Frederik Elwert <frederik.elwert@web.de>, 2014.
GRAPHML = "graphml" # Serialization type for GraphML export.
class GraphMLRenderer(GraphRenderer):
    """ Exports the graph to GraphML XML, which can be read by Gephi (https://gephi.org).
    """
    def serialize(self, directed=False):
        # Serializes by round-tripping through a temporary file in the
        # current working directory. NOTE(review): assumes the cwd is
        # writable and "tmp.graphml" does not clash with an existing file.
        p = "tmp.graphml"
        self.export(p, directed, encoding="utf-8")
        s = open(p, encoding="utf-8").read()
        os.unlink(p)
        return s
    def export(self, path, directed=False, encoding="utf-8"):
        """ Generates a GraphML XML file at the given path.
        """
        import xml.etree.ElementTree as etree
        ns = "{http://graphml.graphdrawing.org/xmlns}"
        etree.register_namespace("", ns.strip("{}"))
        # Define type for node labels (string).
        # Define type for node edges (float).
        root = etree.Element(ns + "graphml")
        root.insert(0, etree.Element(ns + "key", **{
                  "id": "node_label", "for": "node", "attr.name": "label", "attr.type": "string"
        }))
        root.insert(0, etree.Element(ns + "key", **{
                 "id": "edge_weight", "for": "edge", "attr.name": "weight", "attr.type": "double"
        }))
        # Map Node.id => GraphML node id.
        m = {}
        g = etree.SubElement(root, ns + "graph", id="g", edgedefault=directed and "directed" or "undirected")
        # Export nodes.
        for i, n in enumerate(self.graph.nodes):
            m[n.id] = "node%s" % i
            x = etree.SubElement(g, ns + "node", id=m[n.id])
            x = etree.SubElement(x, ns + "data", key="node_label")
            if n.text and n.text.string != n.id:
                x.text = n.text.string
        # Export edges.
        for i, e in enumerate(self.graph.edges):
            x = etree.SubElement(g, ns + "edge", id="edge%s" % i, source=m[e.node1.id], target=m[e.node2.id])
            x = etree.SubElement(x, ns + "data", key="edge_weight")
            x.text = "%.3f" % e.weight
        # Export graph with pretty indented XML.
        # http://effbot.org/zone/element-lib.htm#prettyprint
        def indent(e, level=0):
            w = "\n" + level * " "
            if len(e):
                if not e.text or not e.text.strip():
                    e.text = w + " "
                if not e.tail or not e.tail.strip():
                    e.tail = w
                for e in e: # Recurse; note that this rebinds e to each child.
                    indent(e, level+1)
                if not e.tail or not e.tail.strip():
                    e.tail = w
            else:
                if level and (not e.tail or not e.tail.strip()):
                    e.tail = w
        indent(root)
        tree = etree.ElementTree(root)
        tree.write(path, encoding=encoding)
#--------------------------------------------------------------------------------------------------
# The export() and serialize() function are called from Graph.export() and Graph.serialize(),
# and are expected to handle any GraphRenderer by specifying an optional type=HTML|GRAPHML.
def export(graph, path, encoding="utf-8", **kwargs):
    """ Exports the given graph to the given path,
        as GraphML XML (type=GRAPHML or a path ending in .graphml),
        or as a folder with an interactive HTML5 <canvas> page (type=HTML).
        Remaining keyword arguments are passed to the renderer.
    """
    type = kwargs.pop("type", HTML)
    # Export to GraphML.
    if type == GRAPHML or path.endswith(".graphml"):
        r = GraphMLRenderer(graph)
        return r.export(path, directed=kwargs.get("directed", False), encoding=encoding)
    # Export to HTML with <canvas>.
    if type == HTML:
        kwargs.setdefault("stylesheet", DEFAULT)
        r = HTMLCanvasRenderer(graph, **kwargs)
        return r.export(path, encoding)
def serialize(graph, type=HTML, **kwargs):
    """ Returns the given graph serialized as a string of the given type:
        GRAPHML, or HTML / CANVAS / STYLE / CSS / SCRIPT / DATA
        (see HTMLCanvasRenderer.serialize() for the HTML-based types).
    """
    # Return GraphML string.
    if type == GRAPHML:
        r = GraphMLRenderer(graph)
        return r.serialize(directed=kwargs.get("directed", False))
    # Return HTML string.
    if type in (HTML, CANVAS, STYLE, CSS, SCRIPT, DATA):
        kwargs.setdefault("stylesheet", INLINE)
        r = HTMLCanvasRenderer(graph, **kwargs)
        return r.serialize(type)
# Backwards compatibility.
write, render = export, serialize
| krishna11888/ai | third_party/pattern/pattern/graph/__init__.py | Python | gpl-2.0 | 65,658 | [
"VisIt"
] | d60091af7f97544b3a4396d4df25644dc74f673789b6089639bfd597a9f51b38 |
"""
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from scipy.io.harwell_boeing._fortran_format_parser import \
FortranFormatParser, IntFormat, ExpFormat
# Public API. Bug fix: the list previously named "read_hb" and "write",
# which are not defined in this module -- the actual functions are
# hb_read() and hb_write(), so `from ... import *` would fail.
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
           "HBMatrixType"]
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing header cannot be parsed as expected."""
    pass
class LineOverflow(Warning):
    """Warned when a field (e.g. the key) is too long for its HB header slot."""
    pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo(object):
    """Header information of a Harwell-Boeing file: title, key, line counts,
    matrix type, dimensions and the fortran formats of each data section.

    Instances are created with from_data() (for writing) or from_file()
    (for reading), not with the constructor directly.
    """
    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.

        Parameters
        ----------
        m : sparse matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented

        Returns
        -------
        hb_info : HBInfo instance
        """
        pointer = m.indptr
        indices = m.indices
        values = m.data

        nrows, ncols = m.shape
        nnon_zeros = m.nnz

        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))

            if values.dtype.kind in np.typecodes["AllFloat"]:
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
        else:
            raise NotImplementedError("fmt argument not supported yet.")

        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError("type %s for values not implemented"
                                          % values.dtype)
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")

        def _nlines(fmt, size):
            # Number of text lines needed for `size` items at fmt.repeat
            # items per line (last line may be partial).
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines

        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)

        total_nlines = pointer_nlines + indices_nlines + values_nlines

        return cls(title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_fmt.fortran_format, indices_fmt.fortran_format,
            values_fmt.fortran_format)

    @classmethod
    def from_file(cls, fid):
        """Create a HBInfo instance from a file object containing a matrix in
        the HB format.

        Parameters
        ----------
        fid : file-like matrix
            File or file-like object containing a matrix in the HB format.

        Returns
        -------
        hb_info : HBInfo instance
        """
        # First line: 72-character title followed by the key.
        line = fid.readline().strip("\n")
        if not len(line) > 72:
            raise ValueError("Expected at least 72 characters for first line, "
                             "got: \n%s" % line)
        title = line[:72]
        key = line[72:]

        # Second line: line counts in four 14-character integer fields.
        line = fid.readline().strip("\n")
        if not len(line.rstrip()) >= 56:
            raise ValueError("Expected at least 56 characters for second line, "
                             "got: \n%s" % line)
        total_nlines = _expect_int(line[:14])
        pointer_nlines = _expect_int(line[14:28])
        indices_nlines = _expect_int(line[28:42])
        values_nlines = _expect_int(line[42:56])

        rhs_nlines = line[56:72].strip()
        if rhs_nlines == '':
            rhs_nlines = 0
        else:
            rhs_nlines = _expect_int(rhs_nlines)
        if not rhs_nlines == 0:
            raise ValueError("Only files without right hand side supported for "
                             "now.")

        # Third line: matrix type and dimensions.
        line = fid.readline().strip("\n")
        if not len(line) >= 70:
            raise ValueError("Expected at least 72 character for third line, got:\n"
                             "%s" % line)

        mxtype_s = line[:3].upper()
        if not len(mxtype_s) == 3:
            raise ValueError("mxtype expected to be 3 characters long")

        mxtype = HBMatrixType.from_fortran(mxtype_s)
        if not mxtype.value_type in ["real", "integer"]:
            raise ValueError("Only real or integer matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.structure == "unsymmetric":
            raise ValueError("Only unsymmetric matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.storage == "assembled":
            raise ValueError("Only assembled matrices supported for now")

        if not line[3:14] == " " * 11:
            raise ValueError("Malformed data for third line: %s" % line)

        nrows = _expect_int(line[14:28])
        ncols = _expect_int(line[28:42])
        nnon_zeros = _expect_int(line[42:56])
        nelementals = _expect_int(line[56:70])
        if not nelementals == 0:
            raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
                             % nelementals)

        # Fourth line: the three fortran formats (pointer, indices, values).
        line = fid.readline().strip("\n")

        ct = line.split()
        if not len(ct) == 3:
            raise ValueError("Expected 3 formats, got %s" % ct)

        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   ct[0], ct[1], ct[2],
                   rhs_nlines, nelementals)

    def __init__(self, title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_format_str, indices_format_str, values_format_str,
            right_hand_sides_nlines=0, nelementals=0):
        """Do not use this directly, but the class ctrs (from_* functions)."""
        # Bug fix: validate/normalize title and key *before* storing them.
        # Previously self.title/self.key were assigned first, so they could
        # remain None and later crash dump() (None has no ljust()).
        if title is None:
            title = "No Title"
        if len(title) > 72:
            raise ValueError("title cannot be > 72 characters")

        if key is None:
            key = "|No Key"
        if len(key) > 8:
            warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)

        self.title = title
        self.key = key

        self.total_nlines = total_nlines
        self.pointer_nlines = pointer_nlines
        self.indices_nlines = indices_nlines
        self.values_nlines = values_nlines

        parser = FortranFormatParser()
        pointer_format = parser.parse(pointer_format_str)
        if not isinstance(pointer_format, IntFormat):
            raise ValueError("Expected int format for pointer format, got %s"
                             % pointer_format)

        indices_format = parser.parse(indices_format_str)
        if not isinstance(indices_format, IntFormat):
            raise ValueError("Expected int format for indices format, got %s" %
                             indices_format)

        values_format = parser.parse(values_format_str)
        if isinstance(values_format, ExpFormat):
            if not mxtype.value_type in ["real", "complex"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            values_dtype = np.float64
        elif isinstance(values_format, IntFormat):
            if not mxtype.value_type in ["integer"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            # XXX: fortran int -> dtype association ?
            # np.int was just an alias for the builtin int (and has been
            # removed from modern numpy), so use int directly.
            values_dtype = int
        else:
            # Bug fix: this previously referenced the undefined name ct[2],
            # raising a NameError instead of the intended ValueError.
            raise ValueError("Unsupported format for values %s" % values_format_str)

        self.pointer_format = pointer_format
        self.indices_format = indices_format
        self.values_format = values_format

        self.pointer_dtype = np.int32
        self.indices_dtype = np.int32
        self.values_dtype = values_dtype

        self.pointer_nlines = pointer_nlines
        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)

        self.indices_nlines = indices_nlines
        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)

        self.values_nlines = values_nlines
        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)

        self.nrows = nrows
        self.ncols = ncols
        self.nnon_zeros = nnon_zeros
        self.nelementals = nelementals
        self.mxtype = mxtype

    def dump(self):
        """Gives the header corresponding to this instance as a string."""
        header = [self.title.ljust(72) + self.key.ljust(8)]

        header.append("%14d%14d%14d%14d" %
                      (self.total_nlines, self.pointer_nlines,
                       self.indices_nlines, self.values_nlines))
        header.append("%14s%14d%14d%14d%14d" %
                      (self.mxtype.fortran_format.ljust(14), self.nrows,
                       self.ncols, self.nnon_zeros, 0))

        pffmt = self.pointer_format.fortran_format
        iffmt = self.indices_format.fortran_format
        vffmt = self.values_format.fortran_format
        header.append("%16s%16s%20s" %
                      (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
        return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value)
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=np.int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=np.int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError, e:
raise e
def _write_data(m, fid, header):
    """Write the CSC matrix m to file-like fid in HB format, with the header
    and the fortran formats described by the HBInfo instance `header`."""
    def write_array(f, ar, nlines, fmt):
        # ar_nlines is the number of full lines, n is the number of items per
        # line, ffmt the fortran format
        pyfmt = fmt.python_format
        pyfmt_full = pyfmt * fmt.repeat
        # for each array to write, we first write the full lines, and special
        # case for partial line
        full = ar[:(nlines - 1) * fmt.repeat]
        for row in full.reshape((nlines-1, fmt.repeat)):
            f.write(pyfmt_full % tuple(row) + "\n")
        nremain = ar.size - full.size
        if nremain > 0:
            f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
    fid.write(header.dump())
    fid.write("\n")
    # +1 is for fortran one-based indexing
    write_array(fid, m.indptr+1, header.pointer_nlines,
                header.pointer_format)
    write_array(fid, m.indices+1, header.indices_nlines,
                header.indices_format)
    write_array(fid, m.data, header.values_nlines,
                header.values_format)
class HBMatrixType(object):
    """Class to hold the matrix type (value type, structure and storage),
    and translate it to/from the 3-character fortran code, e.g. "RUA"."""
    # q2f* translates qualified names to fortran character
    _q2f_type = {
        "real": "R",
        "complex": "C",
        "pattern": "P",
        "integer": "I",
    }
    _q2f_structure = {
        "symmetric": "S",
        "unsymmetric": "U",
        "hermitian": "H",
        "skewsymmetric": "Z",
        "rectangular": "R"
    }
    _q2f_storage = {
        "assembled": "A",
        "elemental": "E",
    }

    # Reverse maps: fortran character => qualified name.
    _f2q_type = dict((fortran, name) for name, fortran in _q2f_type.items())
    _f2q_structure = dict((fortran, name) for name, fortran in _q2f_structure.items())
    _f2q_storage = dict((fortran, name) for name, fortran in _q2f_storage.items())

    @classmethod
    def from_fortran(cls, fmt):
        """Build an instance from a 3-character fortran code such as "RUA"."""
        if len(fmt) != 3:
            raise ValueError("Fortran format for matrix type should be 3 "
                             "characters long")
        try:
            return cls(cls._f2q_type[fmt[0]],
                       cls._f2q_structure[fmt[1]],
                       cls._f2q_storage[fmt[2]])
        except KeyError:
            raise ValueError("Unrecognized format %s" % fmt)

    def __init__(self, value_type, structure, storage="assembled"):
        self.value_type = value_type
        self.structure = structure
        self.storage = storage

        # Validate each component against the known qualified names.
        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)

    @property
    def fortran_format(self):
        """The 3-character fortran code for this matrix type."""
        parts = (self._q2f_type[self.value_type],
                 self._q2f_structure[self.structure],
                 self._q2f_storage[self.storage])
        return "".join(parts)

    def __repr__(self):
        return "HBMatrixType(%s, %s, %s)" % \
               (self.value_type, self.structure, self.storage)
class HBFile(object):
    """Thin wrapper around a file object holding a Harwell-Boeing matrix,
    exposing the header metadata and read_matrix()/write_matrix()."""
    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.

        Parameters
        ----------
        file : file-object
            StringIO work as well
        hb_info : HBInfo
            Should be given as an argument for writing, in which case the file
            should be writable.
        """
        self._fid = file
        if hb_info is None:
            # Read mode: parse the header from the file itself.
            self._hb_info = HBInfo.from_file(file)
        else:
            # Write mode: use the caller-supplied header.
            # NOTE(review): there is no actual writability check here; the
            # commented-out IOError below suggests one was once intended.
            #raise IOError("file %s is not writable, and hb_info "
            #              "was given." % file)
            self._hb_info = hb_info
    @property
    def title(self):
        """Title string from the HB header."""
        return self._hb_info.title
    @property
    def key(self):
        """Key string from the HB header."""
        return self._hb_info.key
    @property
    def type(self):
        """Value type: "real", "complex", "pattern" or "integer"."""
        return self._hb_info.mxtype.value_type
    @property
    def structure(self):
        """Matrix structure, e.g. "unsymmetric" or "symmetric"."""
        return self._hb_info.mxtype.structure
    @property
    def storage(self):
        """Storage scheme: "assembled" or "elemental"."""
        return self._hb_info.mxtype.storage
    def read_matrix(self):
        # Parse the data section into a csc_matrix.
        return _read_hb_data(self._fid, self._hb_info)
    def write_matrix(self, m):
        # Write the sparse matrix m (header + data) to the wrapped file.
        return _write_data(m, self._fid, self._hb_info)
def hb_read(file):
    """Read HB-format file.

    Parameters
    ----------
    file : str-like or file-like
        If a string-like object, file is the name of the file to read. If a
        file-like object, the data are read from it.

    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    def _read_from(fid):
        return HBFile(fid).read_matrix()
    # File-like objects are read directly; the caller keeps ownership.
    if not isinstance(file, basestring):
        return _read_from(file)
    # Filenames are opened here and always closed again.
    fid = open(file)
    try:
        return _read_from(fid)
    finally:
        fid.close()
def hb_write(file, m, hb_info=None):
    """Write HB-format file.

    Parameters
    ----------
    file : str-like or file-like
        if a string-like object, file is the name of the file to read. If a
        file-like object, the data are read from it.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write

    Returns
    -------
    None

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    # Derive the header from the matrix when the caller did not supply one.
    if hb_info is None:
        hb_info = HBInfo.from_data(m)

    def _dump(fid):
        return HBFile(fid, hb_info).write_matrix(m)

    # File-like objects are written to directly; the caller keeps ownership.
    if not isinstance(file, basestring):
        return _dump(file)

    # A filename was given: open for writing and guarantee the file closes.
    fid = open(file, "w")
    try:
        return _dump(fid)
    finally:
        fid.close()
| teoliphant/scipy | scipy/io/harwell_boeing/hb.py | Python | bsd-3-clause | 18,387 | [
"exciting"
] | 62dd7d2a5b80efc50b531eb4462b0f47d8c7da9d44d99b82fa50fa210814b0f7 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestImageThresholdConnectivity(vtk.test.Testing.vtkTest):

    def testImageThresholdConnectivity(self):
        # This script is for testing the 3D flood fill filter.
        # Image pipeline
        renWin = vtk.vtkRenderWindow()
        renWin.SetSize(192, 256)

        # Read a small subvolume (slices 2-5) of the quarter-resolution
        # "headsq" dataset shipped with the VTK test data.
        reader = vtk.vtkImageReader()
        reader.ReleaseDataFlagOff()
        reader.SetDataByteOrderToLittleEndian()
        reader.SetDataExtent(0, 63, 0, 63, 2, 5)
        reader.SetDataSpacing(3.2, 3.2, 1.5)
        reader.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
        reader.SetDataMask(0x7fff)

        # Seed points from which the flood fill grows.
        seeds = vtk.vtkPoints()
        seeds.InsertNextPoint(0, 0, 0)
        seeds.InsertNextPoint(100.8, 100.8, 0)

        # Exercise every combination of replace-in/replace-out modes and
        # threshold styles: 2 * 2 * 3 = 12 filter configurations.
        replacein = ["ReplaceInOn", "ReplaceInOff"]
        replaceout = ["ReplaceOutOn", "ReplaceOutOff"]
        thresholds = ["ThresholdByLower(800)", "ThresholdByUpper(1200)", "ThresholdBetween(800, 1200)"]
        thresh = list()
        map = list()
        act = list()
        ren = list()
        k = 0
        for rin in replacein:
            for rout in replaceout:
                for t in thresholds:
                    thresh.append(vtk.vtkImageThresholdConnectivity())
                    thresh[k].SetSeedPoints(seeds)
                    thresh[k].SetInValue(2000)
                    thresh[k].SetOutValue(0)
                    # NOTE(review): eval() invokes the method named by the
                    # string; getattr(thresh[k], rin)() would be safer for
                    # the no-argument calls.
                    eval('thresh[k].' + rin + '()')
                    eval('thresh[k].' + rout + '()')
                    thresh[k].SetInputConnection(reader.GetOutputPort())
                    eval('thresh[k].' + t)
                    map.append(vtk.vtkImageMapper())
                    map[k].SetInputConnection(thresh[k].GetOutputPort())
                    # First three mappers use a narrow display window,
                    # the rest use the full data range.
                    if k < 3:
                        map[k].SetColorWindow(255)
                        map[k].SetColorLevel(127.5)
                    else:
                        map[k].SetColorWindow(2000)
                        map[k].SetColorLevel(1000)
                    act.append(vtk.vtkActor2D())
                    act[k].SetMapper(map[k])
                    ren.append(vtk.vtkRenderer())
                    ren[k].AddActor2D(act[k])
                    renWin.AddRenderer(ren[k])
                    k += 1

        # Tile the 12 renderers into a 3 x 4 grid of viewports.
        ren[0].SetViewport(0, 0, .33333, .25)
        ren[1].SetViewport(.33333, 0, .66667, .25)
        ren[2].SetViewport(.66667, 0, 1, .25)
        ren[3].SetViewport(0, .25, .33333, .5)
        ren[4].SetViewport(.33333, .25, .66667, .5)
        ren[5].SetViewport(.66667, .25, 1, .5)
        ren[6].SetViewport(0, .5, .33333, .75)
        ren[7].SetViewport(.33333, .5, .66667, .75)
        ren[8].SetViewport(.66667, .5, 1, .75)
        ren[9].SetViewport(0, .75, .33333, 1)
        ren[10].SetViewport(.33333, .75, .66667, 1)
        ren[11].SetViewport(.66667, .75, 1, 1)

        # render and interact with data
        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin);
        renWin.Render()

        # Regression check: compare the rendering against the stored
        # baseline image within the given pixel-difference threshold.
        img_file = "TestImageThresholdConnectivity.png"
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
        vtk.test.Testing.interact()
if __name__ == "__main__":
    # Run this test case through VTK's regression-testing harness.
    vtk.test.Testing.main([(TestImageThresholdConnectivity, 'test')])
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/TestImageThresholdConnectivity.py | Python | gpl-3.0 | 4,029 | [
"VTK"
] | bcb4e8c1bf3ff70df8c9979d5fdfecfc9ced7a646329a6497bc9cf24a1712999 |
"""
inflect.py: correctly generate plurals, ordinals, indefinite articles;
convert numbers to words
Copyright (C) 2010 Paul Dyson
Based upon the Perl module Lingua::EN::Inflect by Damian Conway.
The original Perl module Lingua::EN::Inflect by Damian Conway is
available from http://search.cpan.org/~dconway/
This module can be downloaded at http://pypi.org/project/inflect
methods:
classical inflect
plural plural_noun plural_verb plural_adj singular_noun no num a an
compare compare_nouns compare_verbs compare_adjs
present_participle
ordinal
number_to_words
join
defnoun defverb defadj defa defan
INFLECTIONS: classical inflect
plural plural_noun plural_verb plural_adj singular_noun compare
no num a an present_participle
PLURALS: classical inflect
plural plural_noun plural_verb plural_adj singular_noun no num
compare compare_nouns compare_verbs compare_adjs
COMPARISONS: classical
compare compare_nouns compare_verbs compare_adjs
ARTICLES: classical inflect num a an
NUMERICAL: ordinal number_to_words
USER_DEFINED: defnoun defverb defadj defa defan
Exceptions:
UnknownClassicalModeError
BadNumValueError
BadChunkingOptionError
NumOutOfRangeError
BadUserDefinedPatternError
BadRcFileError
BadGenderError
"""
from __future__ import unicode_literals
import ast
import sys
import re
class UnknownClassicalModeError(Exception):
    """Raised when an unrecognized classical mode is requested."""
class BadNumValueError(Exception):
    """Raised when an invalid value is supplied for a number option."""
class BadChunkingOptionError(Exception):
    """Raised when an invalid chunking option is supplied."""
class NumOutOfRangeError(Exception):
    """Raised when a number falls outside the supported range."""
class BadUserDefinedPatternError(Exception):
    """Raised when a user-defined pattern (def*) is malformed."""
class BadRcFileError(Exception):
    """Raised when an rc file cannot be parsed."""
class BadGenderError(Exception):
    """Raised when an unrecognized gender value is supplied."""
__version__ = "2.1.0"
STDOUT_ON = False
def print3(txt):
    # Conditional debug print: echo *txt* to stdout only when the
    # module-level STDOUT_ON flag is True (it defaults to False).
    if STDOUT_ON:
        print(txt)
def enclose(s):
    """Wrap regex fragment *s* in a non-capturing group."""
    grouped = "(?:%s)" % s
    return grouped
def joinstem(cutpoint=0, words=""):
    """
    Build a non-capturing regex group matching the stem of each word.

    Each word is truncated at *cutpoint* -- usually negative, i.e. the
    number of trailing letters to drop from each word -- and the stems
    are joined with ``|`` inside ``(?:...)``.

    e.g.
    joinstem(-2, ["ephemeris", "iris", ".*itis"]) returns
    (?:ephemer|ir|.*it)
    """
    # The enclose() helper is inlined here: "(?:...)" wraps the alternation.
    stems = [word[:cutpoint] for word in words]
    return "(?:%s)" % "|".join(stems)
def bysize(words):
    """
    Group *words* into a dict mapping word length to the set of words
    having that length.

    e.g.
    ret[3]=set(['ant', 'cat', 'dog', 'pig'])
    ret[4]=set(['frog', 'goat'])
    ret[5]=set(['horse'])
    ret[8]=set(['elephant'])
    """
    grouped = {}
    for word in words:
        # setdefault creates the bucket on first sight of this length.
        grouped.setdefault(len(word), set()).add(word)
    return grouped
def make_pl_si_lists(lst, plending, siendingsize, dojoinstem=True):
    """
    Build the lookup structures for one family of singular/plural nouns.

    Arguments:
        lst -- the singular words
        plending -- suffix appended to form the plural
        siendingsize -- number of trailing characters removed from each
            singular before appending plending (None removes nothing)
        dojoinstem -- whether to also build a stem-matching regex

    Returns (si_list, si_bysize, pl_bysize[, stem]):
        si_list -- the pluralised words (what must be matched to recover
            the singular)
        si_bysize -- the pluralised words grouped by length
        pl_bysize -- the singular words grouped by length
        stem -- regex alternation of the stems (only when dojoinstem)
    """
    cut = None if siendingsize is None else -siendingsize
    si_list = [word[:cut] + plending for word in lst]
    pl_bysize = bysize(lst)
    si_bysize = bysize(si_list)
    if not dojoinstem:
        return si_list, si_bysize, pl_bysize
    return si_list, si_bysize, pl_bysize, joinstem(cut, lst)
# 1. PLURALS
pl_sb_irregular_s = {
"corpus": "corpuses|corpora",
"opus": "opuses|opera",
"genus": "genera",
"mythos": "mythoi",
"penis": "penises|penes",
"testis": "testes",
"atlas": "atlases|atlantes",
"yes": "yeses",
}
pl_sb_irregular = {
"child": "children",
"brother": "brothers|brethren",
"loaf": "loaves",
"hoof": "hoofs|hooves",
"beef": "beefs|beeves",
"thief": "thiefs|thieves",
"money": "monies",
"mongoose": "mongooses",
"ox": "oxen",
"cow": "cows|kine",
"graffito": "graffiti",
"octopus": "octopuses|octopodes",
"genie": "genies|genii",
"ganglion": "ganglions|ganglia",
"trilby": "trilbys",
"turf": "turfs|turves",
"numen": "numina",
"atman": "atmas",
"occiput": "occiputs|occipita",
"sabretooth": "sabretooths",
"sabertooth": "sabertooths",
"lowlife": "lowlifes",
"flatfoot": "flatfoots",
"tenderfoot": "tenderfoots",
"romany": "romanies",
"jerry": "jerries",
"mary": "maries",
"talouse": "talouses",
"blouse": "blouses",
"rom": "roma",
"carmen": "carmina",
}
pl_sb_irregular.update(pl_sb_irregular_s)
# pl_sb_irregular_keys = enclose('|'.join(pl_sb_irregular.keys()))
pl_sb_irregular_caps = {
"Romany": "Romanies",
"Jerry": "Jerrys",
"Mary": "Marys",
"Rom": "Roma",
}
pl_sb_irregular_compound = {"prima donna": "prima donnas|prime donne"}
# Invert the irregular-plural table so it can drive singularisation.
si_sb_irregular = {v: k for (k, v) in pl_sb_irregular.items()}
keys = list(si_sb_irregular.keys())
# Some plurals were stored as alternatives ("a|b"): split each such key
# into two separate entries that map to the same singular.
for k in keys:
    if "|" in k:
        k1, k2 = k.split("|")
        si_sb_irregular[k1] = si_sb_irregular[k2] = si_sb_irregular[k]
        del si_sb_irregular[k]
# Inverted lookup tables for capitalised and compound irregular plurals.
si_sb_irregular_caps = {v: k for (k, v) in pl_sb_irregular_caps.items()}
si_sb_irregular_compound = {v: k for (k, v) in pl_sb_irregular_compound.items()}
keys = list(si_sb_irregular_compound.keys())
# As above: split alternative plurals ("a|b") into separate entries.
for k in keys:
    if "|" in k:
        k1, k2 = k.split("|")
        si_sb_irregular_compound[k1] = si_sb_irregular_compound[k2] = si_sb_irregular_compound[k]
        del si_sb_irregular_compound[k]
# si_sb_irregular_keys = enclose('|'.join(si_sb_irregular.keys()))
# Z's that don't double
pl_sb_z_zes_list = ("quartz", "topaz")
pl_sb_z_zes_bysize = bysize(pl_sb_z_zes_list)
pl_sb_ze_zes_list = ("snooze",)
pl_sb_ze_zes_bysize = bysize(pl_sb_ze_zes_list)
# CLASSICAL "..is" -> "..ides"
pl_sb_C_is_ides_complete = [
# GENERAL WORDS...
"ephemeris",
"iris",
"clitoris",
"chrysalis",
"epididymis",
]
pl_sb_C_is_ides_endings = [
# INFLAMATIONS...
"itis"
]
pl_sb_C_is_ides = joinstem(
-2, pl_sb_C_is_ides_complete + [".*%s" % w for w in pl_sb_C_is_ides_endings]
)
pl_sb_C_is_ides_list = pl_sb_C_is_ides_complete + pl_sb_C_is_ides_endings
(
si_sb_C_is_ides_list,
si_sb_C_is_ides_bysize,
pl_sb_C_is_ides_bysize,
) = make_pl_si_lists(pl_sb_C_is_ides_list, "ides", 2, dojoinstem=False)
# CLASSICAL "..a" -> "..ata"
pl_sb_C_a_ata_list = (
"anathema",
"bema",
"carcinoma",
"charisma",
"diploma",
"dogma",
"drama",
"edema",
"enema",
"enigma",
"lemma",
"lymphoma",
"magma",
"melisma",
"miasma",
"oedema",
"sarcoma",
"schema",
"soma",
"stigma",
"stoma",
"trauma",
"gumma",
"pragma",
)
(
si_sb_C_a_ata_list,
si_sb_C_a_ata_bysize,
pl_sb_C_a_ata_bysize,
pl_sb_C_a_ata,
) = make_pl_si_lists(pl_sb_C_a_ata_list, "ata", 1)
# UNCONDITIONAL "..a" -> "..ae"
pl_sb_U_a_ae_list = ("alumna", "alga", "vertebra", "persona")
(
si_sb_U_a_ae_list,
si_sb_U_a_ae_bysize,
pl_sb_U_a_ae_bysize,
pl_sb_U_a_ae,
) = make_pl_si_lists(pl_sb_U_a_ae_list, "e", None)
# CLASSICAL "..a" -> "..ae"
pl_sb_C_a_ae_list = (
"amoeba",
"antenna",
"formula",
"hyperbola",
"medusa",
"nebula",
"parabola",
"abscissa",
"hydra",
"nova",
"lacuna",
"aurora",
"umbra",
"flora",
"fauna",
)
(
si_sb_C_a_ae_list,
si_sb_C_a_ae_bysize,
pl_sb_C_a_ae_bysize,
pl_sb_C_a_ae,
) = make_pl_si_lists(pl_sb_C_a_ae_list, "e", None)
# CLASSICAL "..en" -> "..ina"
pl_sb_C_en_ina_list = ("stamen", "foramen", "lumen")
(
si_sb_C_en_ina_list,
si_sb_C_en_ina_bysize,
pl_sb_C_en_ina_bysize,
pl_sb_C_en_ina,
) = make_pl_si_lists(pl_sb_C_en_ina_list, "ina", 2)
# UNCONDITIONAL "..um" -> "..a"
pl_sb_U_um_a_list = (
"bacterium",
"agendum",
"desideratum",
"erratum",
"stratum",
"datum",
"ovum",
"extremum",
"candelabrum",
)
(
si_sb_U_um_a_list,
si_sb_U_um_a_bysize,
pl_sb_U_um_a_bysize,
pl_sb_U_um_a,
) = make_pl_si_lists(pl_sb_U_um_a_list, "a", 2)
# CLASSICAL "..um" -> "..a"
pl_sb_C_um_a_list = (
"maximum",
"minimum",
"momentum",
"optimum",
"quantum",
"cranium",
"curriculum",
"dictum",
"phylum",
"aquarium",
"compendium",
"emporium",
"enconium",
"gymnasium",
"honorarium",
"interregnum",
"lustrum",
"memorandum",
"millennium",
"rostrum",
"spectrum",
"speculum",
"stadium",
"trapezium",
"ultimatum",
"medium",
"vacuum",
"velum",
"consortium",
"arboretum",
)
(
si_sb_C_um_a_list,
si_sb_C_um_a_bysize,
pl_sb_C_um_a_bysize,
pl_sb_C_um_a,
) = make_pl_si_lists(pl_sb_C_um_a_list, "a", 2)
# UNCONDITIONAL "..us" -> "i"
pl_sb_U_us_i_list = (
"alumnus",
"alveolus",
"bacillus",
"bronchus",
"locus",
"nucleus",
"stimulus",
"meniscus",
"sarcophagus",
)
(
si_sb_U_us_i_list,
si_sb_U_us_i_bysize,
pl_sb_U_us_i_bysize,
pl_sb_U_us_i,
) = make_pl_si_lists(pl_sb_U_us_i_list, "i", 2)
# CLASSICAL "..us" -> "..i"
pl_sb_C_us_i_list = (
"focus",
"radius",
"genius",
"incubus",
"succubus",
"nimbus",
"fungus",
"nucleolus",
"stylus",
"torus",
"umbilicus",
"uterus",
"hippopotamus",
"cactus",
)
(
si_sb_C_us_i_list,
si_sb_C_us_i_bysize,
pl_sb_C_us_i_bysize,
pl_sb_C_us_i,
) = make_pl_si_lists(pl_sb_C_us_i_list, "i", 2)
# CLASSICAL "..us" -> "..us" (ASSIMILATED 4TH DECLENSION LATIN NOUNS)
pl_sb_C_us_us = (
"status",
"apparatus",
"prospectus",
"sinus",
"hiatus",
"impetus",
"plexus",
)
pl_sb_C_us_us_bysize = bysize(pl_sb_C_us_us)
# UNCONDITIONAL "..on" -> "a"
pl_sb_U_on_a_list = (
"criterion",
"perihelion",
"aphelion",
"phenomenon",
"prolegomenon",
"noumenon",
"organon",
"asyndeton",
"hyperbaton",
)
(
si_sb_U_on_a_list,
si_sb_U_on_a_bysize,
pl_sb_U_on_a_bysize,
pl_sb_U_on_a,
) = make_pl_si_lists(pl_sb_U_on_a_list, "a", 2)
# CLASSICAL "..on" -> "..a"
pl_sb_C_on_a_list = ("oxymoron",)
(
si_sb_C_on_a_list,
si_sb_C_on_a_bysize,
pl_sb_C_on_a_bysize,
pl_sb_C_on_a,
) = make_pl_si_lists(pl_sb_C_on_a_list, "a", 2)
# CLASSICAL "..o" -> "..i" (BUT NORMALLY -> "..os")
pl_sb_C_o_i = [
"solo",
"soprano",
"basso",
"alto",
"contralto",
"tempo",
"piano",
"virtuoso",
] # list not tuple so can concat for pl_sb_U_o_os
pl_sb_C_o_i_bysize = bysize(pl_sb_C_o_i)
si_sb_C_o_i_bysize = bysize(["%si" % w[:-1] for w in pl_sb_C_o_i])
pl_sb_C_o_i_stems = joinstem(-1, pl_sb_C_o_i)
# ALWAYS "..o" -> "..os"
pl_sb_U_o_os_complete = {"ado", "ISO", "NATO", "NCO", "NGO", "oto"}
si_sb_U_o_os_complete = {"%ss" % w for w in pl_sb_U_o_os_complete}
pl_sb_U_o_os_endings = [
"aficionado",
"aggro",
"albino",
"allegro",
"ammo",
"Antananarivo",
"archipelago",
"armadillo",
"auto",
"avocado",
"Bamako",
"Barquisimeto",
"bimbo",
"bingo",
"Biro",
"bolero",
"Bolzano",
"bongo",
"Boto",
"burro",
"Cairo",
"canto",
"cappuccino",
"casino",
"cello",
"Chicago",
"Chimango",
"cilantro",
"cochito",
"coco",
"Colombo",
"Colorado",
"commando",
"concertino",
"contango",
"credo",
"crescendo",
"cyano",
"demo",
"ditto",
"Draco",
"dynamo",
"embryo",
"Esperanto",
"espresso",
"euro",
"falsetto",
"Faro",
"fiasco",
"Filipino",
"flamenco",
"furioso",
"generalissimo",
"Gestapo",
"ghetto",
"gigolo",
"gizmo",
"Greensboro",
"gringo",
"Guaiabero",
"guano",
"gumbo",
"gyro",
"hairdo",
"hippo",
"Idaho",
"impetigo",
"inferno",
"info",
"intermezzo",
"intertrigo",
"Iquico",
"jumbo",
"junto",
"Kakapo",
"kilo",
"Kinkimavo",
"Kokako",
"Kosovo",
"Lesotho",
"libero",
"libido",
"libretto",
"lido",
"Lilo",
"limbo",
"limo",
"lineno",
"lingo",
"lino",
"livedo",
"loco",
"logo",
"lumbago",
"macho",
"macro",
"mafioso",
"magneto",
"magnifico",
"Majuro",
"Malabo",
"manifesto",
"Maputo",
"Maracaibo",
"medico",
"memo",
"metro",
"Mexico",
"micro",
"Milano",
"Monaco",
"mono",
"Montenegro",
"Morocco",
"Muqdisho",
"myo",
"neutrino",
"Ningbo",
"octavo",
"oregano",
"Orinoco",
"Orlando",
"Oslo",
"panto",
"Paramaribo",
"Pardusco",
"pedalo",
"photo",
"pimento",
"pinto",
"pleco",
"Pluto",
"pogo",
"polo",
"poncho",
"Porto-Novo",
"Porto",
"pro",
"psycho",
"pueblo",
"quarto",
"Quito",
"rhino",
"risotto",
"rococo",
"rondo",
"Sacramento",
"saddo",
"sago",
"salvo",
"Santiago",
"Sapporo",
"Sarajevo",
"scherzando",
"scherzo",
"silo",
"sirocco",
"sombrero",
"staccato",
"sterno",
"stucco",
"stylo",
"sumo",
"Taiko",
"techno",
"terrazzo",
"testudo",
"timpano",
"tiro",
"tobacco",
"Togo",
"Tokyo",
"torero",
"Torino",
"Toronto",
"torso",
"tremolo",
"typo",
"tyro",
"ufo",
"UNESCO",
"vaquero",
"vermicello",
"verso",
"vibrato",
"violoncello",
"Virgo",
"weirdo",
"WHO",
"WTO",
"Yamoussoukro",
"yo-yo",
"zero",
"Zibo",
] + pl_sb_C_o_i
pl_sb_U_o_os_bysize = bysize(pl_sb_U_o_os_endings)
si_sb_U_o_os_bysize = bysize(["%ss" % w for w in pl_sb_U_o_os_endings])
# UNCONDITIONAL "..ch" -> "..chs"
pl_sb_U_ch_chs_list = ("czech", "eunuch", "stomach")
(
si_sb_U_ch_chs_list,
si_sb_U_ch_chs_bysize,
pl_sb_U_ch_chs_bysize,
pl_sb_U_ch_chs,
) = make_pl_si_lists(pl_sb_U_ch_chs_list, "s", None)
# UNCONDITIONAL "..[ei]x" -> "..ices"
pl_sb_U_ex_ices_list = ("codex", "murex", "silex")
(
si_sb_U_ex_ices_list,
si_sb_U_ex_ices_bysize,
pl_sb_U_ex_ices_bysize,
pl_sb_U_ex_ices,
) = make_pl_si_lists(pl_sb_U_ex_ices_list, "ices", 2)
pl_sb_U_ix_ices_list = ("radix", "helix")
(
si_sb_U_ix_ices_list,
si_sb_U_ix_ices_bysize,
pl_sb_U_ix_ices_bysize,
pl_sb_U_ix_ices,
) = make_pl_si_lists(pl_sb_U_ix_ices_list, "ices", 2)
# CLASSICAL "..[ei]x" -> "..ices"
pl_sb_C_ex_ices_list = (
"vortex",
"vertex",
"cortex",
"latex",
"pontifex",
"apex",
"index",
"simplex",
)
(
si_sb_C_ex_ices_list,
si_sb_C_ex_ices_bysize,
pl_sb_C_ex_ices_bysize,
pl_sb_C_ex_ices,
) = make_pl_si_lists(pl_sb_C_ex_ices_list, "ices", 2)
pl_sb_C_ix_ices_list = ("appendix",)
(
si_sb_C_ix_ices_list,
si_sb_C_ix_ices_bysize,
pl_sb_C_ix_ices_bysize,
pl_sb_C_ix_ices,
) = make_pl_si_lists(pl_sb_C_ix_ices_list, "ices", 2)
# ARABIC: ".." -> "..i"
pl_sb_C_i_list = ("afrit", "afreet", "efreet")
(si_sb_C_i_list, si_sb_C_i_bysize, pl_sb_C_i_bysize, pl_sb_C_i) = make_pl_si_lists(
pl_sb_C_i_list, "i", None
)
# HEBREW: ".." -> "..im"
pl_sb_C_im_list = ("goy", "seraph", "cherub")
(si_sb_C_im_list, si_sb_C_im_bysize, pl_sb_C_im_bysize, pl_sb_C_im) = make_pl_si_lists(
pl_sb_C_im_list, "im", None
)
# UNCONDITIONAL "..man" -> "..mans"
pl_sb_U_man_mans_list = """
ataman caiman cayman ceriman
desman dolman farman harman hetman
human leman ottoman shaman talisman
""".split()
pl_sb_U_man_mans_caps_list = """
Alabaman Bahaman Burman German
Hiroshiman Liman Nakayaman Norman Oklahoman
Panaman Roman Selman Sonaman Tacoman Yakiman
Yokohaman Yuman
""".split()
(
si_sb_U_man_mans_list,
si_sb_U_man_mans_bysize,
pl_sb_U_man_mans_bysize,
) = make_pl_si_lists(pl_sb_U_man_mans_list, "s", None, dojoinstem=False)
(
si_sb_U_man_mans_caps_list,
si_sb_U_man_mans_caps_bysize,
pl_sb_U_man_mans_caps_bysize,
) = make_pl_si_lists(pl_sb_U_man_mans_caps_list, "s", None, dojoinstem=False)
pl_sb_uninflected_s_complete = [
# PAIRS OR GROUPS SUBSUMED TO A SINGULAR...
"breeches",
"britches",
"pajamas",
"pyjamas",
"clippers",
"gallows",
"hijinks",
"headquarters",
"pliers",
"scissors",
"testes",
"herpes",
"pincers",
"shears",
"proceedings",
"trousers",
# UNASSIMILATED LATIN 4th DECLENSION
"cantus",
"coitus",
"nexus",
# RECENT IMPORTS...
"contretemps",
"corps",
"debris",
"siemens",
# DISEASES
"mumps",
# MISCELLANEOUS OTHERS...
"diabetes",
"jackanapes",
"series",
"species",
"subspecies",
"rabies",
"chassis",
"innings",
"news",
"mews",
"haggis",
]
pl_sb_uninflected_s_endings = [
# RECENT IMPORTS...
"ois",
# DISEASES
"measles",
]
pl_sb_uninflected_s = pl_sb_uninflected_s_complete + [
".*%s" % w for w in pl_sb_uninflected_s_endings
]
pl_sb_uninflected_herd = (
# DON'T INFLECT IN CLASSICAL MODE, OTHERWISE NORMAL INFLECTION
"wildebeest",
"swine",
"eland",
"bison",
"buffalo",
"elk",
"rhinoceros",
"zucchini",
"caribou",
"dace",
"grouse",
"guinea fowl",
"guinea-fowl",
"haddock",
"hake",
"halibut",
"herring",
"mackerel",
"pickerel",
"pike",
"roe",
"seed",
"shad",
"snipe",
"teal",
"turbot",
"water fowl",
"water-fowl",
)
pl_sb_uninflected_complete = [
# SOME FISH AND HERD ANIMALS
"tuna",
"salmon",
"mackerel",
"trout",
"bream",
"sea-bass",
"sea bass",
"carp",
"cod",
"flounder",
"whiting",
"moose",
# OTHER ODDITIES
"graffiti",
"djinn",
"samuri",
"offspring",
"pence",
"quid",
"hertz",
] + pl_sb_uninflected_s_complete
# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE)
pl_sb_uninflected_caps = [
# ALL NATIONALS ENDING IN -ese
"Portuguese",
"Amoyese",
"Borghese",
"Congoese",
"Faroese",
"Foochowese",
"Genevese",
"Genoese",
"Gilbertese",
"Hottentotese",
"Kiplingese",
"Kongoese",
"Lucchese",
"Maltese",
"Nankingese",
"Niasese",
"Pekingese",
"Piedmontese",
"Pistoiese",
"Sarawakese",
"Shavese",
"Vermontese",
"Wenchowese",
"Yengeese",
]
pl_sb_uninflected_endings = [
# SOME FISH AND HERD ANIMALS
"fish",
"deer",
"sheep",
# ALL NATIONALS ENDING IN -ese
"nese",
"rese",
"lese",
"mese",
# DISEASES
"pox",
# OTHER ODDITIES
"craft",
] + pl_sb_uninflected_s_endings
# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE)
pl_sb_uninflected_bysize = bysize(pl_sb_uninflected_endings)
# SINGULAR WORDS ENDING IN ...s (ALL INFLECT WITH ...es)
pl_sb_singular_s_complete = [
"acropolis",
"aegis",
"alias",
"asbestos",
"bathos",
"bias",
"bronchitis",
"bursitis",
"caddis",
"cannabis",
"canvas",
"chaos",
"cosmos",
"dais",
"digitalis",
"epidermis",
"ethos",
"eyas",
"gas",
"glottis",
"hubris",
"ibis",
"lens",
"mantis",
"marquis",
"metropolis",
"pathos",
"pelvis",
"polis",
"rhinoceros",
"sassafras",
"trellis",
] + pl_sb_C_is_ides_complete
pl_sb_singular_s_endings = ["ss", "us"] + pl_sb_C_is_ides_endings
pl_sb_singular_s_bysize = bysize(pl_sb_singular_s_endings)
si_sb_singular_s_complete = ["%ses" % w for w in pl_sb_singular_s_complete]
si_sb_singular_s_endings = ["%ses" % w for w in pl_sb_singular_s_endings]
si_sb_singular_s_bysize = bysize(si_sb_singular_s_endings)
pl_sb_singular_s_es = ["[A-Z].*es"]
pl_sb_singular_s = enclose(
"|".join(
pl_sb_singular_s_complete
+ [".*%s" % w for w in pl_sb_singular_s_endings]
+ pl_sb_singular_s_es
)
)
# PLURALS ENDING IN uses -> use
si_sb_ois_oi_case = ("Bolshois", "Hanois")
si_sb_uses_use_case = ("Betelgeuses", "Duses", "Meuses", "Syracuses", "Toulouses")
si_sb_uses_use = (
"abuses",
"applauses",
"blouses",
"carouses",
"causes",
"chartreuses",
"clauses",
"contuses",
"douses",
"excuses",
"fuses",
"grouses",
"hypotenuses",
"masseuses",
"menopauses",
"misuses",
"muses",
"overuses",
"pauses",
"peruses",
"profuses",
"recluses",
"reuses",
"ruses",
"souses",
"spouses",
"suffuses",
"transfuses",
"uses",
)
si_sb_ies_ie_case = (
"Addies",
"Aggies",
"Allies",
"Amies",
"Angies",
"Annies",
"Annmaries",
"Archies",
"Arties",
"Aussies",
"Barbies",
"Barries",
"Basies",
"Bennies",
"Bernies",
"Berties",
"Bessies",
"Betties",
"Billies",
"Blondies",
"Bobbies",
"Bonnies",
"Bowies",
"Brandies",
"Bries",
"Brownies",
"Callies",
"Carnegies",
"Carries",
"Cassies",
"Charlies",
"Cheries",
"Christies",
"Connies",
"Curies",
"Dannies",
"Debbies",
"Dixies",
"Dollies",
"Donnies",
"Drambuies",
"Eddies",
"Effies",
"Ellies",
"Elsies",
"Eries",
"Ernies",
"Essies",
"Eugenies",
"Fannies",
"Flossies",
"Frankies",
"Freddies",
"Gillespies",
"Goldies",
"Gracies",
"Guthries",
"Hallies",
"Hatties",
"Hetties",
"Hollies",
"Jackies",
"Jamies",
"Janies",
"Jannies",
"Jeanies",
"Jeannies",
"Jennies",
"Jessies",
"Jimmies",
"Jodies",
"Johnies",
"Johnnies",
"Josies",
"Julies",
"Kalgoorlies",
"Kathies",
"Katies",
"Kellies",
"Kewpies",
"Kristies",
"Laramies",
"Lassies",
"Lauries",
"Leslies",
"Lessies",
"Lillies",
"Lizzies",
"Lonnies",
"Lories",
"Lorries",
"Lotties",
"Louies",
"Mackenzies",
"Maggies",
"Maisies",
"Mamies",
"Marcies",
"Margies",
"Maries",
"Marjories",
"Matties",
"McKenzies",
"Melanies",
"Mickies",
"Millies",
"Minnies",
"Mollies",
"Mounties",
"Nannies",
"Natalies",
"Nellies",
"Netties",
"Ollies",
"Ozzies",
"Pearlies",
"Pottawatomies",
"Reggies",
"Richies",
"Rickies",
"Robbies",
"Ronnies",
"Rosalies",
"Rosemaries",
"Rosies",
"Roxies",
"Rushdies",
"Ruthies",
"Sadies",
"Sallies",
"Sammies",
"Scotties",
"Selassies",
"Sherries",
"Sophies",
"Stacies",
"Stefanies",
"Stephanies",
"Stevies",
"Susies",
"Sylvies",
"Tammies",
"Terries",
"Tessies",
"Tommies",
"Tracies",
"Trekkies",
"Valaries",
"Valeries",
"Valkyries",
"Vickies",
"Virgies",
"Willies",
"Winnies",
"Wylies",
"Yorkies",
)
si_sb_ies_ie = (
"aeries",
"baggies",
"belies",
"biggies",
"birdies",
"bogies",
"bonnies",
"boogies",
"bookies",
"bourgeoisies",
"brownies",
"budgies",
"caddies",
"calories",
"camaraderies",
"cockamamies",
"collies",
"cookies",
"coolies",
"cooties",
"coteries",
"crappies",
"curies",
"cutesies",
"dogies",
"eyrie",
"floozies",
"footsies",
"freebies",
"genies",
"goalies",
"groupies",
"hies",
"jalousies",
"junkies",
"kiddies",
"laddies",
"lassies",
"lies",
"lingeries",
"magpies",
"menageries",
"mommies",
"movies",
"neckties",
"newbies",
"nighties",
"oldies",
"organdies",
"overlies",
"pies",
"pinkies",
"pixies",
"potpies",
"prairies",
"quickies",
"reveries",
"rookies",
"rotisseries",
"softies",
"sorties",
"species",
"stymies",
"sweeties",
"ties",
"underlies",
"unties",
"veggies",
"vies",
"yuppies",
"zombies",
)
si_sb_oes_oe_case = (
"Chloes",
"Crusoes",
"Defoes",
"Faeroes",
"Ivanhoes",
"Joes",
"McEnroes",
"Moes",
"Monroes",
"Noes",
"Poes",
"Roscoes",
"Tahoes",
"Tippecanoes",
"Zoes",
)
si_sb_oes_oe = (
"aloes",
"backhoes",
"canoes",
"does",
"floes",
"foes",
"hoes",
"mistletoes",
"oboes",
"pekoes",
"roes",
"sloes",
"throes",
"tiptoes",
"toes",
"woes",
)
si_sb_z_zes = ("quartzes", "topazes")
si_sb_zzes_zz = ("buzzes", "fizzes", "frizzes", "razzes")
si_sb_ches_che_case = (
"Andromaches",
"Apaches",
"Blanches",
"Comanches",
"Nietzsches",
"Porsches",
"Roches",
)
si_sb_ches_che = (
"aches",
"avalanches",
"backaches",
"bellyaches",
"caches",
"cloches",
"creches",
"douches",
"earaches",
"fiches",
"headaches",
"heartaches",
"microfiches",
"niches",
"pastiches",
"psyches",
"quiches",
"stomachaches",
"toothaches",
)
si_sb_xes_xe = ("annexes", "axes", "deluxes", "pickaxes")
si_sb_sses_sse_case = ("Hesses", "Jesses", "Larousses", "Matisses")
si_sb_sses_sse = (
"bouillabaisses",
"crevasses",
"demitasses",
"impasses",
"mousses",
"posses",
)
si_sb_ves_ve_case = (
# *[nwl]ives -> [nwl]live
"Clives",
"Palmolives",
)
si_sb_ves_ve = (
# *[^d]eaves -> eave
"interweaves",
"weaves",
# *[nwl]ives -> [nwl]live
"olives",
# *[eoa]lves -> [eoa]lve
"bivalves",
"dissolves",
"resolves",
"salves",
"twelves",
"valves",
)
plverb_special_s = enclose(
"|".join(
[pl_sb_singular_s]
+ pl_sb_uninflected_s
+ list(pl_sb_irregular_s.keys())
+ ["(.*[csx])is", "(.*)ceps", "[A-Z].*s"]
)
)
pl_sb_postfix_adj = {
"general": [r"(?!major|lieutenant|brigadier|adjutant|.*star)\S+"],
"martial": ["court"],
"force": ["pound"],
}
for k in list(pl_sb_postfix_adj.keys()):
pl_sb_postfix_adj[k] = enclose(
enclose("|".join(pl_sb_postfix_adj[k])) + "(?=(?:-|\\s+)%s)" % k
)
pl_sb_postfix_adj_stems = "(" + "|".join(list(pl_sb_postfix_adj.values())) + ")(.*)"
# PLURAL WORDS ENDING IS es GO TO SINGULAR is
si_sb_es_is = (
"amanuenses",
"amniocenteses",
"analyses",
"antitheses",
"apotheoses",
"arterioscleroses",
"atheroscleroses",
"axes",
# 'bases', # bases -> basis
"catalyses",
"catharses",
"chasses",
"cirrhoses",
"cocces",
"crises",
"diagnoses",
"dialyses",
"diereses",
"electrolyses",
"emphases",
"exegeses",
"geneses",
"halitoses",
"hydrolyses",
"hypnoses",
"hypotheses",
"hystereses",
"metamorphoses",
"metastases",
"misdiagnoses",
"mitoses",
"mononucleoses",
"narcoses",
"necroses",
"nemeses",
"neuroses",
"oases",
"osmoses",
"osteoporoses",
"paralyses",
"parentheses",
"parthenogeneses",
"periphrases",
"photosyntheses",
"probosces",
"prognoses",
"prophylaxes",
"prostheses",
"preces",
"psoriases",
"psychoanalyses",
"psychokineses",
"psychoses",
"scleroses",
"scolioses",
"sepses",
"silicoses",
"symbioses",
"synopses",
"syntheses",
"taxes",
"telekineses",
"theses",
"thromboses",
"tuberculoses",
"urinalyses",
)
pl_prep_list = """
about above across after among around at athwart before behind
below beneath beside besides between betwixt beyond but by
during except for from in into near of off on onto out over
since till to under until unto upon with""".split()
pl_prep_list_da = pl_prep_list + ["de", "du", "da"]
pl_prep_bysize = bysize(pl_prep_list_da)
pl_prep = enclose("|".join(pl_prep_list_da))
pl_sb_prep_dual_compound = (
r"(.*?)((?:-|\s+)(?:" + pl_prep + r")(?:-|\s+))a(?:-|\s+)(.*)"
)
singular_pronoun_genders = {
"neuter",
"feminine",
"masculine",
"gender-neutral",
"feminine or masculine",
"masculine or feminine",
}
pl_pron_nom = {
# NOMINATIVE REFLEXIVE
"i": "we",
"myself": "ourselves",
"you": "you",
"yourself": "yourselves",
"she": "they",
"herself": "themselves",
"he": "they",
"himself": "themselves",
"it": "they",
"itself": "themselves",
"they": "they",
"themself": "themselves",
# POSSESSIVE
"mine": "ours",
"yours": "yours",
"hers": "theirs",
"his": "theirs",
"its": "theirs",
"theirs": "theirs",
}
si_pron = {}
si_pron["nom"] = {v: k for (k, v) in pl_pron_nom.items()}
si_pron["nom"]["we"] = "I"
pl_pron_acc = {
# ACCUSATIVE REFLEXIVE
"me": "us",
"myself": "ourselves",
"you": "you",
"yourself": "yourselves",
"her": "them",
"herself": "themselves",
"him": "them",
"himself": "themselves",
"it": "them",
"itself": "themselves",
"them": "them",
"themself": "themselves",
}
pl_pron_acc_keys = enclose("|".join(list(pl_pron_acc.keys())))
pl_pron_acc_keys_bysize = bysize(list(pl_pron_acc.keys()))
si_pron["acc"] = {v: k for (k, v) in pl_pron_acc.items()}
for thecase, plur, gend, sing in (
("nom", "they", "neuter", "it"),
("nom", "they", "feminine", "she"),
("nom", "they", "masculine", "he"),
("nom", "they", "gender-neutral", "they"),
("nom", "they", "feminine or masculine", "she or he"),
("nom", "they", "masculine or feminine", "he or she"),
("nom", "themselves", "neuter", "itself"),
("nom", "themselves", "feminine", "herself"),
("nom", "themselves", "masculine", "himself"),
("nom", "themselves", "gender-neutral", "themself"),
("nom", "themselves", "feminine or masculine", "herself or himself"),
("nom", "themselves", "masculine or feminine", "himself or herself"),
("nom", "theirs", "neuter", "its"),
("nom", "theirs", "feminine", "hers"),
("nom", "theirs", "masculine", "his"),
("nom", "theirs", "gender-neutral", "theirs"),
("nom", "theirs", "feminine or masculine", "hers or his"),
("nom", "theirs", "masculine or feminine", "his or hers"),
("acc", "them", "neuter", "it"),
("acc", "them", "feminine", "her"),
("acc", "them", "masculine", "him"),
("acc", "them", "gender-neutral", "them"),
("acc", "them", "feminine or masculine", "her or him"),
("acc", "them", "masculine or feminine", "him or her"),
("acc", "themselves", "neuter", "itself"),
("acc", "themselves", "feminine", "herself"),
("acc", "themselves", "masculine", "himself"),
("acc", "themselves", "gender-neutral", "themself"),
("acc", "themselves", "feminine or masculine", "herself or himself"),
("acc", "themselves", "masculine or feminine", "himself or herself"),
):
try:
si_pron[thecase][plur][gend] = sing
except TypeError:
si_pron[thecase][plur] = {}
si_pron[thecase][plur][gend] = sing
si_pron_acc_keys = enclose("|".join(list(si_pron["acc"].keys())))
si_pron_acc_keys_bysize = bysize(list(si_pron["acc"].keys()))
def get_si_pron(thecase, word, gender):
    """
    Look up the singular of the plural pronoun *word* for the given case.

    Raises KeyError when *word* is not a pronoun of that case.  Entries
    whose singular varies by gender are stored as {gender: singular}
    sub-dicts; plain-string entries are gender-independent.
    """
    entry = si_pron[thecase][word]  # KeyError propagates: not a pronoun
    if isinstance(entry, dict):
        return entry[gender]  # singular differs per gender
    return entry  # answer independent of gender
# Present-tense verbs whose plural is irregular.  The original table listed
# one row per grammatical person (1st/2nd/3rd singular), which in a dict
# literal produces duplicate keys (a lint error; later entries silently
# overwrite earlier identical ones) -- only the unique mappings are kept.
plverb_irregular_pres = {
    "am": "are",
    "is": "are",
    "are": "are",
    "was": "were",
    "were": "were",
    "has": "have",
    "have": "have",
    "does": "do",
    "do": "do",
}
# Present-tense verbs that could also be nouns ("flies", "runs", ...).
# The original table listed one row per grammatical person, creating many
# duplicate dict-literal keys (a lint error with no effect on the resulting
# dict); only the unique mappings are kept.
plverb_ambiguous_pres = {
    "act": "act",
    "acts": "act",
    "blame": "blame",
    "blames": "blame",
    "can": "can",
    "must": "must",
    "fly": "fly",
    "flies": "fly",
    "copy": "copy",
    "copies": "copy",
    "drink": "drink",
    "drinks": "drink",
    "fight": "fight",
    "fights": "fight",
    "fire": "fire",
    "fires": "fire",
    "like": "like",
    "likes": "like",
    "look": "look",
    "looks": "look",
    "make": "make",
    "makes": "make",
    "reach": "reach",
    "reaches": "reach",
    "run": "run",
    "runs": "run",
    "sink": "sink",
    "sinks": "sink",
    "sleep": "sleep",
    "sleeps": "sleep",
    "view": "view",
    "views": "view",
}
plverb_ambiguous_pres_keys = enclose("|".join(list(plverb_ambiguous_pres.keys())))
plverb_irregular_non_pres = (
"did",
"had",
"ate",
"made",
"put",
"spent",
"fought",
"sank",
"gave",
"sought",
"shall",
"could",
"ought",
"should",
)
plverb_ambiguous_non_pres = enclose(
"|".join(("thought", "saw", "bent", "will", "might", "cut"))
)
# "..oes" -> "..oe" (the rest are "..oes" -> "o")
pl_v_oes_oe = ("canoes", "floes", "oboes", "roes", "throes", "woes")
pl_v_oes_oe_endings_size4 = ("hoes", "toes")
pl_v_oes_oe_endings_size5 = ("shoes",)
pl_count_zero = ("0", "no", "zero", "nil")
pl_count_one = ("1", "a", "an", "one", "each", "every", "this", "that")
pl_adj_special = {"a": "some", "an": "some", "this": "these", "that": "those"}
pl_adj_special_keys = enclose("|".join(list(pl_adj_special.keys())))
pl_adj_poss = {
"my": "our",
"your": "your",
"its": "their",
"her": "their",
"his": "their",
"their": "their",
}
pl_adj_poss_keys = enclose("|".join(list(pl_adj_poss.keys())))
# 2. INDEFINITE ARTICLES
# THIS PATTERN MATCHES STRINGS OF CAPITALS STARTING WITH A "VOWEL-SOUND"
# CONSONANT FOLLOWED BY ANOTHER CONSONANT, AND WHICH ARE NOT LIKELY
# TO BE REAL WORDS (OH, ALL RIGHT THEN, IT'S JUST MAGIC!)
A_abbrev = r"""
(?! FJO | [HLMNS]Y. | RY[EO] | SQU
| ( F[LR]? | [HL] | MN? | N | RH? | S[CHKLMNPTVW]? | X(YL)?) [AEIOU])
[FHLMNRSX][A-Z]
"""
# THIS PATTERN CODES THE BEGINNINGS OF ALL ENGLISH WORDS BEGINING WITH A
# 'y' FOLLOWED BY A CONSONANT. ANY OTHER Y-CONSONANT PREFIX THEREFORE
# IMPLIES AN ABBREVIATION.
A_y_cons = "y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)"
# EXCEPTIONS TO EXCEPTIONS
A_explicit_a = enclose("|".join(("unabomber", "unanimous", "US")))
A_explicit_an = enclose(
"|".join(("euler", "hour(?!i)", "heir", "honest", "hono[ur]", "mpeg"))
)
A_ordinal_an = enclose("|".join(("[aefhilmnorsx]-?th",)))
A_ordinal_a = enclose("|".join(("[bcdgjkpqtuvwyz]-?th",)))
# NUMERICAL INFLECTIONS
# Ordinal suffix by digit; 11-13 are special-cased ("11th", not "11st").
# Key 10 is deliberately absent (the teens are handled via 11-13).
nth = {i: "th" for i in (0, 4, 5, 6, 7, 8, 9, 11, 12, 13)}
nth.update({1: "st", 2: "nd", 3: "rd"})
# Number words whose ordinal is not formed by just appending "th".
ordinal = {
    "ty": "tieth",
    "one": "first",
    "two": "second",
    "three": "third",
    "five": "fifth",
    "eight": "eighth",
    "nine": "ninth",
    "twelve": "twelfth",
}
ordinal_suff = "|".join(ordinal)
# NUMBERS
unit = [""] + "one two three four five six seven eight nine".split()
teen = (
    "ten eleven twelve thirteen fourteen fifteen "
    "sixteen seventeen eighteen nineteen"
).split()
ten = ["", ""] + "twenty thirty forty fifty sixty seventy eighty ninety".split()
# Scale names, each carrying its leading space; index 0 is the bare units
# group (a single space).
mill = [" "] + [
    " " + scale
    for scale in (
        "thousand million billion trillion quadrillion quintillion "
        "sextillion septillion octillion nonillion decillion"
    ).split()
]
# SUPPORT CLASSICAL PLURALIZATIONS
# Per-category defaults for "classical" plural behaviour; see
# engine.classical() for the category meanings.
def_classical = {
    "all": False,
    "zero": False,
    "herd": False,
    "names": True,
    "persons": False,
    "ancient": False,
}
all_classical = dict.fromkeys(def_classical, True)
no_classical = dict.fromkeys(def_classical, False)
# Maps strings to built-in constant types
string_to_constant = {"True": True, "False": False, "None": None}
class engine:
def __init__(self):
self.classical_dict = def_classical.copy()
self.persistent_count = None
self.mill_count = 0
self.pl_sb_user_defined = []
self.pl_v_user_defined = []
self.pl_adj_user_defined = []
self.si_sb_user_defined = []
self.A_a_user_defined = []
self.thegender = "neuter"
deprecated_methods = dict(
pl="plural",
plnoun="plural_noun",
plverb="plural_verb",
pladj="plural_adj",
sinoun="single_noun",
prespart="present_participle",
numwords="number_to_words",
plequal="compare",
plnounequal="compare_nouns",
plverbequal="compare_verbs",
pladjequal="compare_adjs",
wordlist="join",
)
def __getattr__(self, meth):
if meth in self.deprecated_methods:
print3(
"{}() deprecated, use {}()".format(meth, self.deprecated_methods[meth])
)
raise DeprecationWarning
raise AttributeError
def defnoun(self, singular, plural):
"""
Set the noun plural of singular to plural.
"""
self.checkpat(singular)
self.checkpatplural(plural)
self.pl_sb_user_defined.extend((singular, plural))
self.si_sb_user_defined.extend((plural, singular))
return 1
def defverb(self, s1, p1, s2, p2, s3, p3):
"""
Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively.
Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb.
"""
self.checkpat(s1)
self.checkpat(s2)
self.checkpat(s3)
self.checkpatplural(p1)
self.checkpatplural(p2)
self.checkpatplural(p3)
self.pl_v_user_defined.extend((s1, p1, s2, p2, s3, p3))
return 1
def defadj(self, singular, plural):
"""
Set the adjective plural of singular to plural.
"""
self.checkpat(singular)
self.checkpatplural(plural)
self.pl_adj_user_defined.extend((singular, plural))
return 1
def defa(self, pattern):
"""
Define the indefinate article as 'a' for words matching pattern.
"""
self.checkpat(pattern)
self.A_a_user_defined.extend((pattern, "a"))
return 1
def defan(self, pattern):
"""
Define the indefinate article as 'an' for words matching pattern.
"""
self.checkpat(pattern)
self.A_a_user_defined.extend((pattern, "an"))
return 1
def checkpat(self, pattern):
"""
check for errors in a regex pattern
"""
if pattern is None:
return
try:
re.match(pattern, "")
except re.error:
print3("\nBad user-defined singular pattern:\n\t%s\n" % pattern)
raise BadUserDefinedPatternError
def checkpatplural(self, pattern):
"""
check for errors in a regex replace pattern
"""
return
def ud_match(self, word, wordlist):
for i in range(len(wordlist) - 2, -2, -2): # backwards through even elements
mo = re.search(r"^%s$" % wordlist[i], word, re.IGNORECASE)
if mo:
if wordlist[i + 1] is None:
return None
pl = re.sub(
r"\$(\d+)", r"\\1", wordlist[i + 1]
) # change $n to \n for expand
return mo.expand(pl)
return None
def classical(self, **kwargs):
"""
turn classical mode on and off for various categories
turn on all classical modes:
classical()
classical(all=True)
turn on or off specific claassical modes:
e.g.
classical(herd=True)
classical(names=False)
By default all classical modes are off except names.
unknown value in args or key in kwargs rasies
exception: UnknownClasicalModeError
"""
classical_mode = list(def_classical.keys())
if not kwargs:
self.classical_dict = all_classical.copy()
return
if "all" in kwargs:
if kwargs["all"]:
self.classical_dict = all_classical.copy()
else:
self.classical_dict = no_classical.copy()
for k, v in list(kwargs.items()):
if k in classical_mode:
self.classical_dict[k] = v
else:
raise UnknownClassicalModeError
def num(self, count=None, show=None): # (;$count,$show)
"""
Set the number to be used in other method calls.
Returns count.
Set show to False to return '' instead.
"""
if count is not None:
try:
self.persistent_count = int(count)
except ValueError:
raise BadNumValueError
if (show is None) or show:
return str(count)
else:
self.persistent_count = None
return ""
def gender(self, gender):
"""
set the gender for the singular of plural pronouns
can be one of:
'neuter' ('they' -> 'it')
'feminine' ('they' -> 'she')
'masculine' ('they' -> 'he')
'gender-neutral' ('they' -> 'they')
'feminine or masculine' ('they' -> 'she or he')
'masculine or feminine' ('they' -> 'he or she')
"""
if gender in singular_pronoun_genders:
self.thegender = gender
else:
raise BadGenderError
    def _get_value_from_ast(self, obj):
        """
        Return the literal Python value represented by the ast node *obj*.

        Supports numbers, strings, (nested) lists and tuples, and the
        constants True/False/None.  Anything else -- typically a bare
        variable name, e.g. plural(see) instead of plural('see') --
        raises NameError.
        """
        # NOTE(review): ast.Num / ast.Str / ast.NameConstant are deprecated
        # aliases of ast.Constant on modern Pythons -- verify before any
        # interpreter upgrade.
        if isinstance(obj, ast.Num):
            return obj.n
        elif isinstance(obj, ast.Str):
            return obj.s
        elif isinstance(obj, ast.List):
            return [self._get_value_from_ast(e) for e in obj.elts]
        elif isinstance(obj, ast.Tuple):
            return tuple([self._get_value_from_ast(e) for e in obj.elts])
        # None, True and False are NameConstants in Py3.4 and above.
        elif sys.version_info.major >= 3 and isinstance(obj, ast.NameConstant):
            return obj.value
        # For python versions below 3.4
        elif isinstance(obj, ast.Name) and (obj.id in ["True", "False", "None"]):
            return string_to_constant[obj.id]
        # Probably passed a variable name.
        # Or passed a single word without wrapping it in quotes as an argument
        # ex: p.inflect("I plural(see)") instead of p.inflect("I plural('see')")
        raise NameError("name '%s' is not defined" % obj.id)
def _string_to_substitute(self, mo, methods_dict):
"""
Return the string to be substituted for the match.
"""
matched_text, f_name = mo.groups()
# matched_text is the complete match string. e.g. plural_noun(cat)
# f_name is the function name. e.g. plural_noun
# Return matched_text if function name is not in methods_dict
if f_name not in methods_dict:
return matched_text
# Parse the matched text
a_tree = ast.parse(matched_text)
# get the args and kwargs from ast objects
args_list = [self._get_value_from_ast(a) for a in a_tree.body[0].value.args]
kwargs_list = {
kw.arg: self._get_value_from_ast(kw.value)
for kw in a_tree.body[0].value.keywords
}
# Call the corresponding function
return methods_dict[f_name](*args_list, **kwargs_list)
# 0. PERFORM GENERAL INFLECTIONS IN A STRING
def inflect(self, text):
"""
Perform inflections in a string.
e.g. inflect('The plural of cat is plural(cat)') returns
'The plural of cat is cats'
can use plural, plural_noun, plural_verb, plural_adj,
singular_noun, a, an, no, ordinal, number_to_words,
and prespart
"""
save_persistent_count = self.persistent_count
# Dictionary of allowed methods
methods_dict = {
"plural": self.plural,
"plural_adj": self.plural_adj,
"plural_noun": self.plural_noun,
"plural_verb": self.plural_verb,
"singular_noun": self.singular_noun,
"a": self.a,
"an": self.a,
"no": self.no,
"ordinal": self.ordinal,
"number_to_words": self.number_to_words,
"present_participle": self.present_participle,
"num": self.num,
}
# Regular expression to find Python's function call syntax
functions_re = re.compile(r"((\w+)\([^)]*\)*)", re.IGNORECASE)
output = functions_re.sub(
lambda mo: self._string_to_substitute(mo, methods_dict), text
)
self.persistent_count = save_persistent_count
return output
# ## PLURAL SUBROUTINES
def postprocess(self, orig, inflected):
if "|" in inflected:
inflected = inflected.split("|")[self.classical_dict["all"]]
result = inflected.split(" ")
# Try to fix word wise capitalization
for index, word in enumerate(orig.split(" ")):
if word == "I":
# Is this the only word for exceptions like this
# Where the original is fully capitalized
# without 'meaning' capitalization?
# Also this fails to handle a capitalizaion in context
continue
if word.capitalize() == word:
result[index] = result[index].capitalize()
if word == word.upper():
result[index] = result[index].upper()
return " ".join(result)
def partition_word(self, text):
mo = re.search(r"\A(\s*)(.+?)(\s*)\Z", text)
try:
return mo.group(1), mo.group(2), mo.group(3)
except AttributeError: # empty string
return "", "", ""
def plural(self, text, count=None):
"""
Return the plural of text.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(
word,
self._pl_special_adjective(word, count)
or self._pl_special_verb(word, count)
or self._plnoun(word, count),
)
return "{}{}{}".format(pre, plural, post)
def plural_noun(self, text, count=None):
"""
Return the plural of text, where text is a noun.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._plnoun(word, count))
return "{}{}{}".format(pre, plural, post)
def plural_verb(self, text, count=None):
"""
Return the plural of text, where text is a verb.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(
word,
self._pl_special_verb(word, count) or self._pl_general_verb(word, count),
)
return "{}{}{}".format(pre, plural, post)
def plural_adj(self, text, count=None):
"""
Return the plural of text, where text is an adjective.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._pl_special_adjective(word, count) or word)
return "{}{}{}".format(pre, plural, post)
def compare(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return (
self._plequal(word1, word2, self.plural_noun)
or self._plequal(word1, word2, self.plural_verb)
or self._plequal(word1, word2, self.plural_adj)
)
def compare_nouns(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as nouns
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_noun)
def compare_verbs(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as verbs
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_verb)
def compare_adjs(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as adjectives
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_adj)
def singular_noun(self, text, count=None, gender=None):
"""
Return the singular of text, where text is a plural noun.
If count supplied, then return the singular if count is one of:
1, a, an, one, each, every, this, that or if count is None
otherwise return text unchanged.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
sing = self._sinoun(word, count=count, gender=gender)
if sing is not False:
plural = self.postprocess(
word, self._sinoun(word, count=count, gender=gender)
)
return "{}{}{}".format(pre, plural, post)
return False
def _plequal(self, word1, word2, pl):
classval = self.classical_dict.copy()
self.classical_dict = all_classical.copy()
if word1 == word2:
return "eq"
if word1 == pl(word2):
return "p:s"
if pl(word1) == word2:
return "s:p"
self.classical_dict = no_classical.copy()
if word1 == pl(word2):
return "p:s"
if pl(word1) == word2:
return "s:p"
self.classical_dict = classval.copy()
if pl == self.plural or pl == self.plural_noun:
if self._pl_check_plurals_N(word1, word2):
return "p:p"
if self._pl_check_plurals_N(word2, word1):
return "p:p"
if pl == self.plural or pl == self.plural_adj:
if self._pl_check_plurals_adj(word1, word2):
return "p:p"
return False
def _pl_reg_plurals(self, pair, stems, end1, end2):
pattern = r"({})({}\|\1{}|{}\|\1{})".format(stems, end1, end2, end2, end1)
return bool(re.search(pattern, pair))
    def _pl_check_plurals_N(self, word1, word2):
        """
        Return True when word1 and word2 are alternative plural forms of
        the same noun (typically a modern and a classical plural).

        First checks the irregular-plural tables, then stem+ending
        patterns of the form "stem+end1|stem+end2" in either order.
        """
        # (stem pattern, modern ending, classical ending) triples fed to
        # _pl_reg_plurals.
        stem_endings = (
            (pl_sb_C_a_ata, "as", "ata"),
            (pl_sb_C_is_ides, "is", "ides"),
            (pl_sb_C_a_ae, "s", "e"),
            (pl_sb_C_en_ina, "ens", "ina"),
            (pl_sb_C_um_a, "ums", "a"),
            (pl_sb_C_us_i, "uses", "i"),
            (pl_sb_C_on_a, "ons", "a"),
            (pl_sb_C_o_i_stems, "os", "i"),
            (pl_sb_C_ex_ices, "exes", "ices"),
            (pl_sb_C_ix_ices, "ixes", "ices"),
            (pl_sb_C_i, "s", "i"),
            (pl_sb_C_im, "s", "im"),
            (".*eau", "s", "x"),
            (".*ieu", "s", "x"),
            (".*tri", "xes", "ces"),
            (".{2,}[yia]n", "xes", "ges"),
        )
        pair = "{}|{}".format(word1, word2)
        return (
            pair in pl_sb_irregular_s.values()
            or pair in pl_sb_irregular.values()
            or pair in pl_sb_irregular_caps.values()
            or any(
                self._pl_reg_plurals(pair, stems, end1, end2)
                for stems, end1, end2 in stem_endings
            )
        )
def _pl_check_plurals_adj(self, word1, word2):
word1a = word1[: word1.rfind("'")] if word1.endswith(("'s", "'")) else ""
word2a = word2[: word2.rfind("'")] if word2.endswith(("'s", "'")) else ""
return (
word1a
and word2a
and (
self._pl_check_plurals_N(word1a, word2a)
or self._pl_check_plurals_N(word2a, word1a)
)
)
def get_count(self, count=None):
if count is None and self.persistent_count is not None:
count = self.persistent_count
if count is not None:
count = (
1
if (
(str(count) in pl_count_one)
or (
self.classical_dict["zero"]
and str(count).lower() in pl_count_zero
)
)
else 2
)
else:
count = ""
return count
# @profile
def _plnoun(self, word, count=None):
count = self.get_count(count)
# DEFAULT TO PLURAL
if count == 1:
return word
# HANDLE USER-DEFINED NOUNS
value = self.ud_match(word, self.pl_sb_user_defined)
if value is not None:
return value
# HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS
if word == "":
return word
lowerword = word.lower()
if lowerword in pl_sb_uninflected_complete:
return word
if word in pl_sb_uninflected_caps:
return word
for k, v in pl_sb_uninflected_bysize.items():
if lowerword[-k:] in v:
return word
if self.classical_dict["herd"] and lowerword in pl_sb_uninflected_herd:
return word
# HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.)
mo = re.search(r"^(?:%s)$" % pl_sb_postfix_adj_stems, word, re.IGNORECASE)
if mo and mo.group(2) != "":
return "{}{}".format(self._plnoun(mo.group(1), 2), mo.group(2))
if " a " in lowerword or "-a-" in lowerword:
mo = re.search(r"^(?:%s)$" % pl_sb_prep_dual_compound, word, re.IGNORECASE)
if mo and mo.group(2) != "" and mo.group(3) != "":
return "{}{}{}".format(
self._plnoun(mo.group(1), 2), mo.group(2), self._plnoun(mo.group(3))
)
lowersplit = lowerword.split(" ")
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return " ".join(
lowersplit[: numword - 1]
+ [self._plnoun(lowersplit[numword - 1], 2)]
+ lowersplit[numword:]
)
# only pluralize denominators in units
mo = re.search(
r"(?P<denominator>.+)( (%s) .+)" % "|".join(["per", "a"]), lowerword
)
if mo:
index = len(mo.group("denominator"))
return "{}{}".format(self._plnoun(word[:index]), word[index:])
# handle units given in degrees (only accept if
# there is no more than one word following)
# degree Celsius => degrees Celsius but degree
# fahrenheit hour => degree fahrenheit hours
if len(lowersplit) >= 2 and lowersplit[-2] in ["degree"]:
return " ".join([self._plnoun(lowersplit[0])] + lowersplit[1:])
lowersplit = lowerword.split("-")
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return " ".join(
lowersplit[: numword - 1]
+ [
self._plnoun(lowersplit[numword - 1], 2)
+ "-"
+ lowersplit[numword]
+ "-"
]
) + " ".join(lowersplit[(numword + 1) :])
# HANDLE PRONOUNS
for k, v in pl_pron_acc_keys_bysize.items():
if lowerword[-k:] in v: # ends with accusivate pronoun
for pk, pv in pl_prep_bysize.items():
if lowerword[:pk] in pv: # starts with a prep
if lowerword.split() == [lowerword[:pk], lowerword[-k:]]:
# only whitespace in between
return lowerword[:-k] + pl_pron_acc[lowerword[-k:]]
try:
return pl_pron_nom[word.lower()]
except KeyError:
pass
try:
return pl_pron_acc[word.lower()]
except KeyError:
pass
# HANDLE ISOLATED IRREGULAR PLURALS
wordsplit = word.split()
wordlast = wordsplit[-1]
lowerwordlast = wordlast.lower()
if wordlast in list(pl_sb_irregular_caps.keys()):
llen = len(wordlast)
return "{}{}".format(word[:-llen], pl_sb_irregular_caps[wordlast])
if lowerwordlast in list(pl_sb_irregular.keys()):
llen = len(lowerwordlast)
return "{}{}".format(word[:-llen], pl_sb_irregular[lowerwordlast])
if (" ".join(wordsplit[-2:])).lower() in list(pl_sb_irregular_compound.keys()):
llen = len(
" ".join(wordsplit[-2:])
) # TODO: what if 2 spaces between these words?
return "{}{}".format(
word[:-llen],
pl_sb_irregular_compound[(" ".join(wordsplit[-2:])).lower()],
)
if lowerword[-3:] == "quy":
return word[:-1] + "ies"
if lowerword[-6:] == "person":
if self.classical_dict["persons"]:
return word + "s"
else:
return word[:-4] + "ople"
# HANDLE FAMILIES OF IRREGULAR PLURALS
if lowerword[-3:] == "man":
for k, v in pl_sb_U_man_mans_bysize.items():
if lowerword[-k:] in v:
return word + "s"
for k, v in pl_sb_U_man_mans_caps_bysize.items():
if word[-k:] in v:
return word + "s"
return word[:-3] + "men"
if lowerword[-5:] == "mouse":
return word[:-5] + "mice"
if lowerword[-5:] == "louse":
return word[:-5] + "lice"
if lowerword[-5:] == "goose":
return word[:-5] + "geese"
if lowerword[-5:] == "tooth":
return word[:-5] + "teeth"
if lowerword[-4:] == "foot":
return word[:-4] + "feet"
if lowerword[-4:] == "taco":
return word[:-5] + "tacos"
if lowerword == "die":
return "dice"
# HANDLE UNASSIMILATED IMPORTS
if lowerword[-4:] == "ceps":
return word
if lowerword[-4:] == "zoon":
return word[:-2] + "a"
if lowerword[-3:] in ("cis", "sis", "xis"):
return word[:-2] + "es"
for lastlet, d, numend, post in (
("h", pl_sb_U_ch_chs_bysize, None, "s"),
("x", pl_sb_U_ex_ices_bysize, -2, "ices"),
("x", pl_sb_U_ix_ices_bysize, -2, "ices"),
("m", pl_sb_U_um_a_bysize, -2, "a"),
("s", pl_sb_U_us_i_bysize, -2, "i"),
("n", pl_sb_U_on_a_bysize, -2, "a"),
("a", pl_sb_U_a_ae_bysize, None, "e"),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
# HANDLE INCOMPLETELY ASSIMILATED IMPORTS
if self.classical_dict["ancient"]:
if lowerword[-4:] == "trix":
return word[:-1] + "ces"
if lowerword[-3:] in ("eau", "ieu"):
return word + "x"
if lowerword[-3:] in ("ynx", "inx", "anx") and len(word) > 4:
return word[:-1] + "ges"
for lastlet, d, numend, post in (
("n", pl_sb_C_en_ina_bysize, -2, "ina"),
("x", pl_sb_C_ex_ices_bysize, -2, "ices"),
("x", pl_sb_C_ix_ices_bysize, -2, "ices"),
("m", pl_sb_C_um_a_bysize, -2, "a"),
("s", pl_sb_C_us_i_bysize, -2, "i"),
("s", pl_sb_C_us_us_bysize, None, ""),
("a", pl_sb_C_a_ae_bysize, None, "e"),
("a", pl_sb_C_a_ata_bysize, None, "ta"),
("s", pl_sb_C_is_ides_bysize, -1, "des"),
("o", pl_sb_C_o_i_bysize, -1, "i"),
("n", pl_sb_C_on_a_bysize, -2, "a"),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
for d, numend, post in (
(pl_sb_C_i_bysize, None, "i"),
(pl_sb_C_im_bysize, None, "im"),
):
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
# HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SILIBANTS
if lowerword in pl_sb_singular_s_complete:
return word + "es"
for k, v in pl_sb_singular_s_bysize.items():
if lowerword[-k:] in v:
return word + "es"
if lowerword[-2:] == "es" and word[0] == word[0].upper():
return word + "es"
if lowerword[-1] == "z":
for k, v in pl_sb_z_zes_bysize.items():
if lowerword[-k:] in v:
return word + "es"
if lowerword[-2:-1] != "z":
return word + "zes"
if lowerword[-2:] == "ze":
for k, v in pl_sb_ze_zes_bysize.items():
if lowerword[-k:] in v:
return word + "s"
if lowerword[-2:] in ("ch", "sh", "zz", "ss") or lowerword[-1] == "x":
return word + "es"
# HANDLE ...f -> ...ves
if lowerword[-3:] in ("elf", "alf", "olf"):
return word[:-1] + "ves"
if lowerword[-3:] == "eaf" and lowerword[-4:-3] != "d":
return word[:-1] + "ves"
if lowerword[-4:] in ("nife", "life", "wife"):
return word[:-2] + "ves"
if lowerword[-3:] == "arf":
return word[:-1] + "ves"
# HANDLE ...y
if lowerword[-1] == "y":
if lowerword[-2:-1] in "aeiou" or len(word) == 1:
return word + "s"
if self.classical_dict["names"]:
if lowerword[-1] == "y" and word[0] == word[0].upper():
return word + "s"
return word[:-1] + "ies"
# HANDLE ...o
if lowerword in pl_sb_U_o_os_complete:
return word + "s"
for k, v in pl_sb_U_o_os_bysize.items():
if lowerword[-k:] in v:
return word + "s"
if lowerword[-2:] in ("ao", "eo", "io", "oo", "uo"):
return word + "s"
if lowerword[-1] == "o":
return word + "es"
# OTHERWISE JUST ADD ...s
return "%ss" % word
    def _pl_special_verb(self, word, count=None):
        """
        Pluralise an irregular or explicitly 3rd-person-singular verb.

        Returns the plural form, or False when the verb is regular /
        unrecognised so the caller can fall through to _pl_general_verb.
        """
        # NOTE(review): this checks the *raw* count against the zero-words
        # before get_count() normalises it -- confirm that is intentional.
        if self.classical_dict["zero"] and str(count).lower() in pl_count_zero:
            return False
        count = self.get_count(count)
        if count == 1:
            return word
        # HANDLE USER-DEFINED VERBS
        value = self.ud_match(word, self.pl_v_user_defined)
        if value is not None:
            return value
        # HANDLE IRREGULAR PRESENT TENSE (SIMPLE AND COMPOUND)
        lowerword = word.lower()
        try:
            firstword = lowerword.split()[0]
        except IndexError:
            return False  # word is ''
        if firstword in list(plverb_irregular_pres.keys()):
            return "{}{}".format(
                plverb_irregular_pres[firstword], word[len(firstword) :]
            )
        # HANDLE IRREGULAR FUTURE, PRETERITE AND PERFECT TENSES
        if firstword in plverb_irregular_non_pres:
            return word
        # HANDLE PRESENT NEGATIONS (SIMPLE AND COMPOUND)
        if firstword.endswith("n't") and firstword[:-3] in list(
            plverb_irregular_pres.keys()
        ):
            return "{}n't{}".format(
                plverb_irregular_pres[firstword[:-3]], word[len(firstword) :]
            )
        if firstword.endswith("n't"):
            return word
        # HANDLE SPECIAL CASES
        mo = re.search(r"^(%s)$" % plverb_special_s, word)
        if mo:
            return False
        if re.search(r"\s", word):
            return False
        if lowerword == "quizzes":
            return "quiz"
        # HANDLE STANDARD 3RD PERSON (CHOP THE ...(e)s OFF SINGLE WORDS)
        if (
            lowerword[-4:] in ("ches", "shes", "zzes", "sses")
            or lowerword[-3:] == "xes"
        ):
            return word[:-2]
        # NOTE(review): some branches below return *lowerword*-based results,
        # losing the original capitalisation; the caller's postprocess()
        # restores simple capitalisation afterwards.
        if lowerword[-3:] == "ies" and len(word) > 3:
            return lowerword[:-3] + "y"
        if (
            lowerword in pl_v_oes_oe
            or lowerword[-4:] in pl_v_oes_oe_endings_size4
            or lowerword[-5:] in pl_v_oes_oe_endings_size5
        ):
            return word[:-1]
        if lowerword.endswith("oes") and len(word) > 3:
            return lowerword[:-2]
        mo = re.search(r"^(.*[^s])s$", word, re.IGNORECASE)
        if mo:
            return mo.group(1)
        # OTHERWISE, A REGULAR VERB (HANDLE ELSEWHERE)
        return False
def _pl_general_verb(self, word, count=None):
count = self.get_count(count)
if count == 1:
return word
# HANDLE AMBIGUOUS PRESENT TENSES (SIMPLE AND COMPOUND)
mo = re.search(
r"^(%s)((\s.*)?)$" % plverb_ambiguous_pres_keys, word, re.IGNORECASE
)
if mo:
return "{}{}".format(
plverb_ambiguous_pres[mo.group(1).lower()], mo.group(2)
)
# HANDLE AMBIGUOUS PRETERITE AND PERFECT TENSES
mo = re.search(
r"^(%s)((\s.*)?)$" % plverb_ambiguous_non_pres, word, re.IGNORECASE
)
if mo:
return word
# OTHERWISE, 1st OR 2ND PERSON IS UNINFLECTED
return word
def _pl_special_adjective(self, word, count=None):
count = self.get_count(count)
if count == 1:
return word
# HANDLE USER-DEFINED ADJECTIVES
value = self.ud_match(word, self.pl_adj_user_defined)
if value is not None:
return value
# HANDLE KNOWN CASES
mo = re.search(r"^(%s)$" % pl_adj_special_keys, word, re.IGNORECASE)
if mo:
return "%s" % (pl_adj_special[mo.group(1).lower()])
# HANDLE POSSESSIVES
mo = re.search(r"^(%s)$" % pl_adj_poss_keys, word, re.IGNORECASE)
if mo:
return "%s" % (pl_adj_poss[mo.group(1).lower()])
mo = re.search(r"^(.*)'s?$", word)
if mo:
pl = self.plural_noun(mo.group(1))
trailing_s = "" if pl[-1] == "s" else "s"
return "{}'{}".format(pl, trailing_s)
# OTHERWISE, NO IDEA
return False
# @profile
def _sinoun(self, word, count=None, gender=None):
    """Return the singular of plural *word*, or False if no rule applies.

    The rules below are tried strictly in order, from most specific
    (user-defined words, uninflected nouns, compounds, pronouns,
    irregular plurals) to most general (suffix families, final "remove
    trailing s"). Reordering them would change results.

    count: if it resolves to 2 (plural), *word* is returned untouched.
    gender: one of singular_pronoun_genders, used when singularizing
        pronouns; defaults to self.thegender.
    Raises BadGenderError for an unrecognised gender value.
    """
    count = self.get_count(count)
    # DEFAULT TO PLURAL
    if count == 2:
        return word
    # SET THE GENDER
    try:
        if gender is None:
            gender = self.thegender
        elif gender not in singular_pronoun_genders:
            raise BadGenderError
    except (TypeError, IndexError):
        raise BadGenderError
    # HANDLE USER-DEFINED NOUNS
    value = self.ud_match(word, self.si_sb_user_defined)
    if value is not None:
        return value
    # HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS
    if word == "":
        return word
    lowerword = word.lower()
    if word in si_sb_ois_oi_case:
        return word[:-1]
    if lowerword in pl_sb_uninflected_complete:
        return word
    if word in pl_sb_uninflected_caps:
        return word
    for k, v in pl_sb_uninflected_bysize.items():
        if lowerword[-k:] in v:
            return word
    if self.classical_dict["herd"] and lowerword in pl_sb_uninflected_herd:
        return word
    if lowerword in pl_sb_C_us_us:
        return word
    # HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.)
    mo = re.search(r"^(?:%s)$" % pl_sb_postfix_adj_stems, word, re.IGNORECASE)
    if mo and mo.group(2) != "":
        # Singularize only the head noun and keep the postfix adjective.
        return "{}{}".format(
            self._sinoun(mo.group(1), 1, gender=gender), mo.group(2)
        )
    lowersplit = lowerword.split(" ")
    if len(lowersplit) >= 3:
        for numword in range(1, len(lowersplit) - 1):
            if lowersplit[numword] in pl_prep_list_da:
                # "mothers in law" -> singularize the word before the preposition
                return " ".join(
                    lowersplit[: numword - 1]
                    + [
                        self._sinoun(lowersplit[numword - 1], 1, gender=gender)
                        or lowersplit[numword - 1]
                    ]
                    + lowersplit[numword:]
                )
    lowersplit = lowerword.split("-")
    if len(lowersplit) >= 3:
        for numword in range(1, len(lowersplit) - 1):
            if lowersplit[numword] in pl_prep_list_da:
                # Hyphenated compounds ("mothers-in-law") handled the same way.
                return " ".join(
                    lowersplit[: numword - 1]
                    + [
                        (
                            self._sinoun(lowersplit[numword - 1], 1, gender=gender)
                            or lowersplit[numword - 1]
                        )
                        + "-"
                        + lowersplit[numword]
                        + "-"
                    ]
                ) + " ".join(lowersplit[(numword + 1) :])
    # HANDLE PRONOUNS
    for k, v in si_pron_acc_keys_bysize.items():
        if lowerword[-k:] in v:  # ends with accusivate pronoun
            for pk, pv in pl_prep_bysize.items():
                if lowerword[:pk] in pv:  # starts with a prep
                    if lowerword.split() == [lowerword[:pk], lowerword[-k:]]:
                        # only whitespace in between
                        return lowerword[:-k] + get_si_pron(
                            "acc", lowerword[-k:], gender
                        )
    try:
        return get_si_pron("nom", word.lower(), gender)
    except KeyError:
        pass
    try:
        return get_si_pron("acc", word.lower(), gender)
    except KeyError:
        pass
    # HANDLE ISOLATED IRREGULAR PLURALS
    wordsplit = word.split()
    wordlast = wordsplit[-1]
    lowerwordlast = wordlast.lower()
    if wordlast in list(si_sb_irregular_caps.keys()):
        llen = len(wordlast)
        return "{}{}".format(word[:-llen], si_sb_irregular_caps[wordlast])
    if lowerwordlast in list(si_sb_irregular.keys()):
        llen = len(lowerwordlast)
        return "{}{}".format(word[:-llen], si_sb_irregular[lowerwordlast])
    if (" ".join(wordsplit[-2:])).lower() in list(si_sb_irregular_compound.keys()):
        llen = len(
            " ".join(wordsplit[-2:])
        )  # TODO: what if 2 spaces between these words?
        return "{}{}".format(
            word[:-llen],
            si_sb_irregular_compound[(" ".join(wordsplit[-2:])).lower()],
        )
    if lowerword[-5:] == "quies":
        return word[:-3] + "y"
    if lowerword[-7:] == "persons":
        return word[:-1]
    if lowerword[-6:] == "people":
        return word[:-4] + "rson"
    # HANDLE FAMILIES OF IRREGULAR PLURALS
    if lowerword[-4:] == "mans":
        for k, v in si_sb_U_man_mans_bysize.items():
            if lowerword[-k:] in v:
                return word[:-1]
        for k, v in si_sb_U_man_mans_caps_bysize.items():
            if word[-k:] in v:
                return word[:-1]
    if lowerword[-3:] == "men":
        return word[:-3] + "man"
    if lowerword[-4:] == "mice":
        return word[:-4] + "mouse"
    if lowerword[-4:] == "lice":
        return word[:-4] + "louse"
    if lowerword[-5:] == "geese":
        return word[:-5] + "goose"
    if lowerword[-5:] == "teeth":
        return word[:-5] + "tooth"
    if lowerword[-4:] == "feet":
        return word[:-4] + "foot"
    if lowerword == "dice":
        return "die"
    # HANDLE UNASSIMILATED IMPORTS
    if lowerword[-4:] == "ceps":
        return word
    if lowerword[-3:] == "zoa":
        return word[:-1] + "on"
    # Suffix-family tables: (final letter, table, chars to drop, replacement)
    for lastlet, d, numend, post in (
        ("s", si_sb_U_ch_chs_bysize, -1, ""),
        ("s", si_sb_U_ex_ices_bysize, -4, "ex"),
        ("s", si_sb_U_ix_ices_bysize, -4, "ix"),
        ("a", si_sb_U_um_a_bysize, -1, "um"),
        ("i", si_sb_U_us_i_bysize, -1, "us"),
        ("a", si_sb_U_on_a_bysize, -1, "on"),
        ("e", si_sb_U_a_ae_bysize, -1, ""),
    ):
        if lowerword[-1] == lastlet:  # this test to add speed
            for k, v in d.items():
                if lowerword[-k:] in v:
                    return word[:numend] + post
    # HANDLE INCOMPLETELY ASSIMILATED IMPORTS
    if self.classical_dict["ancient"]:
        if lowerword[-6:] == "trices":
            return word[:-3] + "x"
        if lowerword[-4:] in ("eaux", "ieux"):
            return word[:-1]
        if lowerword[-5:] in ("ynges", "inges", "anges") and len(word) > 6:
            return word[:-3] + "x"
        for lastlet, d, numend, post in (
            ("a", si_sb_C_en_ina_bysize, -3, "en"),
            ("s", si_sb_C_ex_ices_bysize, -4, "ex"),
            ("s", si_sb_C_ix_ices_bysize, -4, "ix"),
            ("a", si_sb_C_um_a_bysize, -1, "um"),
            ("i", si_sb_C_us_i_bysize, -1, "us"),
            ("s", pl_sb_C_us_us_bysize, None, ""),
            ("e", si_sb_C_a_ae_bysize, -1, ""),
            ("a", si_sb_C_a_ata_bysize, -2, ""),
            ("s", si_sb_C_is_ides_bysize, -3, "s"),
            ("i", si_sb_C_o_i_bysize, -1, "o"),
            ("a", si_sb_C_on_a_bysize, -1, "on"),
            ("m", si_sb_C_im_bysize, -2, ""),
            ("i", si_sb_C_i_bysize, -1, ""),
        ):
            if lowerword[-1] == lastlet:  # this test to add speed
                for k, v in d.items():
                    if lowerword[-k:] in v:
                        return word[:numend] + post
    # HANDLE PLURLS ENDING IN uses -> use
    if (
        lowerword[-6:] == "houses"
        or word in si_sb_uses_use_case
        or lowerword in si_sb_uses_use
    ):
        return word[:-1]
    # HANDLE PLURLS ENDING IN ies -> ie
    if word in si_sb_ies_ie_case or lowerword in si_sb_ies_ie:
        return word[:-1]
    # HANDLE PLURLS ENDING IN oes -> oe
    if (
        lowerword[-5:] == "shoes"
        or word in si_sb_oes_oe_case
        or lowerword in si_sb_oes_oe
    ):
        return word[:-1]
    # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SILIBANTS
    if word in si_sb_sses_sse_case or lowerword in si_sb_sses_sse:
        return word[:-1]
    if lowerword in si_sb_singular_s_complete:
        return word[:-2]
    for k, v in si_sb_singular_s_bysize.items():
        if lowerword[-k:] in v:
            return word[:-2]
    if lowerword[-4:] == "eses" and word[0] == word[0].upper():
        return word[:-2]
    if lowerword in si_sb_z_zes:
        return word[:-2]
    if lowerword in si_sb_zzes_zz:
        return word[:-2]
    if lowerword[-4:] == "zzes":
        return word[:-3]
    if word in si_sb_ches_che_case or lowerword in si_sb_ches_che:
        return word[:-1]
    if lowerword[-4:] in ("ches", "shes"):
        return word[:-2]
    if lowerword in si_sb_xes_xe:
        return word[:-1]
    if lowerword[-3:] == "xes":
        return word[:-2]
    # HANDLE ...f -> ...ves
    if word in si_sb_ves_ve_case or lowerword in si_sb_ves_ve:
        return word[:-1]
    if lowerword[-3:] == "ves":
        if lowerword[-5:-3] in ("el", "al", "ol"):
            return word[:-3] + "f"
        if lowerword[-5:-3] == "ea" and word[-6:-5] != "d":
            return word[:-3] + "f"
        if lowerword[-5:-3] in ("ni", "li", "wi"):
            return word[:-3] + "fe"
        if lowerword[-5:-3] == "ar":
            return word[:-3] + "f"
    # HANDLE ...y
    if lowerword[-2:] == "ys":
        if len(lowerword) > 2 and lowerword[-3] in "aeiou":
            return word[:-1]
        if self.classical_dict["names"]:
            if lowerword[-2:] == "ys" and word[0] == word[0].upper():
                return word[:-1]
    if lowerword[-3:] == "ies":
        return word[:-3] + "y"
    # HANDLE ...o
    if lowerword[-2:] == "os":
        if lowerword in si_sb_U_o_os_complete:
            return word[:-1]
        for k, v in si_sb_U_o_os_bysize.items():
            if lowerword[-k:] in v:
                return word[:-1]
        if lowerword[-3:] in ("aos", "eos", "ios", "oos", "uos"):
            return word[:-1]
    if lowerword[-3:] == "oes":
        return word[:-2]
    # UNASSIMILATED IMPORTS FINAL RULE
    if word in si_sb_es_is:
        return word[:-2] + "is"
    # OTHERWISE JUST REMOVE ...s
    if lowerword[-1] == "s":
        return word[:-1]
    # COULD NOT FIND SINGULAR
    return False
# ADJECTIVES
def a(self, text, count=1):
    """
    Prefix text with the correct indefinite article ('a' or 'an').
    If count is not one, count itself is placed before text instead
    of an article.
    Leading and trailing whitespace of text is preserved.
    """
    # Capture surrounding whitespace and drop any existing leading "a"/"an".
    match = re.search(r"\A(\s*)(?:an?\s+)?(.+?)(\s*)\Z", text, re.IGNORECASE)
    if not match:
        return ""
    leading, word, trailing = match.group(1), match.group(2), match.group(3)
    if not word:
        return text
    return "{}{}{}".format(leading, self._indef_article(word, count), trailing)
an = a
def _indef_article(self, word, count):
    """Return "a word", "an word" or "<count> word" as appropriate.

    The rule groups are tried strictly in order: user-defined overrides,
    ordinals, explicit special cases, abbreviations, consonants,
    vowel-like special forms, special capitals, vowels, then "y" acting
    as a consonant sound; the final fallback is "a".
    """
    mycount = self.get_count(count)
    if mycount != 1:
        return "{} {}".format(count, word)
    # HANDLE USER-DEFINED VARIANTS
    value = self.ud_match(word, self.A_a_user_defined)
    if value is not None:
        return "{} {}".format(value, word)
    # HANDLE ORDINAL FORMS
    for a in ((r"^(%s)" % A_ordinal_a, "a"), (r"^(%s)" % A_ordinal_an, "an")):
        mo = re.search(a[0], word, re.IGNORECASE)
        if mo:
            return "{} {}".format(a[1], word)
    # HANDLE SPECIAL CASES
    for a in (
        (r"^(%s)" % A_explicit_an, "an"),
        (r"^[aefhilmnorsx]$", "an"),
        (r"^[bcdgjkpqtuvwyz]$", "a"),
    ):
        mo = re.search(a[0], word, re.IGNORECASE)
        if mo:
            return "{} {}".format(a[1], word)
    # HANDLE ABBREVIATIONS
    for a in (
        (r"(%s)" % A_abbrev, "an", re.VERBOSE),
        (r"^[aefhilmnorsx][.-]", "an", re.IGNORECASE),
        (r"^[a-z][.-]", "a", re.IGNORECASE),
    ):
        mo = re.search(a[0], word, a[2])  # note: regex flags come from the rule tuple
        if mo:
            return "{} {}".format(a[1], word)
    # HANDLE CONSONANTS
    mo = re.search(r"^[^aeiouy]", word, re.IGNORECASE)
    if mo:
        return "a %s" % word
    # HANDLE SPECIAL VOWEL-FORMS (vowel spellings with consonant sounds)
    for a in (
        (r"^e[uw]", "a"),
        (r"^onc?e\b", "a"),
        (r"^onetime\b", "a"),
        (r"^uni([^nmd]|mo)", "a"),
        (r"^u[bcfghjkqrst][aeiou]", "a"),
        (r"^ukr", "a"),
        (r"^(%s)" % A_explicit_a, "a"),
    ):
        mo = re.search(a[0], word, re.IGNORECASE)
        if mo:
            return "{} {}".format(a[1], word)
    # HANDLE SPECIAL CAPITALS (e.g. "a UN resolution")
    mo = re.search(r"^U[NK][AIEO]?", word)
    if mo:
        return "a %s" % word
    # HANDLE VOWELS
    mo = re.search(r"^[aeiou]", word, re.IGNORECASE)
    if mo:
        return "an %s" % word
    # HANDLE y... (BEFORE CERTAIN CONSONANTS IMPLIES (UNNATURALIZED) "i.." SOUND)
    mo = re.search(r"^(%s)" % A_y_cons, word, re.IGNORECASE)
    if mo:
        return "an %s" % word
    # OTHERWISE, GUESS "a"
    return "a %s" % word
# 2. TRANSLATE ZERO-QUANTIFIED $word TO "no plural($word)"
def no(self, text, count=None):
    """
    Return count (or 'no') followed by the correctly inflected text.
    A count of 0, 'no', 'zero' or 'nil' yields 'no' plus the plural of
    text.  Counts such as 1, 'a', 'an', 'one', 'each', 'every', 'this'
    or 'that' are simply prepended to text; any other count is
    prepended to the plural of text.
    Count and text are always separated by a single space; whitespace
    at the start and end is preserved.
    """
    if count is None:
        # Fall back to a sticky count set earlier, then to zero.
        count = self.persistent_count if self.persistent_count is not None else 0
    match = re.search(r"\A(\s*)(.+?)(\s*)\Z", text)
    pre, word, post = match.group(1), match.group(2), match.group(3)
    if str(count).lower() in pl_count_zero:
        return "{}no {}{}".format(pre, self.plural(word, 0), post)
    return "{}{} {}{}".format(pre, count, self.plural(word, count), post)
# PARTICIPLES
def present_participle(self, word):
    """
    Return the present participle for word.
    word is the 3rd person singular verb.
    """
    base = self.plural_verb(word, 2)
    transformations = (
        (r"ie$", r"y"),
        (r"ue$", r"u"),  # TODO: isn't ue$ -> u encompassed in the following rule?
        (r"([auy])e$", r"\g<1>"),
        (r"ski$", r"ski"),
        (r"[^b]i$", r""),
        (r"^(are|were)$", r"be"),
        (r"^(had)$", r"hav"),
        (r"^(hoe)$", r"\g<1>"),
        (r"([^e])e$", r"\g<1>"),
        (r"er$", r"er"),
        (r"([^aeiou][aeiouy]([bdgmnprst]))$", r"\g<1>\g<2>"),
    )
    # Apply the first matching stem rule (identity rules act as stoppers).
    stem = base
    for pattern, replacement in transformations:
        stem, hits = re.subn(pattern, replacement, base)
        if hits:
            break
    return "%sing" % stem
# NUMERICAL INFLECTIONS
def ordinal(self, num):
    """
    Return the ordinal of num.
    num can be an integer or text
    e.g. ordinal(1) returns '1st'
    ordinal('one') returns 'first'
    """
    if re.match(r"\d", str(num)):
        try:
            # cheap duck-typing test: succeeds for real numbers,
            # raises TypeError for digit strings
            num % 2
            n = num
        except TypeError:
            if "." in str(num):
                try:
                    # numbers after decimal,
                    # so only need last one for ordinal
                    n = int(num[-1])
                except ValueError:  # ends with '.', so need to use whole string
                    n = int(num[:-1])
            else:
                n = int(num)
        try:
            # 11/12/13 need 'th', so try the last two digits first
            post = nth[n % 100]
        except KeyError:
            post = nth[n % 10]
        return "{}{}".format(num, post)
    else:
        mo = re.search(r"(%s)\Z" % ordinal_suff, num)
        try:
            # 'ordinal' here is the module-level dict (the method name only
            # shadows it as a class attribute, not in this scope)
            post = ordinal[mo.group(1)]
            return re.sub(r"(%s)\Z" % ordinal_suff, post, num)
        except AttributeError:
            # mo is None: no known suffix matched, default to '...th'
            return "%sth" % num
def millfn(self, ind=0):
    """Return the scale word ('', ' thousand', ' million', ...) for index ind."""
    if ind >= len(mill):
        print3("number out of range")
        raise NumOutOfRangeError
    return mill[ind]
def unitfn(self, units, mindex=0):
    """Spell a single digit followed by the scale word for mindex."""
    return unit[units] + self.millfn(mindex)
def tenfn(self, tens, units, mindex=0):
    """Spell a two-digit number (tens, units) plus the mindex scale word."""
    if tens == 1:
        # 10-19 use the dedicated teen words.
        return teen[units] + mill[mindex]
    hyphen = "-" if tens and units else ""
    return ten[tens] + hyphen + unit[units] + self.millfn(mindex)
def hundfn(self, hundreds, tens, units, mindex):
    """Spell a three-digit group, ending with the mindex scale word and ', '."""
    if hundreds:
        # The 'and' word only appears when something follows the hundreds digit.
        joiner = " %s " % self.number_args["andword"] if tens or units else ""
        return (
            unit[hundreds]  # plain unit lookup is enough for a single digit
            + " hundred"
            + joiner
            + self.tenfn(tens, units)
            + self.millfn(mindex)
            + ", "
        )
    if tens or units:
        return self.tenfn(tens, units) + self.millfn(mindex) + ", "
    return ""
def group1sub(self, mo):
    """re.sub callback: spell one digit in group-of-1 mode."""
    digit = int(mo.group(1))
    if digit == 1:
        # 1 and 0 use the caller-configured words.
        return " %s, " % self.number_args["one"]
    if digit:
        return "%s, " % unit[digit]
    return " %s, " % self.number_args["zero"]
def group1bsub(self, mo):
    """re.sub callback: like group1sub, but 1 is spelled with the unit word."""
    digit = int(mo.group(1))
    if digit:
        return "%s, " % unit[digit]
    return " %s, " % self.number_args["zero"]
def group2sub(self, mo):
    """re.sub callback: spell a pair of digits in group-of-2 mode."""
    tens, units = int(mo.group(1)), int(mo.group(2))
    if tens:
        return "%s, " % self.tenfn(tens, units)
    if units:
        return " {} {}, ".format(self.number_args["zero"], unit[units])
    return " {} {}, ".format(self.number_args["zero"], self.number_args["zero"])
def group3sub(self, mo):
    """re.sub callback: spell a triple of digits in group-of-3 mode."""
    hundreds = int(mo.group(1))
    tens = int(mo.group(2))
    units = int(mo.group(3))
    # The hundreds digit: 1 and 0 use the caller-configured words.
    if hundreds == 1:
        hundreds_word = " %s" % self.number_args["one"]
    elif hundreds:
        hundreds_word = "%s" % unit[hundreds]
    else:
        hundreds_word = " %s" % self.number_args["zero"]
    # The trailing two digits.
    if tens:
        tail = self.tenfn(tens, units)
    elif units:
        tail = " {} {}".format(self.number_args["zero"], unit[units])
    else:
        tail = " {} {}".format(
            self.number_args["zero"], self.number_args["zero"]
        )
    return "{} {}, ".format(hundreds_word, tail)
def hundsub(self, mo):
    """re.sub callback: spell a 3-digit group, then bump the scale counter."""
    spelled = self.hundfn(
        int(mo.group(1)), int(mo.group(2)), int(mo.group(3)), self.mill_count
    )
    self.mill_count += 1
    return spelled
def tensub(self, mo):
    """re.sub callback: spell a leading 2-digit group at the current scale."""
    tens, units = int(mo.group(1)), int(mo.group(2))
    return "%s, " % self.tenfn(tens, units, self.mill_count)
def unitsub(self, mo):
    """re.sub callback: spell a leading single digit at the current scale."""
    digit = int(mo.group(1))
    return "%s, " % self.unitfn(digit, self.mill_count)
def enword(self, num, group):
    """Convert the digit string *num* to words, honouring the grouping mode.

    group 1/2/3 spell the digits in fixed-size chunks (the leftover digits
    are handled by the single-application re.sub calls); group 0 spells the
    whole number with hundreds/tens/units plus scale words.  The result may
    carry trailing ', ' separators that number_to_words cleans up.
    """
    if group == 1:
        num = re.sub(r"(\d)", self.group1sub, num)
    elif group == 2:
        num = re.sub(r"(\d)(\d)", self.group2sub, num)
        # An odd leading digit is spelled on its own.
        num = re.sub(r"(\d)", self.group1bsub, num, 1)
    elif group == 3:
        num = re.sub(r"(\d)(\d)(\d)", self.group3sub, num)
        num = re.sub(r"(\d)(\d)", self.group2sub, num, 1)
        num = re.sub(r"(\d)", self.group1sub, num, 1)
    elif int(num) == 0:
        num = self.number_args["zero"]
    elif int(num) == 1:
        num = self.number_args["one"]
    else:
        num = num.lstrip().lstrip("0")
        # hundsub increments mill_count after each 3-digit group it spells.
        self.mill_count = 0
        # surely there's a better way to do the next bit
        mo = re.search(r"(\d)(\d)(\d)(?=\D*\Z)", num)
        while mo:
            # Consume the last three remaining digits each pass.
            num = re.sub(r"(\d)(\d)(\d)(?=\D*\Z)", self.hundsub, num, 1)
            mo = re.search(r"(\d)(\d)(\d)(?=\D*\Z)", num)
        num = re.sub(r"(\d)(\d)(?=\D*\Z)", self.tensub, num, 1)
        num = re.sub(r"(\d)(?=\D*\Z)", self.unitsub, num, 1)
    return num
def blankfn(self, mo):
    """re.sub callback that deletes every match.

    TODO: a plain replacement string for re.sub would do the same job.
    """
    return ""
def commafn(self, mo):
    """re.sub callback that replaces every match with ','.

    TODO: a plain replacement string for re.sub would do the same job.
    """
    return ","
def spacefn(self, mo):
    """re.sub callback that replaces every match with a single space.

    TODO: a plain replacement string for re.sub would do the same job.
    """
    return " "
def number_to_words(
    self,
    num,
    wantlist=False,
    group=0,
    comma=",",
    andword="and",
    zero="zero",
    one="one",
    decimal="point",
    threshold=None,
):
    """
    Return a number in words.
    group = 1, 2 or 3 to group numbers before turning into words
    comma: define comma
    andword: word for 'and'. Can be set to ''.
        e.g. "one hundred and one" vs "one hundred one"
    zero: word for '0'
    one: word for '1'
    decimal: word for decimal point
    threshold: numbers above threshold not turned into words
    parameters not remembered from last call. Departure from Perl version.
    """
    # number_args is read by the enword/group*sub helpers during re.sub.
    self.number_args = dict(andword=andword, zero=zero, one=one)
    num = "%s" % num
    # Handle "stylistic" conversions (up to a given threshold)...
    if threshold is not None and float(num) > threshold:
        spnum = num.split(".", 1)
        # Insert thousands separators right-to-left; the loop condition
        # only gates on a truthy comma, the break ends the iteration.
        while comma:
            (spnum[0], n) = re.subn(r"(\d)(\d{3}(?:,|\Z))", r"\1,\2", spnum[0])
            if n == 0:
                break
        try:
            return "{}.{}".format(spnum[0], spnum[1])
        except IndexError:
            return "%s" % spnum[0]
    if group < 0 or group > 3:
        raise BadChunkingOptionError
    nowhite = num.lstrip()
    if nowhite[0] == "+":
        sign = "plus"
    elif nowhite[0] == "-":
        sign = "minus"
    else:
        sign = ""
    # An ordinal suffix ('1st', '22nd', ...) is stripped here and
    # re-applied in word form at the end.
    myord = num[-2:] in ("st", "nd", "rd", "th")
    if myord:
        num = num[:-2]
    finalpoint = False
    if decimal:
        if group != 0:
            chunks = num.split(".")
        else:
            chunks = num.split(".", 1)
        if chunks[-1] == "":  # remove blank string if nothing after decimal
            chunks = chunks[:-1]
            finalpoint = True  # add 'point' to end of output
    else:
        chunks = [num]
    first = 1
    loopstart = 0
    if chunks[0] == "":
        first = 0
        if len(chunks) > 1:
            loopstart = 1
    # Spell each chunk; the integer part (first) gets full grouping,
    # digits after the decimal point are spelled one at a time.
    for i in range(loopstart, len(chunks)):
        chunk = chunks[i]
        # remove all non numeric \D
        chunk = re.sub(r"\D", self.blankfn, chunk)
        if chunk == "":
            chunk = "0"
        if group == 0 and (first == 0 or first == ""):
            chunk = self.enword(chunk, 1)
        else:
            chunk = self.enword(chunk, group)
        if chunk[-2:] == ", ":
            chunk = chunk[:-2]
        chunk = re.sub(r"\s+,", self.commafn, chunk)
        if group == 0 and first:
            chunk = re.sub(r", (\S+)\s+\Z", " %s \\1" % andword, chunk)
        chunk = re.sub(r"\s+", self.spacefn, chunk)
        # chunk = re.sub(r"(\A\s|\s\Z)", self.blankfn, chunk)
        chunk = chunk.strip()
        if first:
            first = ""
        chunks[i] = chunk
    numchunks = []
    if first != 0:
        numchunks = chunks[0].split("%s " % comma)
    if myord and numchunks:
        # TODO: can this be just one re as it is in perl?
        mo = re.search(r"(%s)\Z" % ordinal_suff, numchunks[-1])
        if mo:
            # 'ordinal' is the module-level suffix dict, not the method.
            numchunks[-1] = re.sub(
                r"(%s)\Z" % ordinal_suff, ordinal[mo.group(1)], numchunks[-1]
            )
        else:
            numchunks[-1] += "th"
    for chunk in chunks[1:]:
        numchunks.append(decimal)
        numchunks.extend(chunk.split("%s " % comma))
    if finalpoint:
        numchunks.append(decimal)
    # wantlist: Perl list context. can explictly specify in Python
    if wantlist:
        if sign:
            numchunks = [sign] + numchunks
        return numchunks
    elif group:
        signout = "%s " % sign if sign else ""
        return "{}{}".format(signout, ", ".join(numchunks))
    else:
        signout = "%s " % sign if sign else ""
        num = "{}{}".format(signout, numchunks.pop(0))
        if decimal is None:
            first = True
        else:
            first = not num.endswith(decimal)
        for nc in numchunks:
            if nc == decimal:
                num += " %s" % nc
                first = 0
            elif first:
                num += "{} {}".format(comma, nc)
            else:
                num += " %s" % nc
        return num
# Join words with commas and a trailing 'and' (when appropriate)...
def join(
    self,
    words,
    sep=None,
    sep_spaced=True,
    final_sep=None,
    conj="and",
    conj_spaced=True,
):
    """
    Join words into a single human-readable list.
    e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly'
    options:
    conj: replacement for 'and'
    sep: separator. default ',', unless ',' is in the list then ';'
    final_sep: final separator. default ',', unless ',' is in the list then ';'
    conj_spaced: boolean. Should conj have spaces around it
    """
    if not words:
        return ""
    if len(words) == 1:
        return words[0]
    if conj_spaced:
        conj = " " if conj == "" else " %s " % conj
    if len(words) == 2:
        return "{}{}{}".format(words[0], conj, words[1])
    # Three or more items: pick a separator that cannot collide with
    # commas already inside the items.
    if sep is None:
        sep = ";" if "," in "".join(words) else ","
    if final_sep is None:
        final_sep = sep
    final_sep = "{}{}".format(final_sep, conj)
    if sep_spaced:
        sep += " "
    return final_sep.join((sep.join(words[0:-1]), words[-1]))
| dantebarba/docker-media-server | plex/Sub-Zero.bundle/Contents/Libraries/Shared/inflect.py | Python | gpl-3.0 | 97,744 | [
"CASINO",
"ESPResSo",
"Elk",
"MOOSE",
"Octopus"
] | 8a9d8963bd5055a8d03c99f8e8171e4b405611dcc31b6d8a8b45d78361b276f0 |
########################################################################
# File : Watchdog.py
# Author: Stuart Paterson
########################################################################
""" The Watchdog class is used by the Job Wrapper to resolve and monitor
the system resource consumption. The Watchdog can determine if
a running job is stalled and indicate this to the Job Wrapper.
Furthermore, the Watchdog will identify when the Job CPU limit has been
exceeded and fail jobs meaningfully.
Information is returned to the WMS via the heart-beat mechanism. This
also interprets control signals from the WMS e.g. to kill a running
job.
- Still to implement:
- CPU normalization for correct comparison with job limit
"""
import os
import re
import time
import resource
import errno
import socket
import getpass
import psutil
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities import MJF
from DIRAC.Core.Utilities.Profiler import Profiler
from DIRAC.Core.Utilities.Os import getDiskSpace
from DIRAC.Core.Utilities.Subprocess import getChildrenPIDs
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemInstance
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.TimeLeft import TimeLeft
from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
from DIRAC.WorkloadManagementSystem.Client import JobMinorStatus
class Watchdog(object):
#############################################################################
def __init__(self, pid, exeThread, spObject, jobCPUTime, memoryLimit=0, processors=1, jobArgs=None):
    """Constructor, takes system flag as argument.

    :param int pid: PID of the job wrapper process to monitor
    :param exeThread: payload execution thread (must expose getCurrentPID())
    :param spObject: subprocess wrapper object controlling the payload
    :param int jobCPUTime: CPU time limit for the job
    :param int memoryLimit: memory limit for the job (0 = no limit)
    :param int processors: number of processors allocated to the job
    :param dict jobArgs: job parameters; the StopSig* options are read here
    """
    # A mutable default argument ({}) would be shared across all instances,
    # so the default is None and normalized here instead.
    if jobArgs is None:
        jobArgs = {}
    self.stopSigStartSeconds = int(jobArgs.get("StopSigStartSeconds", 1800))  # 30 minutes
    self.stopSigFinishSeconds = int(jobArgs.get("StopSigFinishSeconds", 1800))  # 30 minutes
    self.stopSigNumber = int(jobArgs.get("StopSigNumber", 2))  # SIGINT
    self.stopSigRegex = jobArgs.get("StopSigRegex", None)
    self.stopSigSent = False
    self.log = gLogger.getSubLogger("Watchdog")
    self.exeThread = exeThread
    self.wrapperPID = pid
    self.appPID = self.exeThread.getCurrentPID()
    self.spObject = spObject
    self.jobCPUTime = jobCPUTime
    self.memoryLimit = memoryLimit
    self.calibration = 0
    self.initialValues = {}
    self.parameters = {}
    self.peekFailCount = 0
    self.peekRetry = 5
    self.profiler = Profiler(pid)
    self.checkError = ""
    self.currentStats = {}
    self.initialized = False
    self.count = 0
    # Defaults below may be overridden from the CS in initialize().
    self.testWallClock = 1
    self.testDiskSpace = 1
    self.testLoadAvg = 1
    self.testCPUConsumed = 1
    self.testCPULimit = 0
    self.testMemoryLimit = 0
    self.testTimeLeft = 1
    self.pollingTime = 10  # 10 seconds
    self.checkingTime = 30 * 60  # 30 minute period
    self.minCheckingTime = 20 * 60  # 20 mins
    self.wallClockCheckSeconds = 5 * 60  # 5 minutes
    self.maxWallClockTime = 3 * 24 * 60 * 60  # e.g. 3 days
    self.jobPeekFlag = 1  # on / off
    self.minDiskSpace = 10  # MB
    self.loadAvgLimit = 1000  # > 1000 and jobs killed
    self.sampleCPUTime = 30 * 60  # e.g. up to 20mins sample
    self.jobCPUMargin = 20  # %age buffer before killing job
    self.minCPUWallClockRatio = 5  # ratio %age
    self.nullCPULimit = 5  # After 5 sample times return null CPU consumption kill job
    self.checkCount = 0
    self.wallClockCheckCount = 0
    self.nullCPUCount = 0
    # Initialized defensively: execute() reads it when littleTimeLeft is set.
    self.littleTimeLeftCount = 0
    self.grossTimeLeftLimit = 10 * self.checkingTime
    self.timeLeftUtil = TimeLeft()
    self.timeLeft = 0
    self.littleTimeLeft = False
    self.cpuPower = 1.0
    self.processors = processors
#############################################################################
def initialize(self):
    """Watchdog initialization."""
    if self.initialized:
        self.log.info("Watchdog already initialized")
        return S_OK()
    self.initialized = True
    setup = gConfig.getValue("/DIRAC/Setup", "")
    if not setup:
        return S_ERROR("Can not get the DIRAC Setup value")
    wms_instance = getSystemInstance("WorkloadManagement")
    if not wms_instance:
        return S_ERROR("Can not get the WorkloadManagement system instance")
    self.section = "/Systems/WorkloadManagement/%s/JobWrapper" % wms_instance
    self.log.verbose("Watchdog initialization")
    # Test control flags and tunable parameters, all read from the CS
    # under self.section; defaults match those set in the constructor.
    for attribute, option, default in (
        ("testWallClock", "CheckWallClockFlag", 1),
        ("testDiskSpace", "CheckDiskSpaceFlag", 1),
        ("testLoadAvg", "CheckLoadAvgFlag", 1),
        ("testCPUConsumed", "CheckCPUConsumedFlag", 1),
        ("testCPULimit", "CheckCPULimitFlag", 0),
        ("testMemoryLimit", "CheckMemoryLimitFlag", 0),
        ("testTimeLeft", "CheckTimeLeftFlag", 1),
        ("pollingTime", "PollingTime", 10),  # 10 seconds
        ("checkingTime", "CheckingTime", 30 * 60),  # 30 minute period
        ("minCheckingTime", "MinCheckingTime", 20 * 60),  # 20 mins
        ("maxWallClockTime", "MaxWallClockTime", 3 * 24 * 60 * 60),  # e.g. 3 days
        ("jobPeekFlag", "JobPeekFlag", 1),  # on / off
        ("minDiskSpace", "MinDiskSpace", 10),  # MB
        ("loadAvgLimit", "LoadAverageLimit", 1000),  # > 1000 and jobs killed
        ("sampleCPUTime", "CPUSampleTime", 30 * 60),  # e.g. up to 20mins sample
        ("jobCPUMargin", "JobCPULimitMargin", 20),  # %age buffer before killing job
        ("minCPUWallClockRatio", "MinCPUWallClockRatio", 5),  # ratio %age
        # After 5 sample times return null CPU consumption kill job
        ("nullCPULimit", "NullCPUCountLimit", 5),
    ):
        setattr(self, attribute, gConfig.getValue(self.section + "/" + option, default))
    if self.checkingTime < self.minCheckingTime:
        self.log.info(
            "Requested CheckingTime of %s setting to %s seconds (minimum)"
            % (self.checkingTime, self.minCheckingTime)
        )
        self.checkingTime = self.minCheckingTime
    # The time left is returned in seconds @ 250 SI00 = 1 HS06,
    # the self.checkingTime and self.pollingTime are in seconds,
    # thus they need to be multiplied by a large enough factor
    self.fineTimeLeftLimit = gConfig.getValue(self.section + "/TimeLeftLimit", 150 * self.pollingTime)
    self.cpuPower = gConfig.getValue("/LocalSite/CPUNormalizationFactor", 1.0)
    return S_OK()
def run(self):
    """The main watchdog execution method"""
    result = self.initialize()
    if not result["OK"]:
        self.log.always("Can not start watchdog for the following reason")
        self.log.always(result["Message"])
        return result
    try:
        while True:
            self.log.debug("Starting watchdog loop # %d" % self.count)
            cycleStarted = time.time()
            result = self.execute()
            elapsed = time.time() - cycleStarted
            if not result["OK"]:
                self.log.error("Watchdog error during execution", result["Message"])
                return S_OK()
            if result["Value"] == "Ended":
                return S_OK()
            self.count += 1
            # Keep roughly one cycle per pollingTime seconds.
            remaining = self.pollingTime - elapsed
            if remaining > 0:
                time.sleep(remaining)
    except Exception:
        self.log.exception()
        return S_ERROR("Exception")
#############################################################################
def execute(self):
    """The main agent execution method of the Watchdog.

    Returns S_OK("Ended") once the monitored thread has finished;
    otherwise S_OK() after one polling cycle.
    """
    if not self.exeThread.is_alive():
        self.__getUsageSummary()
        self.log.info("Process to monitor has completed, Watchdog will exit.")
        return S_OK("Ended")
    # WallClock checks every self.wallClockCheckSeconds, but only if StopSigRegex is defined in JDL
    # NOTE(review): assumes initialValues["StartTime"] was set earlier by the
    # calibration code (not visible here) — confirm before first use.
    if (
        not self.stopSigSent
        and self.stopSigRegex is not None
        and (time.time() - self.initialValues["StartTime"]) > self.wallClockCheckSeconds * self.wallClockCheckCount
    ):
        self.wallClockCheckCount += 1
        self._performWallClockChecks()
    if self.littleTimeLeft:
        # if we have gone over enough iterations query again
        # NOTE(review): littleTimeLeftCount and __timeLeft() are managed by
        # the time-left bookkeeping elsewhere in this class.
        if self.littleTimeLeftCount == 0 and self.__timeLeft() == -1:
            self.checkError = JobMinorStatus.JOB_EXCEEDED_CPU
            self.log.error(self.checkError, self.timeLeft)
            self.__killRunningThread()
            return S_OK()
        else:
            self.littleTimeLeftCount -= 1
    # Note: need to poll regularly to see if the thread is alive
    # but only perform checks with a certain frequency
    if (time.time() - self.initialValues["StartTime"]) > self.checkingTime * self.checkCount:
        self.checkCount += 1
        result = self._performChecks()
        if not result["OK"]:
            self.log.warn("Problem during recent checks")
            self.log.warn(result["Message"])
        return S_OK()
    else:
        # self.log.debug('Application thread is alive: checking count is %s' %(self.checkCount))
        return S_OK()
#############################################################################
def _performWallClockChecks(self):
    """Watchdog performs the wall clock checks based on MJF. Signals are sent
    to processes if we need to stop, but function always returns S_OK()
    """
    mjf = MJF.MJF()
    try:
        wallClockSecondsLeft = mjf.getWallClockSecondsLeft()
    except Exception:
        # Just stop if we can't get the wall clock seconds left
        return S_OK()
    jobstartSeconds = mjf.getIntJobFeature("jobstart_secs")
    if jobstartSeconds is None:
        # Just stop if we don't know when the job started
        return S_OK()
    if (int(time.time()) > jobstartSeconds + self.stopSigStartSeconds) and (
        wallClockSecondsLeft < self.stopSigFinishSeconds + self.wallClockCheckSeconds
    ):
        # Need to send the signal! Assume it works to avoid sending the signal more than once
        self.log.info("Sending signal to JobWrapper children", "(%s)" % self.stopSigNumber)
        self.stopSigSent = True
        try:
            for childPid in getChildrenPIDs(self.wrapperPID):
                try:
                    # Use a context manager so the /proc handle is always
                    # closed (the original leaked the file object).
                    with open("/proc/%d/cmdline" % childPid, "r") as cmdlineFile:
                        cmdline = cmdlineFile.read().replace("\0", " ").strip()
                except IOError:
                    # Process gone away? Not running on Linux? Skip anyway
                    continue
                if re.search(self.stopSigRegex, cmdline) is not None:
                    self.log.info(
                        'Sending signal %d to process ID %d, cmdline = "%s"'
                        % (self.stopSigNumber, childPid, cmdline)
                    )
                    os.kill(childPid, self.stopSigNumber)
        except Exception as e:
            self.log.error("Failed to send signals to JobWrapper children!", repr(e))
    return S_OK()
#############################################################################
def _performChecks(self):
    """The Watchdog checks are performed at a different period to the checking of the
    application thread and correspond to the checkingTime.

    Collects one snapshot of load, memory, vsize/rss, disk space, CPU and wall
    clock into self.parameters and a heartbeat dict, runs _checkProgress(), and
    on failure kills the application thread. Always returns S_OK().
    """
    self.log.verbose("------------------------------------")
    self.log.verbose("Checking loop starts for Watchdog")
    heartBeatDict = {}
    msg = ""
    # --- system load average (1 minute) ---
    loadAvg = float(os.getloadavg()[0])
    msg += "LoadAvg: %d " % loadAvg
    heartBeatDict["LoadAverage"] = loadAvg
    if "LoadAverage" not in self.parameters:
        self.parameters["LoadAverage"] = []
    self.parameters["LoadAverage"].append(loadAvg)
    # --- peak memory (ru_maxrss) of wrapper + children ---
    memoryUsed = self.getMemoryUsed()
    msg += "MemUsed: %.1f kb " % (memoryUsed)
    heartBeatDict["MemoryUsed"] = memoryUsed
    if "MemoryUsed" not in self.parameters:
        self.parameters["MemoryUsed"] = []
    self.parameters["MemoryUsed"].append(memoryUsed)
    # --- virtual memory size from the profiler (value presumably in MB, stored as kb) ---
    result = self.profiler.vSizeUsage(withChildren=True)
    if not result["OK"]:
        self.log.warn("Could not get vSize info from profiler", result["Message"])
    else:
        vsize = result["Value"] * 1024.0
        heartBeatDict["Vsize"] = vsize
        self.parameters.setdefault("Vsize", [])
        self.parameters["Vsize"].append(vsize)
        msg += "Job Vsize: %.1f kb " % vsize
    # --- resident set size from the profiler ---
    result = self.profiler.memoryUsage(withChildren=True)
    if not result["OK"]:
        self.log.warn("Could not get rss info from profiler", result["Message"])
    else:
        rss = result["Value"] * 1024.0
        heartBeatDict["RSS"] = rss
        self.parameters.setdefault("RSS", [])
        self.parameters["RSS"].append(rss)
        msg += "Job RSS: %.1f kb " % rss
    # --- available disk space ---
    if "DiskSpace" not in self.parameters:
        self.parameters["DiskSpace"] = []
    # We exclude fuse so that mountpoints can be cleaned up by automount after a period unused
    # (specific request from CERN batch service).
    result = self.getDiskSpace(exclude="fuse")
    if not result["OK"]:
        self.log.warn("Could not establish DiskSpace", result["Message"])
    else:
        msg += "DiskSpace: %.1f MB " % (result["Value"])
        self.parameters["DiskSpace"].append(result["Value"])
        heartBeatDict["AvailableDiskSpace"] = result["Value"]
    # --- consumed CPU, kept as an HH:MM:SS string in the snapshots ---
    cpu = self.__getCPU()
    if not cpu["OK"]:
        msg += "CPU: ERROR "
        hmsCPU = 0
    else:
        cpu = cpu["Value"]
        msg += "CPU: %s (h:m:s) " % (cpu)
        if "CPUConsumed" not in self.parameters:
            self.parameters["CPUConsumed"] = []
        self.parameters["CPUConsumed"].append(cpu)
        hmsCPU = cpu
        rawCPU = self.__convertCPUTime(hmsCPU)
        if rawCPU["OK"]:
            heartBeatDict["CPUConsumed"] = rawCPU["Value"]
    # --- wall clock; heartbeat value is scaled by the number of processors ---
    result = self.__getWallClockTime()
    if not result["OK"]:
        self.log.warn("Failed determining wall clock time", result["Message"])
    else:
        msg += "WallClock: %.2f s " % (result["Value"])
        self.parameters.setdefault("WallClockTime", list()).append(result["Value"])
        heartBeatDict["WallClockTime"] = result["Value"] * self.processors
    self.log.info(msg)
    # Run the configured limit checks; any failure kills the payload
    result = self._checkProgress()
    if not result["OK"]:
        self.checkError = result["Message"]
        self.log.warn(self.checkError)
        if self.jobPeekFlag:
            result = self.__peek()
            if result["OK"]:
                outputList = result["Value"]
                self.log.info("Last lines of available application output:")
                self.log.info("================START================")
                for line in outputList:
                    self.log.info(line)
                self.log.info("=================END=================")
        self.__killRunningThread()
        return S_OK()
    # Build the recent-stdout summary to attach to the heartbeat
    recentStdOut = "None"
    if self.jobPeekFlag:
        result = self.__peek()
        if result["OK"]:
            outputList = result["Value"]
            size = len(outputList)
            recentStdOut = "Last %s lines of application output from Watchdog on %s [UTC]:" % (
                size,
                Time.dateTime(),
            )
            border = "=" * len(recentStdOut)
            cpuTotal = "Last reported CPU consumed for job is %s (h:m:s)" % (hmsCPU)
            if self.timeLeft:
                cpuTotal += ", Batch Queue Time Left %s (s @ HS06)" % self.timeLeft
            recentStdOut = "\n%s\n%s\n%s\n%s\n" % (border, recentStdOut, cpuTotal, border)
            self.log.info(recentStdOut)
            for line in outputList:
                self.log.info(line)
                recentStdOut += line + "\n"
        else:
            recentStdOut = (
                "Watchdog is initializing and will attempt to obtain standard output from application thread"
            )
            self.log.info(recentStdOut)
            # Give up on peeking after peekRetry consecutive failures
            self.peekFailCount += 1
            if self.peekFailCount > self.peekRetry:
                self.jobPeekFlag = 0
                self.log.warn("Turning off job peeking for remainder of execution")
    # JOBID is set by the job wrapper template; without it nothing can be reported
    if "JOBID" not in os.environ:
        self.log.info("Running without JOBID so parameters will not be reported")
        return S_OK()
    jobID = os.environ["JOBID"]
    staticParamDict = {"StandardOutput": recentStdOut}
    self.__sendSignOfLife(int(jobID), heartBeatDict, staticParamDict)
    return S_OK("Watchdog checking cycle complete")
#############################################################################
def __getCPU(self):
    """Uses the profiler to get CPU time for current process, its child, and the terminated child,
    and returns HH:MM:SS after conversion.

    :return: S_OK("HH:MM:SS") or an S_ERROR propagated from the profiler.
    """
    result = self.profiler.cpuUsageUser(withChildren=True, withTerminatedChildren=True)
    if not result["OK"]:
        self.log.warn("Issue while checking consumed CPU for user", result["Message"])
        # NOTE(review): assumes every profiler error result carries an "Errno"
        # key -- confirm against the Profiler implementation
        if result["Errno"] == errno.ESRCH:
            self.log.warn("The main process does not exist (anymore). This might be correct.")
        return result
    cpuUsageUser = result["Value"]
    result = self.profiler.cpuUsageSystem(withChildren=True, withTerminatedChildren=True)
    if not result["OK"]:
        self.log.warn("Issue while checking consumed CPU for system", result["Message"])
        if result["Errno"] == errno.ESRCH:
            self.log.warn("The main process does not exist (anymore). This might be correct.")
        return result
    cpuUsageSystem = result["Value"]
    cpuTimeTotal = cpuUsageUser + cpuUsageSystem
    if cpuTimeTotal:
        self.log.verbose("Raw CPU time consumed (s) =", cpuTimeTotal)
        return self.__getCPUHMS(cpuTimeTotal)
    # A total of exactly zero is treated as a measurement failure
    self.log.error("CPU time consumed found to be 0")
    return S_ERROR()
#############################################################################
def __getCPUHMS(self, cpuTime):
    """Format a CPU time given in seconds as an HH:MM:SS string.

    :param cpuTime: CPU time in seconds
    :return: S_OK with the zero-padded "HH:MM:SS" string
    """
    hours, remainder = divmod(cpuTime, 3600)
    mins, secs = divmod(remainder, 60)
    humanTime = "%02d:%02d:%02d" % (hours, mins, secs)
    self.log.verbose("Human readable CPU time is: %s" % humanTime)
    return S_OK(humanTime)
#############################################################################
def __interpretControlSignal(self, signalDict):
    """React to a control signal returned by the sign-of-life heartbeat call.

    Only the "Kill" key is understood; anything else is logged and ignored.
    Always returns S_OK().
    """
    self.log.info("Received control signal")
    if not isinstance(signalDict, dict):
        self.log.info("Expected dictionary for control signal", "received:\n%s" % (signalDict))
        return S_OK()
    if "Kill" in signalDict:
        self.log.info("Received Kill signal, stopping job via control signal")
        self.checkError = JobMinorStatus.RECEIVED_KILL_SIGNAL
        self.__killRunningThread()
    else:
        self.log.info("The following control signal was sent but not understood by the watchdog:")
        self.log.info(signalDict)
    return S_OK()
#############################################################################
def _checkProgress(self):
    """This method calls specific tests to determine whether the job execution
    is proceeding normally. CS flags can easily be added to add or remove
    tests via central configuration.

    :return: S_OK when all enabled checks pass; the failing check's S_ERROR otherwise.
    """
    report = ""
    if self.testWallClock:
        result = self.__checkWallClockTime()
        if not result["OK"]:
            self.log.warn(result["Message"])
            return result
        report += "WallClock: OK, "
    else:
        report += "WallClock: NA,"
    if self.testDiskSpace:
        result = self.__checkDiskSpace()
        if not result["OK"]:
            self.log.warn(result["Message"])
            return result
        report += "DiskSpace: OK, "
    else:
        report += "DiskSpace: NA,"
    if self.testLoadAvg:
        result = self.__checkLoadAverage()
        if not result["OK"]:
            # A load-average failure is deliberately non-fatal, but NOTE that
            # this returns early and skips the remaining checks for this cycle
            self.log.warn(
                "Check of load average failed, but won't fail because of that", ": %s" % result["Message"]
            )
            report += "LoadAverage: ERROR, "
            return S_OK()
        report += "LoadAverage: OK, "
    else:
        report += "LoadAverage: NA,"
    if self.testCPUConsumed:
        result = self.__checkCPUConsumed()
        if not result["OK"]:
            return result
        report += "CPUConsumed: OK, "
    else:
        report += "CPUConsumed: NA, "
    if self.testCPULimit:
        result = self.__checkCPULimit()
        if not result["OK"]:
            self.log.warn(result["Message"])
            return result
        report += "CPULimit OK, "
    else:
        report += "CPULimit: NA, "
    if self.testTimeLeft:
        # Updates self.timeLeft / self.littleTimeLeft as a side effect
        self.__timeLeft()
        if self.timeLeft:
            report += "TimeLeft: OK"
    else:
        report += "TimeLeft: NA"
    if self.testMemoryLimit:
        result = self.__checkMemoryLimit()
        if not result["OK"]:
            self.log.warn(result["Message"])
            return result
        report += "MemoryLimit OK, "
    else:
        report += "MemoryLimit: NA, "
    self.log.info(report)
    return S_OK("All enabled checks passed")
#############################################################################
def __checkCPUConsumed(self):
    """Checks whether the CPU consumed by application process is reasonable. This
    method will report stalled jobs to be killed.

    Compares the CPU consumed over the last sample window against the wall
    clock elapsed over the same window; a ratio below minCPUWallClockRatio
    flags the job as stalled.
    """
    self.log.info("Checking CPU Consumed")
    if "WallClockTime" not in self.parameters:
        return S_ERROR("Missing WallClockTime info")
    if "CPUConsumed" not in self.parameters:
        return S_ERROR("Missing CPUConsumed info")
    wallClockTime = self.parameters["WallClockTime"][-1]
    if wallClockTime < self.sampleCPUTime:
        self.log.info(
            "Stopping check, wallclock time is still smaller than sample time",
            "(%s) < (%s)" % (wallClockTime, self.sampleCPUTime),
        )
        return S_OK()
    # Number of snapshots that span one sample window
    intervals = max(1, int(self.sampleCPUTime / self.checkingTime))
    if len(self.parameters["CPUConsumed"]) < intervals + 1:
        self.log.info(
            "Not enough snapshots to calculate",
            "there are %s and we need %s" % (len(self.parameters["CPUConsumed"]), intervals + 1),
        )
        return S_OK()
    # Wall clock elapsed over the sample window (difference of snapshots)
    wallClockTime = self.parameters["WallClockTime"][-1] - self.parameters["WallClockTime"][-1 - intervals]
    try:
        # CPU snapshots are HH:MM:SS strings; convert to seconds before subtracting
        cpuTime = self.__convertCPUTime(self.parameters["CPUConsumed"][-1])["Value"]
        # For some reason, some times the CPU consumed estimation returns 0
        # if cpuTime == 0:
        #   return S_OK()
        cpuTime -= self.__convertCPUTime(self.parameters["CPUConsumed"][-1 - intervals])["Value"]
        if cpuTime < 0:
            self.log.warn("Consumed CPU time negative, something wrong may have happened, ignore")
            return S_OK()
        if wallClockTime <= 0:
            self.log.warn("Wallclock time should not be negative or zero, Ignore")
            return S_OK()
        ratio = (cpuTime / wallClockTime) * 100
        self.log.info("CPU/Wallclock ratio is %.2f%%" % ratio)
        # in case of error cpuTime might be 0, exclude this
        if ratio < self.minCPUWallClockRatio:
            # The payload can opt out of the stalled check with a marker file
            # or environment variable of the same name
            if (
                os.path.exists("DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK")
                or "DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK" in os.environ
            ):
                self.log.warn(
                    "N.B. job would be declared as stalled but CPU / WallClock check is disabled by payload"
                )
                return S_OK()
            self.log.info("Job is stalled!")
            return S_ERROR(JobMinorStatus.WATCHDOG_STALLED)
    except Exception as e:
        # Conversion failed: skip the check this cycle rather than kill the job
        self.log.error("Cannot convert CPU consumed from string to int", str(e))
    return S_OK()
#############################################################################
def __convertCPUTime(self, cputime):
    """Method to convert the CPU time as returned from the Watchdog
    instances to the equivalent DIRAC normalized CPU time to be compared
    to the Job CPU requirement.

    :param cputime: "HH:MM:SS" string
    :return: S_OK with the value in seconds, or S_ERROR on a malformed string
    """
    hmsParts = cputime.split(":")
    try:
        seconds = float(hmsParts[0]) * 3600 + float(hmsParts[1]) * 60 + float(hmsParts[2])
    except Exception as x:
        self.log.warn(str(x))
        return S_ERROR("Could not calculate CPU time")
    # Normalization to be implemented
    normalizedCPUValue = seconds
    result = S_OK()
    result["Value"] = normalizedCPUValue
    self.log.debug("CPU value %s converted to %s" % (cputime, normalizedCPUValue))
    return result
#############################################################################
def __checkCPULimit(self):
    """Checks that the job has consumed more than the job CPU requirement
    (plus a configurable margin) and kills them as necessary.

    :return: S_ERROR when the consumed CPU exceeds jobCPUTime plus the margin,
             S_OK with an explanatory message in every other case.
    """
    consumedCPU = 0
    if "CPUConsumed" in self.parameters:
        consumedCPU = self.parameters["CPUConsumed"][-1]
    if not consumedCPU:
        # Fix: the original passed the integer 0 to __convertCPUTime(), which
        # calls .split(":") and raised AttributeError before any snapshot was
        # taken; it also left currentCPU unbound on the fallthrough path.
        self.log.verbose("Both initial and current CPU consumed are null")
        return S_OK("CPU consumed is not measurable yet")
    consumedCPUDict = self.__convertCPUTime(consumedCPU)
    if not consumedCPUDict["OK"]:
        return S_OK("Not possible to determine current CPU consumed")
    currentCPU = consumedCPUDict["Value"]
    # Allow the configured percentage margin on top of the job CPU requirement
    limit = int(self.jobCPUTime + self.jobCPUTime * (self.jobCPUMargin / 100))
    cpuConsumed = float(currentCPU)
    if cpuConsumed > limit:
        self.log.info(
            "Job has consumed more than the specified CPU limit",
            "with an additional %s%% margin" % (self.jobCPUMargin),
        )
        return S_ERROR("Job has exceeded maximum CPU time limit")
    return S_OK("Job within CPU limit")
def __checkMemoryLimit(self):
    """Checks that the job memory consumption is within a limit.

    Only a warning is logged for now; the job is never failed on memory,
    so this always returns S_OK().
    """
    vsize = 0
    if "Vsize" in self.parameters:
        vsize = self.parameters["Vsize"][-1]
    if vsize and self.memoryLimit:
        if vsize > self.memoryLimit:
            # Just a warning for the moment
            # Fix: the original format spec "%f.2" printed e.g. "1234.000000.2";
            # "%.2f" was intended. Also dropped the no-op "vsize = vsize".
            self.log.warn(
                "Job has consumed %.2f KB of memory with the limit of %.2f KB" % (vsize, self.memoryLimit)
            )
    return S_OK()
#############################################################################
def __checkDiskSpace(self):
    """Checks whether the CS defined minimum disk space is available.

    :return: S_OK when enough space remains, S_ERROR(JOB_INSUFFICIENT_DISK)
             when below the minimum, S_ERROR when no measurement exists.
    """
    if "DiskSpace" not in self.parameters:
        return S_ERROR("Available disk space could not be established")
    availSpace = self.parameters["DiskSpace"][-1]
    # Negative values mean the measurement failed and are not treated as "low"
    if 0 <= availSpace < self.minDiskSpace:
        self.log.info(
            "Not enough local disk space for job to continue, defined in CS as %s MB" % (self.minDiskSpace)
        )
        return S_ERROR(JobMinorStatus.JOB_INSUFFICIENT_DISK)
    return S_OK("Job has enough disk space available")
#############################################################################
def __checkWallClockTime(self):
    """Checks whether the job has been running for the CS defined maximum
    wall clock time.
    """
    if "StartTime" not in self.initialValues:
        return S_ERROR("Job start time could not be established")
    elapsed = time.time() - self.initialValues["StartTime"]
    if elapsed > self.maxWallClockTime:
        self.log.info("Job has exceeded maximum wall clock time of %s seconds" % (self.maxWallClockTime))
        return S_ERROR(JobMinorStatus.JOB_EXCEEDED_WALL_CLOCK)
    return S_OK("Job within maximum wall clock time")
#############################################################################
def __checkLoadAverage(self):
    """Checks whether the CS defined maximum load average is exceeded."""
    if "LoadAverage" not in self.parameters:
        return S_ERROR("Job load average not established")
    lastLoad = self.parameters["LoadAverage"][-1]
    if lastLoad <= float(self.loadAvgLimit):
        return S_OK("Job running with normal load average")
    self.log.info("Maximum load average exceeded, defined in CS as %s " % (self.loadAvgLimit))
    return S_ERROR("Job exceeded maximum load average")
#############################################################################
def __peek(self):
    """Obtain recent standard output from the running application thread.

    Delegates to ExecutionThread.getOutput(); on failure the problem is
    logged and the failed result structure is returned unchanged.
    """
    peekResult = self.exeThread.getOutput()
    if not peekResult["OK"]:
        for warning in ("Could not obtain output from running application thread", peekResult["Message"]):
            self.log.warn(warning)
    return peekResult
#############################################################################
def calibrate(self):
    """The calibrate method obtains the initial values for system memory and load
    and calculates the margin for error for the rest of the Watchdog cycle.

    Populates self.initialValues and resets the per-metric snapshot lists in
    self.parameters, then reports the node information and initial values.
    """
    # First call sets initialValues["StartTime"] as a side effect
    self.__getWallClockTime()
    self.parameters["WallClockTime"] = []
    cpuConsumed = self.__getCPU()
    if not cpuConsumed["OK"]:
        self.log.warn("Could not establish CPU consumed, setting to 0.0")
        cpuConsumed = 0.0
    else:
        cpuConsumed = cpuConsumed["Value"]
    self.initialValues["CPUConsumed"] = cpuConsumed
    self.parameters["CPUConsumed"] = []
    self.initialValues["LoadAverage"] = float(os.getloadavg()[0])
    self.parameters["LoadAverage"] = []
    memUsed = self.getMemoryUsed()
    self.initialValues["MemoryUsed"] = memUsed
    self.parameters["MemoryUsed"] = []
    result = self.profiler.vSizeUsage(withChildren=True)
    if not result["OK"]:
        self.log.warn("Could not get vSize info from profiler", result["Message"])
    else:
        vsize = result["Value"] * 1024.0
        self.initialValues["Vsize"] = vsize
        self.log.verbose("Vsize(kb)", "%.1f" % vsize)
    self.parameters["Vsize"] = []
    result = self.profiler.memoryUsage(withChildren=True)
    if not result["OK"]:
        self.log.warn("Could not get rss info from profiler", result["Message"])
    else:
        rss = result["Value"] * 1024.0
        self.initialValues["RSS"] = rss
        self.log.verbose("RSS(kb)", "%.1f" % rss)
    self.parameters["RSS"] = []
    # We exclude fuse so that mountpoints can be cleaned up by automount after a period unused
    # (specific request from CERN batch service).
    result = self.getDiskSpace(exclude="fuse")
    self.log.verbose("DiskSpace: %s" % (result))
    if not result["OK"]:
        self.log.warn("Could not establish DiskSpace")
    else:
        self.initialValues["DiskSpace"] = result["Value"]
    self.parameters["DiskSpace"] = []
    result = self.getNodeInformation()
    self.log.verbose("NodeInfo", result)
    # Capture the local batch system job ID from whichever variable is set;
    # later matches win when several are present (LSF, PBS, SGE variants)
    if "LSB_JOBID" in os.environ:
        result["LocalJobID"] = os.environ["LSB_JOBID"]
    if "PBS_JOBID" in os.environ:
        result["LocalJobID"] = os.environ["PBS_JOBID"]
    if "QSUB_REQNAME" in os.environ:
        result["LocalJobID"] = os.environ["QSUB_REQNAME"]
    if "JOB_ID" in os.environ:
        result["LocalJobID"] = os.environ["JOB_ID"]
    self.__reportParameters(result, "NodeInformation", True)
    self.__reportParameters(self.initialValues, "InitialValues")
    return S_OK()
def __timeLeft(self):
    """
    return Normalized CPU time left in the batch system
    0 if not available
    update self.timeLeft and self.littleTimeLeft
    """
    # Get CPU time left in the batch system
    result = self.timeLeftUtil.getTimeLeft(0.0)
    if not result["OK"]:
        # Could not get CPU time left, we might need to wait for the first loop
        # or the Utility is not working properly for this batch system
        # or we are in a batch system
        timeLeft = 0
    else:
        timeLeft = result["Value"]
    self.timeLeft = timeLeft
    if not self.littleTimeLeft:
        # Crossing the gross limit switches the watchdog to high-frequency checks
        if timeLeft and timeLeft < self.grossTimeLeftLimit:
            self.log.info("TimeLeft bellow %s, now checking with higher frequency" % timeLeft)
            self.littleTimeLeft = True
            # TODO: better configurable way of doing this to be coded
            self.littleTimeLeftCount = 15
    else:
        # Already in the fine-grained regime: -1 signals the caller that the
        # fine limit has been crossed too
        if self.timeLeft and self.timeLeft < self.fineTimeLeftLimit:
            timeLeft = -1
    return timeLeft
#############################################################################
def __getUsageSummary(self):
    """Returns average load, memory etc. over execution of job thread

    Builds the summary from the snapshots collected in self.parameters,
    reports it as job parameters, and stores it in self.currentStats.
    """
    summary = {}
    # CPUConsumed
    if "CPUConsumed" in self.parameters:
        cpuList = self.parameters["CPUConsumed"]
        if cpuList:
            # Report the most recent snapshot, converted from HH:MM:SS to seconds
            hmsCPU = cpuList[-1]
            rawCPU = self.__convertCPUTime(hmsCPU)
            if rawCPU["OK"]:
                summary["LastUpdateCPU(s)"] = rawCPU["Value"]
        else:
            summary["LastUpdateCPU(s)"] = "Could not be estimated"
    # DiskSpace
    if "DiskSpace" in self.parameters:
        space = self.parameters["DiskSpace"]
        if space:
            # Absolute change in free space since calibration
            value = abs(float(space[-1]) - float(self.initialValues["DiskSpace"]))
            # NOTE(review): abs() can never return a negative value, so this
            # guard is dead code
            if value < 0:
                value = 0
            summary["DiskSpace(MB)"] = value
        else:
            summary["DiskSpace(MB)"] = "Could not be estimated"
    # MemoryUsed
    if "MemoryUsed" in self.parameters:
        memory = self.parameters["MemoryUsed"]
        if memory:
            summary["MemoryUsed(kb)"] = abs(float(memory[-1]) - float(self.initialValues["MemoryUsed"]))
        else:
            summary["MemoryUsed(kb)"] = "Could not be estimated"
    # LoadAverage
    if "LoadAverage" in self.parameters:
        laList = self.parameters["LoadAverage"]
        if laList:
            summary["LoadAverage"] = sum(laList) / len(laList)
        else:
            summary["LoadAverage"] = "Could not be estimated"
    result = self.__getWallClockTime()
    if not result["OK"]:
        self.log.warn("Failed determining wall clock time", result["Message"])
        # Zero out the wall-clock figures rather than omitting the keys
        summary["WallClockTime(s)"] = 0
        summary["ScaledCPUTime(s)"] = 0
    else:
        wallClock = result["Value"]
        summary["WallClockTime(s)"] = wallClock * self.processors
        summary["ScaledCPUTime(s)"] = wallClock * self.cpuPower * self.processors
    self.__reportParameters(summary, "UsageSummary", True)
    self.currentStats = summary
#############################################################################
def __reportParameters(self, params, title=None, report=False):
    """Log the given parameters and optionally upload them as job parameters.

    :param params: plain dict, or an S_OK-style dict carrying a "Value" dict
    :param title: optional heading for the log output
    :param report: when True, truthy entries are also sent via __setJobParamList
    """
    try:
        banner = "=========================================================="
        self.log.info("", banner)
        if title:
            self.log.info("Watchdog will report", title)
        else:
            self.log.info("Watchdog will report parameters")
        self.log.info("", banner)
        # Unwrap an S_OK-style structure if one was passed in
        vals = params["Value"] if "Value" in params and params["Value"] else params
        # Only truthy values are logged and reported
        reported = [[k, v] for k, v in vals.items() if v]
        for k, v in reported:
            self.log.info(str(k) + " = " + str(v))
        if report:
            self.__setJobParamList(reported)
        self.log.info("", banner)
    except Exception as x:
        self.log.warn("Problem while reporting parameters")
        self.log.warn(repr(x))
#############################################################################
def __getWallClockTime(self):
    """Establishes the Wall Clock time spent since the Watchdog initialization.

    The first call stores the start time and reports 0.0 elapsed seconds.
    """
    result = S_OK()
    startTime = self.initialValues.get("StartTime")
    if startTime is None:
        # First invocation: remember when we started
        self.initialValues["StartTime"] = time.time()
        result["Value"] = 0.0
    else:
        result["Value"] = time.time() - startTime
    return result
#############################################################################
def __killRunningThread(self):
    """Will kill the running thread process and any child processes."""
    targetPid = self.spObject.getChildPID()
    self.log.info("Sending kill signal to application PID %s" % (targetPid))
    killResult = self.spObject.killChild()
    self.applicationKilled = True
    self.log.info("Subprocess.killChild() returned:%s " % (killResult))
    return S_OK("Thread killed")
#############################################################################
def __sendSignOfLife(self, jobID, heartBeatDict, staticParamDict):
    """Sends sign of life 'heartbeat' signal and triggers control signal
    interpretation.

    :param jobID: integer DIRAC job ID
    :param heartBeatDict: dynamic parameters for this heartbeat
    :param staticParamDict: static parameters (e.g. recent stdout)
    :return: the raw result of the sendHeartBeat call
    """
    result = JobStateUpdateClient().sendHeartBeat(jobID, heartBeatDict, staticParamDict)
    if not result["OK"]:
        self.log.warn("Problem sending sign of life")
        self.log.warn(result)
    elif result["Value"]:
        # A non-empty value carries a control signal (e.g. Kill) from the server
        self.__interpretControlSignal(result["Value"])
    return result
#############################################################################
def __setJobParamList(self, value):
    """Wraps around setJobParameters of state update client.

    :param value: list of [name, value] pairs to attach to the job
    :return: the client result, or S_OK when no JOBID is available
    """
    # job wrapper template sets the jobID variable
    if "JOBID" not in os.environ:
        self.log.info("Running without JOBID so parameters will not be reported")
        return S_OK()
    jobID = os.environ["JOBID"]
    updateResult = JobStateUpdateClient().setJobParameters(int(jobID), value)
    self.log.verbose("setJobParameters(%s,%s)" % (jobID, value))
    if not updateResult["OK"]:
        self.log.warn(updateResult["Message"])
    return updateResult
#############################################################################
def getNodeInformation(self):
    """Retrieves all static system information.

    :return: dict with HostName, CPU(MHz), Memory(kB), LocalAccount and,
             when /proc/cpuinfo exposes them, ModelName and CacheSize(kB)
    """
    result = {}
    result["HostName"] = socket.gethostname()
    # psutil.cpu_freq() can return None on platforms without frequency info
    freq = psutil.cpu_freq()
    result["CPU(MHz)"] = freq[0] if freq else 0
    result["Memory(kB)"] = int(psutil.virtual_memory()[1] / 1024)
    result["LocalAccount"] = getpass.getuser()
    # Fix: the original hard-coded /proc/cpuinfo line index 4 for the model
    # name (layout-dependent) and raised IndexError on CPUs that do not
    # report a "cache size" field. Search for the keys and tolerate absence.
    try:
        with open("/proc/cpuinfo", "r") as cpuinfo:
            info = cpuinfo.readlines()
    except IOError:
        # Not running on Linux: leave the optional keys out
        info = []
    for line in info:
        if line.startswith("model name"):
            result["ModelName"] = line.split(":", 1)[1].replace(" ", "").replace("\n", "")
            break
    for line in info:
        if "cache size" in line:
            result["CacheSize(kB)"] = line.strip().split(":", 1)[1].strip()
            break
    return result
#############################################################################
def getMemoryUsed(self):
    """Obtains the memory used.

    :return: float sum of the peak resident set sizes (ru_maxrss) of this
             process and of its terminated children (platform units; kB on Linux)
    """
    ownPeak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    childrenPeak = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
    return float(ownPeak + childrenPeak)
#############################################################################
def getDiskSpace(self, exclude=None):
    """Obtains the available disk space.

    NOTE: this method shadows and delegates to a module-level getDiskSpace()
    utility (imported elsewhere in this file); inside the method body the bare
    name resolves to that function, not to this method.

    :param exclude: optional filesystem type to exclude (e.g. "fuse")
    :return: S_OK with the space as a float (MB), or S_ERROR with Value -1.0
    """
    result = S_OK()
    diskSpace = getDiskSpace(exclude=exclude)
    if diskSpace == -1:
        # -1 is the utility's failure sentinel; propagate it in the Value field
        result = S_ERROR("Could not obtain disk usage")
        self.log.warn(" Could not obtain disk usage")
        result["Value"] = float(-1)
        return result
    result["Value"] = float(diskSpace)
    return result
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| DIRACGrid/DIRAC | src/DIRAC/WorkloadManagementSystem/JobWrapper/Watchdog.py | Python | gpl-3.0 | 43,077 | [
"DIRAC"
] | 8a7140368b808149c942a5f5bcae5ceb4280417cd509a4cd28431f5d48a4e01f |
import pylab
import nest

# Two leaky integrate-and-fire neurons: neuron1 is driven by Poisson noise
# and projects onto neuron2 with a weak excitatory synapse.
neuron1 = nest.Create("iaf_neuron")
#nest.SetStatus(neuron1, {"I_e": 376.0})
neuron2 = nest.Create("iaf_neuron")
# One multimeter per neuron, recording the membrane potential V_m over time
multimeter = nest.Create("multimeter")
nest.SetStatus(multimeter, {"withtime":True, "record_from":["V_m"]})
multimeter2 = nest.Create("multimeter")
nest.SetStatus(multimeter2, {"withtime":True, "record_from":["V_m"]})
# Background drive: one excitatory and one inhibitory Poisson generator
noise = nest.Create("poisson_generator", 2)
nest.SetStatus(noise, [{"rate": 80000.0}, {"rate": 15000.0}])
syn_dict_ex = {"weight": 1.2}
syn_dict_in = {"weight": -2.0}
nest.Connect([noise[0]], neuron1, syn_spec=syn_dict_ex)
nest.Connect([noise[1]], neuron1, syn_spec=syn_dict_in)
# weight/delay kept from an earlier connection style; currently unused
weight = 20.0
delay = 1.0
#nest.Connect(neuron1, neuron2, weight, delay)
nest.Connect(neuron1, neuron2, syn_spec = {"weight":0.1})
'''
NOTE: figure 2's height of voltage traces is directly effected by the weight specified above
'''
nest.Connect(multimeter, neuron1)
nest.Connect(multimeter2, neuron2)
# Spike detector records neuron1's spike times and sender IDs
spikedetector = nest.Create("spike_detector",
    params={"withgid": True, "withtime": True})
#nest.Connect(multimeter, neuron)
nest.Connect(neuron1, spikedetector)
nest.Simulate(1000.0)
# Extract the recorded membrane potentials for both neurons
dmm = nest.GetStatus(multimeter)[0]
Vms = dmm["events"]["V_m"]
ts = dmm["events"]["times"]
dmm2 = nest.GetStatus(multimeter2)[0]
Vms2 = dmm2["events"]["V_m"]
ts2 = dmm2["events"]["times"]
# Figure 1: V_m of neuron1; figure 2: V_m of neuron2; figure 3: spike raster
pylab.figure(1)
pylab.plot(ts, Vms)
pylab.figure(2)
pylab.plot(ts2, Vms2)
dSD = nest.GetStatus(spikedetector,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
pylab.figure(3)
pylab.plot(ts, evs, ".")
pylab.show()
"NEURON"
] | 0d221b8b28287135745971a0c56f5946d5deb6ca2cd5448a08ff92c481107a92 |
"""
A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
from setuptools import setup # Always prefer setuptools over distutils
# from os import path
VERSION = '0.0.9'
LAST_MODIFIED_DATE = '2020-04-30' # by RJH — when setup.py was modified below
# INCLUDE_DERIVED_DATA_PICKLE_FILES = True
# INCLUDE_DERIVED_DATA_JSON_FILES = False
# this_folderpath = path.abspath(path.dirname(__file__))
# Get the long description from the README file
#with open(path.join(this_folderpath, 'README.md'), encoding='utf-8') as f:
# long_description = f.read()
package_data_list = [
'DataFiles/Biblelator.gif', 'DataFiles/Biblelator.jpg',
'DataFiles/BiblelatorLogo.gif', 'DataFiles/BiblelatorLogoSmall.gif',
]
# if INCLUDE_DERIVED_DATA_PICKLE_FILES:
# package_data_list += [
# 'DataFiles/DerivedFiles/iso_639_3_Languages_Tables.pickle',
# 'DataFiles/DerivedFiles/USFM2Markers_Tables.pickle',
# ]
# if INCLUDE_DERIVED_DATA_JSON_FILES:
# package_data_list += [
# 'DataFiles/DerivedFiles/iso_639_3_Languages_Tables.json',
# 'DataFiles/DerivedFiles/USFM2Markers_Tables.json',
# ]
setup(
name='Biblelator',
version=VERSION,
packages=['Biblelator',
'Biblelator.Apps',
'Biblelator.Dialogs',
'Biblelator.Helpers',
'Biblelator.Settings',
'Biblelator.Windows',
],
package_dir ={ 'Biblelator': 'Biblelator' },
package_data={ 'Biblelator': package_data_list },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# metadata to display on PyPI
# This should be your name or the name of the organization which owns the project.
author="Robert Hunt",
author_email="Freely.Given.org+Biblelator@gmail.com",
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description="Biblelator — experimental USFM Bible Editor",
license='GPLv3',
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description="""
A USFM Bible editor using the [BibleOrgSys](https://pypi.org/project/BibleOrgSys/)
library and Python's tKinter windowing library for simple and easy installation.
(It's all in the standard CPython install.)
On most systems, Biblelator can be installed (as shown above) with:
`pip install Biblelator`
but if it complains, maybe:
`python3 -m pip install Biblelator`
The app can be run from the command line with:
`Biblelator`
which should start up and display a smallish window,
or to view all the available options:
`Biblelator --help`
You can discover the program version (doesn't match the package version) with:
`Biblelator --version`
Biblelator reads or creates a `BiblelatorData` folder in your home folder.
Log files are stored in a subfolder there and may be useful for reporting errors.
(If you have start-up problems, you might want to edit the settings there,
or even delete the entire settings file if you have no complex saved windows settings yet.)
Because some Bible users need to remain anonymous, Biblelator defaults to no internet use.
However this reduces functionality, and most users will want to run the program once,
and then close it and edit the `Biblelator.ini` file created in the `BiblelatorData/BiblelatorSettings` folder
and change `internetAccess` to `Enabled`.
While there, you might as well update the `currentUserName` and other entries
under `[Users]`.
The other auxilliary apps included are `BiblelatorSettingsEditor`,
`BOSManager` (for inspecting Bible Organisational Systems), and
`SwordManager` for viewing
and downloading [Sword](https://crosswire.org/sword/index.jsp) modules.
(All three are at various stages of incompleteness.)
NOTE: This packaging is still being tested following massive restructuring,
and is not necessarily fully functional until it is marked as v0.1.0 or higher
when some open-licensed resources will also be downloadable.
We also have hopes to improve documentation before v0.2.0.
After that point, we also hope to release some prepackaged versions—
probably including Docker and Snap.
This software has been developed in very small chunks of spare time since 2013
(so it's not necessarily well structured, and definitely not polished).
However, it was used as my main Bible editor instead of Paratext
for a couple of years.
This package will not reach v1.0.0 until after the BibleOrgSys reaches it.
The API will not become fixed/stable until the v1.0.0 release.
No attempt at all has been made at memory or speed optimisations
and this is not planned until after the release of v1.0.0.
So if it becomes very slow, you might need to close some Bible resource windows.
Biblelator is developed and tested on Linux (Ubuntu) but should also run on Windows and OS X (although not so well tested).
See https://ubsicap.github.io/usfm/ for more information about USFM.
""",
# long_description=long_description,
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown',
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords="Bible Scripture translation editor USFM",
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url="http://Freely-Given.org/Software/Biblelator/",
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
#project_urls={ # Optional
# 'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
# 'Source': 'https://github.com/pypa/sampleproject/',
#},
project_urls={
#"Bug Tracker": "https://bugs.example.com/HelloWorld/",
#"Documentation": "https://docs.example.com/HelloWorld/",
"Source Code": "https://github.com/openscriptures/Biblelator/",
},
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[
# How mature is this project? Common values are
# 1 - Planning
# 2 - Pre-Alpha
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 2 - Pre-Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Religion',
'Topic :: Religion',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# These classifiers are *not* checked by 'pip install'. See instead
# 'python_requires' below.
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: OS Independent',
],
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. If you
# do not support Python 2, you can simplify this to '>=3.5' or similar, see
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
python_requires='>=3.7',
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['BibleOrgSys>=0.0.21'],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
entry_points={
'console_scripts': [
'Biblelator=Biblelator.Biblelator:run',
'BiblelatorSettingsEditor=Biblelator.Apps.BiblelatorSettingsEditor:run',
'BOSManager=Biblelator.Apps.BOSManager:run',
'SwordManager=Biblelator.Apps.SwordManager:run',
],
},
)
| openscriptures/Biblelator | setup.py | Python | gpl-3.0 | 10,916 | [
"VisIt"
] | db3c06c2687305d77465660c2a00034ed3a5e1234cfe291bb3b656a9058fe803 |
#!/usr/bin/env yamtbx.python
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
"""
Convert XPARM.XDS information for labelit programs.
Use PHENIX_TRUST_OTHER_ENV=1 to start this script.
"""
import re, sys, os, pickle, math
import numpy
from cctbx import crystal_orientation
import cctbx.sgtbx.bravais_types
from iotbx.detectors.context.endstation import EndStation
import iotbx.detectors
from labelit.symmetry.subgroup import MetricSubgroup
from iotbx.detectors.context.config_detector import beam_center_convention_from_image_object
from scitbx import matrix
from yamtbx.dataproc.xds import *
from yamtbx.dataproc.xds.xparm import XPARM
ftype = numpy.float64
"""
import pickle
H = pickle.load(open("LABELIT_possible"))
G = pickle.load(open("LABELIT_pickle"))
"""
def get_spot_convention(imagefile):
    """Return the labelit spot-convention code for ``imagefile``.

    Currently hard-wired to 2; the early return makes the spotfinder-based
    auto-detection below unreachable (presumably kept for reference).
    NOTE(review): the unreachable code reads the module-level ``G`` and
    ``start_frame`` from the __main__ block, not ``imagefile`` -- confirm
    before re-enabling.
    """
    # Maybe 2 if RAXIS else 0..
    return 2
    # --- unreachable: original spotfinder-based auto-detection ---
    from spotfinder.command_line.signal_strength import master_params
    phil_params = master_params.extract()
    imageobject = iotbx.detectors.ImageFactory(G["file"][start_frame])
    beam_center_convention_from_image_object(imageobject, phil_params)
    print "SPOT CONVENTION=", phil_params.spot_convention
    return phil_params.spot_convention
# get_spot_convention()
class XPARM_to_labelit(XPARM):
    """Adapter exposing XDS (G)XPARM geometry in labelit's conventions.

    Converts detector geometry, beam centre and crystal orientation from
    the XDS frame to the labelit/Rossmann frame via the UNI_I axis swap
    (swap x/y, negate z) used throughout this module.
    """

    def __init__(self, xparm_file):
        # Parse the XPARM/GXPARM file via the base class, then decide binning.
        XPARM.__init__(self, xparm_file)
        self.bin = 1
        self.set_bin()
    # __init__()

    def set_bin(self):
        """Mirror labelit's implicit 2x binning for large detectors."""
        # By default, labelit calls setBin(2) if size1 > 4000.
        # See: $PHENIX/cctbx_project/spotfinder/diffraction/imagefiles.py +245
        if self.nx > 4000 or self.ny > 4000:
            self.bin = 2
            # We must *NOT* modify qx,qy, nx,ny since it would be used later. (e.g. beam center conversion)
            print "BINNING=", self.bin, "QX, QY, NX, NY=", self.qx*self.bin, self.qy*self.bin, self.nx//self.bin, self.ny//self.bin
    # set_bin()

    def get_bravais(self):
        # NOTE(review): the first return forces the bravais lattice of
        # space group 1 (triclinic); the line below, which would use the
        # real space group from XPARM, is unreachable. Confirm this
        # override is intentional.
        return str(cctbx.sgtbx.bravais_types.bravais_lattice(1))
        return str(cctbx.sgtbx.bravais_types.bravais_lattice(self.spacegroup))
    # get_bravais()

    def get_system(self):
        # NOTE(review): same forced space-group-1 override as
        # get_bravais(); the second return is unreachable.
        return cctbx.sgtbx.space_group_info(1).group().crystal_system().lower()
        return cctbx.sgtbx.space_group_info(self.spacegroup).group().crystal_system().lower()
    # get_system()

    def get_labelit_orient(self):
        """Return a cctbx crystal_orientation in the labelit frame.

        The XDS a/b/c axes are mapped through UNI_I and rotated back by
        -starting_angle about the goniometer axis.
        """
        UNI_I = matrix.sqr((0,1,0,1,0,0,0,0,-1)).inverse()
        a, b, c = tuple(self.a_axis), tuple(self.b_axis), tuple(self.c_axis)
        matXDS = matrix.sqr(a+b+c)
        print "matXDS=", matXDS[4]
        matRossmann = (UNI_I * matXDS.transpose()).transpose()
        orient = crystal_orientation.crystal_orientation(matRossmann, False) # reciprocal flag
        rotation_ax = self.get_endstation().rot_axi
        orient = orient.rotate_thru(rotation_ax, -self.starting_angle*math.pi/180.)
        return orient
    # get_labelit_orient()

    def get_labelit_xy_beam(self):
        # Definition of beam center is different between XDS and MOSFLM!
        # The returned pair is consumed as (xbeam, ybeam) by the caller.
        n = numpy.array((0.,0.,1.)) # FIXME. Not always true. needs cross_prod(DIRECTION_OF_DETECTOR_X-AXIS=, DIRECTION_OF_DETECTOR_Y-AXIS=)
        b = self.incident_beam
        # Project the incident beam onto the detector plane to get the
        # direct-beam position offset from the XDS origin.
        offset = abs(self.distance) * (1./numpy.dot(b,n) * b - n)
        print "BEAM CENTER OFFSET=", offset[0]/self.qx, offset[1]/self.qy
        return self.origin[1]*self.qy + offset[1], self.origin[0]*self.qx + offset[0]
    # get_labelit_xy_beam()

    def get_endstation(self):
        """Build a labelit EndStation with the rotation axis in its frame."""
        UNI_I = matrix.sqr((0,1,0,1,0,0,0,0,-1)).inverse()
        e = EndStation()
        #e.set_camera_convention() always 1?
        e.set_rotation_axis(UNI_I*matrix.col(self.rotation_axis))
        print "endstation.rot_axi=", e.rot_axi
        print "endstation.cam_con", tuple(e.cam_con)
        return e
    # get_endstation()

    def get_distance(self):
        # XDS may record a signed distance; labelit wants it positive.
        return abs(self.distance)

    def get_pixel_size(self):
        # Binned pixel size; uses qx only (assumes square pixels -- TODO confirm).
        return self.qx * self.bin

    def get_size1_size2(self):
        return self.nx // self.bin, self.ny // self.bin # FIXME. reversed?? OK?
# class XPARM_to_labelit
# class XPARM_to_labelit
def template_to_filename(img_template, iframe):
    """Replace the '?' placeholder run in an XDS-style template with a
    zero-padded frame number.

    E.g. template_to_filename("frame_????.img", 7) -> "frame_0007.img".
    Raises AttributeError if the template contains no '?' run.
    """
    # Raw string: "\?+" was an invalid escape sequence (SyntaxWarning on
    # modern Python); the pattern itself is unchanged.
    re_var = re.compile(r"\?+")
    # like: "????"
    var = re_var.search(img_template).group()
    repl = "%%.%dd" % len(var)
    # replace e.g. ???? => 0001
    return img_template.replace(var, repl % iframe)
# template_to_filename()
# template_to_filenames()
if __name__ == "__main__":
    # Usage: xds2labelit.py <xds_processing_dir>
    # Reads XDS.INP and GXPARM.XDS from that directory and writes the
    # LABELIT_pickle / LABELIT_possible files that labelit expects.
    #wdir = "/Users/yam/crystal/Lysozyme/Lyz080513B/xds_process_Lyz"
    wdir = sys.argv[1]
    inp = dict(get_xdsinp_keyword(os.path.join(wdir, "XDS.INP")))
    xparm = XPARM_to_labelit(os.path.join(wdir, "GXPARM.XDS"))
    # G mirrors the dictionary labelit stores in LABELIT_pickle.
    G = {}
    G["pixel_size"] = xparm.get_pixel_size()
    start_frame = int(inp["DATA_RANGE"].split()[0])
    G["file"] = {start_frame: template_to_filename(inp["NAME_TEMPLATE_OF_DATA_FRAMES"], start_frame)}
    G["size1"], G["size2"] = xparm.get_size1_size2()
    #img = iotbx.detectors.ImageFactory(G["file"][start_frame])
    G["spot_convention"] = get_spot_convention(G["file"][start_frame])
    G["endstation"] = xparm.get_endstation()
    G["recommended_grid_sampling"] = 0.018128529847134645 #0.029 # arbitrary value ("tekitou")
    G["xbeam"], G["ybeam"] = xparm.get_labelit_xy_beam()
    G["distance"] = xparm.get_distance() #inp["DETECTOR_DISTANCE"]
    G["twotheta"] = 0.
    G['wavelength'] = inp["X-RAY_WAVELENGTH"]
    G['ref_maxcel'] = 362.259397 #114.373193 # FIXME? Maybe doesn't matter.
    G['deltaphi'] = inp["OSCILLATION_RANGE"]
    G["best_integration"] = {}
    G["best_integration"]["mosaicity"] = 0.15 # FIXME? Maybe doesn't matter.
    G["best_integration"]["orient"] = xparm.get_labelit_orient()
    print "orient:", G["best_integration"]["orient"]
    print G
    # H is the list of candidate settings written to LABELIT_possible;
    # only a single entry (from the refined XPARM) is produced here.
    H = []
    h = MetricSubgroup()
    h["counter"] = 1
    h["orient"] = xparm.get_labelit_orient()
    #h["mosaicity"] = 0.075 # FIXME
    h["bravais"] = xparm.get_bravais() # like oP
    # Needed?
    h["status"] = "ok"
    h["residual"] = 0.1 # FIXME
    h["system"] = xparm.get_system()
    h["refined x beam"], h["refined y beam"] = xparm.get_labelit_xy_beam()
    h["refined distance"] = xparm.get_distance()
    h["max_angular_difference"] = 0.3 # FIXME
    h["count_GOOD"] = 239 # FIXME
    H.append(h)
    print H
    # Persist both pickles where labelit.precession_photo will find them.
    pickle.dump(G, open(os.path.join(wdir, "LABELIT_pickle"), "w"))
    pickle.dump(H, open(os.path.join(wdir, "LABELIT_possible"), "w"))
    print
    print "Run:"
    print "labelit.precession_photo bravais_choice=1 image_range=%d,%d intensity_full_scale=512 plot_section=H,K,0 pdf_output.file=HK0_xds.pdf" % tuple(map(lambda x:int(x), inp["DATA_RANGE"].split()))
    print "labelit.precession_photo bravais_choice=1 image_range=%d,%d intensity_full_scale=512 plot_section=0,K,L pdf_output.file=0KL_xds.pdf" % tuple(map(lambda x:int(x), inp["DATA_RANGE"].split()))
    print "labelit.precession_photo bravais_choice=1 image_range=%d,%d intensity_full_scale=512 plot_section=H,0,L pdf_output.file=H0L_xds.pdf" % tuple(map(lambda x:int(x), inp["DATA_RANGE"].split()))
"CRYSTAL"
] | 0a9c878e06f52024ebd6d941eb62ec6e661e45016d86e53da41fbcdc9533df3c |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#########
Reporting
#########
*Created on Thu Jun 8 14:40 2017 by A. Pahl*
Tools for creating HTML Reports."""
import time
import base64
import os
import os.path as op
from string import Template
from io import BytesIO as IO
import pandas as pd
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import numpy as np
from PIL import Image, ImageChops
import matplotlib.pyplot as plt
from . import tools as cpt
from . import report_templ as cprt
from . import processing as cpp
try:
from . import resource_paths as cprp
except ImportError:
from . import resource_paths_templ as cprp
print("* Resource paths not found, stub loaded.")
print(" Automatic loading of resources will not work,")
print(" please have a look at resource_paths_templ.py")
from .config import (ACT_PROF_PARAMETERS, ACT_CUTOFF_PERC, ACT_CUTOFF_PERC_REF,
LIMIT_ACTIVITY_H, LIMIT_ACTIVITY_L,
LIMIT_CELL_COUNT_H, LIMIT_CELL_COUNT_L,
LIMIT_SIMILARITY_H, LIMIT_SIMILARITY_L,
PARAMETER_HELP)
Draw.DrawingOptions.atomLabelFontFace = "DejaVu Sans"
Draw.DrawingOptions.atomLabelFontSize = 18
from IPython.core.display import HTML
try:
from misc_tools import apl_tools
AP_TOOLS = True
# Library version
VERSION = apl_tools.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} (commit: {})".format(__name__, VERSION))
except ImportError:
AP_TOOLS = False
print("{:45s} ({})".format(__name__, time.strftime("%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
try:
# Try to import Avalon so it can be used for generation of 2d coordinates.
from rdkit.Avalon import pyAvalonTools as pyAv
USE_AVALON_2D = True
except ImportError:
print(" * Avalon not available. Using RDKit for 2d coordinate generation.")
USE_AVALON_2D = False
def check_2d_coords(mol, force=False):
    """Ensure ``mol`` carries a 2D conformer, computing one when absent.

    With ``force=True`` coordinates are regenerated unconditionally.
    """
    needs_coords = force
    if not needs_coords:
        try:
            mol.GetConformer()
        except ValueError:  # RDKit raises ValueError when no conformer exists
            needs_coords = True
    if not needs_coords:
        return
    if USE_AVALON_2D:
        pyAv.Generate2DCoords(mol)
    else:
        mol.Compute2DCoords()
def mol_from_smiles(smi, calc_2d=True):
    """Parse a SMILES string; fall back to the wildcard atom '*' on failure.

    When ``calc_2d`` is true, a valid molecule gets 2D coordinates.
    """
    mol = Chem.MolFromSmiles(smi)
    if mol is None:
        return Chem.MolFromSmiles("*")
    if calc_2d:
        check_2d_coords(mol)
    return mol
def autocrop(im, bgcolor="white"):
    """Crop away a uniform ``bgcolor`` border; return None for a blank image."""
    rgb = im if im.mode == "RGB" else im.convert("RGB")
    background = Image.new("RGB", rgb.size, bgcolor)
    bbox = ImageChops.difference(rgb, background).getbbox()
    if not bbox:
        return None  # no contents
    return rgb.crop(bbox)
def get_value(str_val):
    """Parse *str_val* into int/float when possible; '' for empty input.

    '7' becomes int, '3.5' becomes float; numeric strings without a dot
    (e.g. '1e2') are coerced to int, mirroring the historical behaviour.
    Non-numeric strings are returned unchanged.
    """
    if not str_val:
        return ""
    try:
        number = float(str_val)
    except ValueError:
        return str_val
    return number if "." in str_val else int(number)
def isnumber(x):
    """Return True if *x* can be converted to float (i.e. is numeric)."""
    try:
        float(x)
    except (ValueError, TypeError):
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return False
    return True
def convert_bool(dict, dkey, true="Yes", false="No", default="n.d."):
    """Map the boolean at ``dict[dkey]`` to a display string, in place.

    Missing keys are filled with ``default``. (The first parameter shadows
    the ``dict`` builtin; name kept for interface compatibility.)
    """
    if dkey not in dict:
        dict[dkey] = default
    else:
        dict[dkey] = true if dict[dkey] else false
def load_image(path, well, channel):
    """Open the JPEG for the given well and channel below ``path``."""
    fn = "{}/{}_w{}.jpg".format(path, well, channel)
    return Image.open(fn)
def b64_mol(mol, size=300):
    """Render ``mol`` as an auto-cropped PNG and return it base64-encoded."""
    try:
        img = autocrop(Draw.MolToImage(mol, size=(size, size)))
    except UnicodeEncodeError:
        # Some structures make the drawer choke; log the SMILES and fall
        # back to drawing methane instead.
        print(Chem.MolToSmiles(mol))
        fallback = Chem.MolFromSmiles("C")
        img = autocrop(Draw.MolToImage(fallback, size=(size, size)))
    buf = IO()
    try:
        img.save(buf, format='PNG')
        return base64.b64encode(buf.getvalue()).decode()
    finally:
        buf.close()
def b64_img(im, format="JPEG"):
    """Base64-encode an image.

    ``im`` may be a PIL image (serialized using ``format``) or an already
    filled BytesIO buffer, which is encoded as-is and left open.
    """
    if isinstance(im, IO):
        return base64.b64encode(im.getvalue()).decode()
    buf = IO()
    try:
        im.save(buf, format=format)
        return base64.b64encode(buf.getvalue()).decode()
    finally:
        buf.close()
def mol_img_tag(mol, options=None):
    """Return an <img> tag with ``mol`` embedded as a base64 PNG."""
    if options is None:
        options = ""
    return """<img {} src="data:image/png;base64,{}" alt="Mol"/>""".format(
        options, b64_mol(mol))
def img_tag(im, format="jpeg", options=None):
    """Return an <img> tag embedding ``im`` as base64 data in ``format``."""
    if options is None:
        options = ""
    encoded = b64_img(im, format=format)
    return """<img {} src="data:image/{};base64,{}" alt="Image"/>""".format(
        options, format.lower(), encoded)
def load_control_images(src_dir):
    """Load the five channel images of control well H11 as <img> tags."""
    image_dir = op.join(src_dir, "images")
    return {ch: img_tag(load_image(image_dir, "H11", ch),
                        options='style="width: 250px;"')
            for ch in range(1, 6)}
def sanitize_filename(fn):
    """Replace filename-unsafe characters (':' and ',') with '_'."""
    # str.translate performs both substitutions in one C-level pass
    # instead of two chained .replace() calls.
    return fn.translate(str.maketrans(":,", "__"))
def write(text, fn):
    """Write ``text`` to file ``fn``, replacing any previous content."""
    with open(fn, "w") as handle:
        handle.write(text)
def write_page(page, title="Report", fn="index.html", templ=cprt.HTML_INTRO):
    """Wrap ``page`` in the intro/extro HTML templates and write it to ``fn``."""
    document = Template(templ + page + cprt.HTML_EXTRO).substitute(title=title)
    write(document, fn=fn)
def assign_colors(rec):
    """Attach traffic-light color fields (``Col_*``) to a result record.

    Thresholds come from the project config (LIMIT_* constants); records
    without ``Toxic`` / ``Pure_Flag`` entries get a neutral white.
    """
    def _band(value, high, low):
        # green at/above ``high``, yellow at/above ``low``, red otherwise
        if value >= high:
            return cprt.COL_GREEN
        if value >= low:
            return cprt.COL_YELLOW
        return cprt.COL_RED

    if "Toxic" in rec:
        rec["Col_Toxic"] = cprt.COL_RED if rec["Toxic"] else cprt.COL_GREEN
    else:
        rec["Col_Toxic"] = cprt.COL_WHITE

    if "Pure_Flag" in rec:
        purity_colors = {"Ok": cprt.COL_GREEN,
                         "Warn": cprt.COL_YELLOW,
                         "Fail": cprt.COL_RED}
        rec["Col_Purity"] = purity_colors.get(rec["Pure_Flag"], cprt.COL_WHITE)
    else:
        rec["Col_Purity"] = cprt.COL_WHITE

    rec["Col_Cell_Count"] = _band(rec["Rel_Cell_Count"],
                                  LIMIT_CELL_COUNT_H, LIMIT_CELL_COUNT_L)
    rec["Col_Act"] = _band(rec["Activity"],
                           LIMIT_ACTIVITY_H, LIMIT_ACTIVITY_L)
    rec["Col_Act_Flag"] = (cprt.COL_GREEN if rec["Act_Flag"] == "active"
                           else cprt.COL_RED)
def remove_colors(rec):
    """Reset every ``Col_*`` entry of ``rec`` to white (no highlighting)."""
    color_keys = [k for k in rec.keys() if k.startswith("Col_")]
    for key in color_keys:
        rec[key] = cprt.COL_WHITE
def overview_report(df, cutoff=LIMIT_SIMILARITY_L / 100,
                    highlight=False, mode="cpd"):
    """Build the HTML overview table for all compounds in ``df``.

    mode `int` displays similarities not to references but to other internal
    compounds (just displays the `Similarity` column); a mode containing
    "ref" switches to the reference activity cutoff.

    Returns (html_string, list_of_well_ids_that_get_detail_pages).
    NOTE(review): the ``cutoff`` parameter is never used in this body --
    confirm whether it is still needed.
    """
    cpp.load_resource("SIM_REFS")
    sim_refs = cpp.SIM_REFS
    detailed_cpds = []
    if isinstance(df, cpp.DataSet):
        df = df.data
    if "ref" in mode:
        act_cutoff = ACT_CUTOFF_PERC_REF
    else:
        act_cutoff = ACT_CUTOFF_PERC
    t = Template(cprt.OVERVIEW_TABLE_HEADER)
    if "int" in mode:
        tbl_header = t.substitute(sim_entity="to another Test Compound")
    else:
        tbl_header = t.substitute(sim_entity="to a Reference")
    report = [cprt.OVERVIEW_TABLE_INTRO, tbl_header]
    row_templ = Template(cprt.OVERVIEW_TABLE_ROW)
    idx = 0
    for _, rec in df.iterrows():
        idx += 1
        well_id = rec["Well_Id"]
        mol = mol_from_smiles(rec.get("Smiles", "*"))
        rec["mol_img"] = mol_img_tag(mol)
        rec["idx"] = idx
        if "Pure_Flag" not in rec:
            rec["Pure_Flag"] = "n.d."
        # Defaults; overwritten below for sufficiently active compounds.
        rec["Act_Flag"] = "active"
        rec["Max_Sim"] = ""
        rec["Link"] = ""
        rec["Col_Sim"] = cprt.COL_WHITE
        has_details = True
        if rec["Activity"] < act_cutoff:
            has_details = False
            rec["Act_Flag"] = "inactive"
        # print(rec)
        # similar references are searched for non-toxic compounds with an activity >= LIMIT_ACTIVITY_L
        if rec["Activity"] < LIMIT_ACTIVITY_L or rec["Toxic"]:
            similars_determined = False
        else:
            similars_determined = True
        assign_colors(rec)
        convert_bool(rec, "Toxic")
        if has_details:
            # Active compound: link a detail page and look up similarity.
            detailed_cpds.append(well_id)
            details_fn = sanitize_filename(well_id)
            if rec.get("Is_Ref", False):
                plate = "references"
            else:
                plate = rec["Plate"]
            rec["Link"] = '<a href="../{}/details/{}.html">Detailed<br>Report</a>'.format(plate, details_fn)
            if similars_determined:
                if "int" in mode:
                    # similar = {"Similarity": [rec["Similarity"]]}
                    similar = pd.DataFrame({"Well_Id": [well_id], "Similarity": [rec["Similarity"]]})
                else:
                    similar = (sim_refs[sim_refs["Well_Id"] == well_id]
                               .sort_values("Similarity", ascending=False)
                               .reset_index())
                if len(similar) > 0:
                    max_sim = round(
                        similar["Similarity"][0] * 100, 1)  # first in the list has the highest similarity
                    rec["Max_Sim"] = max_sim
                    if max_sim >= LIMIT_SIMILARITY_H:
                        rec["Col_Sim"] = cprt.COL_GREEN
                    elif max_sim >= LIMIT_SIMILARITY_L:
                        rec["Col_Sim"] = cprt.COL_YELLOW
                    else:
                        rec["Col_Sim"] = cprt.COL_WHITE
                        print("ERROR: This should not happen (Max_Sim).")
                else:
                    rec["Max_Sim"] = "< {}".format(LIMIT_SIMILARITY_L)
                    rec["Col_Sim"] = cprt.COL_RED
        if not highlight:
            # remove all coloring again:
            remove_colors(rec)
        report.append(row_templ.substitute(rec))
    report.append(cprt.TABLE_EXTRO)
    return "\n".join(report), detailed_cpds
def sim_ref_table(similar):
    """Build the HTML table of reference compounds similar to one well.

    ``similar`` is a DataFrame with at least Ref_Id, Similarity and
    Tanimoto columns; reference metadata is joined in from the project's
    REFERENCES resource.

    Raises ValueError if a Ref_Id has no entry in REFERENCES.
    """
    cpp.load_resource("REFERENCES")
    df_refs = cpp.REFERENCES
    table = [cprt.TABLE_INTRO, cprt.REF_TABLE_HEADER]
    templ = Template(cprt.REF_TABLE_ROW)
    for idx, rec in similar.iterrows():
        rec = rec.to_dict()
        ref_id = rec["Ref_Id"]
        ref_data = df_refs[df_refs["Well_Id"] == ref_id]
        if len(ref_data) == 0:
            print(rec)
            raise ValueError("BUG: ref_data should not be empty.")
        ref_data = ref_data.copy()
        ref_data = ref_data.fillna("—")
        rec.update(ref_data.to_dict("records")[0])
        mol = mol_from_smiles(rec.get("Smiles", "*"))
        rec["Sim_Format"] = "{:.1f}".format(rec["Similarity"] * 100)
        # BUG FIX: the old check compared the *formatted string* against
        # np.nan with ``==``, which is always False (NaN never compares
        # equal); missing Tanimoto values leaked through as "nan".
        if pd.isna(rec["Tanimoto"]):
            rec["Tan_Format"] = "—"
        else:
            rec["Tan_Format"] = "{:.1f}".format(rec["Tanimoto"] * 100)
        rec["mol_img"] = mol_img_tag(mol)
        rec["idx"] = idx + 1
        link = sanitize_filename(rec["Well_Id"])
        rec["link"] = link
        row = templ.substitute(rec)
        table.append(row)
    table.append(cprt.TABLE_EXTRO)
    return "\n".join(table)
def changed_parameters_table(act_prof, val, parameters=ACT_PROF_PARAMETERS):
    """Build HTML rows for all profile parameters whose value equals ``val``.

    Returns (html_rows_string, list_of_changed_parameter_names).
    """
    changed = cpt.parameters_from_act_profile_by_val(
        act_prof, val, parameters=parameters)
    row_templ = Template(cprt.PARM_TABLE_ROW)
    rows = []
    for idx, parm in enumerate(changed, 1):
        parts = parm.split("_")
        rows.append(row_templ.substitute({
            "idx": idx,
            "Parameter": "_".join(parts[1:]),       # drop the leading prefix
            "Help_Page": PARAMETER_HELP[parts[2]],  # keyed by module name
        }))
    return "\n".join(rows), changed
def parm_stats(parameters):
    """Count how many parameter names belong to each imaging channel.

    Returns counts in the fixed order Mito, Ph_golgi, Syto, ER, Hoechst.
    """
    channels = ["_Mito", "_Ph_golgi", "_Syto", "_ER", "Hoechst"]
    return [sum(1 for p in parameters if ch in p) for ch in channels]
def parm_hist(increased, decreased, hist_cache):
    """Render a grouped bar chart of increased vs. decreased parameter
    counts per cell compartment and return it as an embedded <img> tag.

    The rendered tag is cached on disk at ``hist_cache`` and reused on
    subsequent calls. Returns a plain message string when nothing changed.
    """
    # try to load histogram from cache:
    if op.isfile(hist_cache):
        result = open(hist_cache).read()
        return result
    labels = [
        "Mito",
        "Golgi / Membrane",
        "RNA / Nucleoli",
        "ER",
        "Nuclei"
    ]
    inc_max = max(increased)
    dec_max = max(decreased)
    max_total = max([inc_max, dec_max])
    if max_total == 0:
        result = "No compartment-specific parameters were changed."
        return result
    # Normalize both series to the common maximum so bars are comparable.
    inc_norm = [v / max_total for v in increased]
    dec_norm = [v / max_total for v in decreased]
    n_groups = 5
    dpi = 96
    # plt.rcParams['axes.titlesize'] = 25
    plt.style.use("seaborn-white")
    plt.style.use("seaborn-pastel")
    plt.style.use("seaborn-talk")
    plt.rcParams['axes.labelsize'] = 25
    plt.rcParams['xtick.labelsize'] = 20
    plt.rcParams['ytick.labelsize'] = 20
    plt.rcParams['legend.fontsize'] = 20
    size = (1500, 1000)
    figsize = (size[0] / dpi, size[1] / dpi)
    fig, ax = plt.subplots(figsize=figsize)
    index = np.arange(n_groups)
    bar_width = 0.25
    plt.bar(index, inc_norm, bar_width,
            color='#94caef',
            label='Inc')
    plt.bar(index + bar_width, dec_norm, bar_width,
            color='#ffdd1a',
            label='Dec')
    plt.xlabel('Cell Compartment')
    plt.ylabel('rel. Occurrence')
    plt.xticks(index + bar_width / 2, labels, rotation=45)
    plt.legend()
    plt.tight_layout()
    # Serialize the figure into an in-memory JPEG and wrap it as <img>.
    img_file = IO()
    plt.savefig(img_file, bbox_inches='tight', format="jpg")
    result = img_tag(img_file, format="jpg", options='style="width: 800px;"')
    img_file.close()
    # important, otherwise the plots will accumulate and fill up memory:
    plt.close()
    open(hist_cache, "w").write(result)  # cache the histogram
    return result
def show_images(plate_quad, well):
    """For interactive viewing in the notebook.

    Renders an HTML table pairing each of the 5 channel images of ``well``
    with the corresponding control-well image from the same plate.
    """
    # Resolve the raw-image directory for this plate via the resource paths.
    date = cprp.DATES[plate_quad]
    src_dir = cprp.src_path.format(date, plate_quad)
    ctrl_images = load_control_images(src_dir)
    image_dir = op.join(src_dir, "images")
    templ_dict = {}
    # One pair per channel: compound image next to control image.
    for ch in range(1, 6):
        im = load_image(image_dir, well, ch)
        templ_dict["Img_{}_Cpd".format(ch)] = img_tag(im, options='style="width: 250px;"')
        templ_dict["Img_{}_Ctrl".format(ch)] = ctrl_images[ch]
    tbody_templ = Template(cprt.IMAGES_TABLE)
    table = cprt.TABLE_INTRO + tbody_templ.substitute(templ_dict) + cprt.HTML_EXTRO
    return HTML(table)
def detailed_report(rec, src_dir, ctrl_images):
    """Build the per-compound detail page (profile tables, histogram,
    channel images and similar-reference table) as an HTML string.

    ``rec`` is one result record (Series/dict-like); ``ctrl_images`` maps
    channel number -> pre-rendered control <img> tag.
    """
    # print(rec)
    cpp.load_resource("SIM_REFS")
    sim_refs = cpp.SIM_REFS
    date = time.strftime("%d-%m-%Y %H:%M", time.localtime())
    image_dir = op.join(src_dir, "images")
    well_id = rec["Well_Id"]
    act_prof = rec["Act_Profile"]
    # "2" = increased, "0" = decreased parameters in the activity profile.
    inc_parm, changed = changed_parameters_table(act_prof, "2")
    increased = parm_stats(changed)
    dec_parm, changed = changed_parameters_table(act_prof, "0")
    decreased = parm_stats(changed)
    mol = mol_from_smiles(rec.get("Smiles", "*"))
    if "Pure_Flag" not in rec:
        rec["Pure_Flag"] = "n.d."
    templ_dict = rec.copy()
    # Per-plate histogram cache directory.
    cache_path = op.join("histograms", rec["Plate"])
    os.makedirs(cache_path, exist_ok=True)
    hc_fn = sanitize_filename(rec["Well_Id"] + ".txt")
    hist_cache = op.join(cache_path, hc_fn)
    templ_dict["Date"] = date
    templ_dict["mol_img"] = mol_img_tag(mol, options='class="cpd_image"')
    templ_dict["Inc_Parm_Table"] = inc_parm
    templ_dict["Dec_Parm_Table"] = dec_parm
    templ_dict["parm_hist"] = parm_hist(increased, decreased, hist_cache)
    if "Known_Act" in templ_dict:
        # BUG FIX: the old checks used ``x == np.nan`` which is always
        # False (NaN never compares equal), and the second condition
        # tested Trivial_Name instead of Known_Act (copy-paste error).
        if pd.isna(templ_dict["Trivial_Name"]) or templ_dict["Trivial_Name"] == "":
            templ_dict["Trivial_Name"] = "—"
        if pd.isna(templ_dict["Known_Act"]) or templ_dict["Known_Act"] == "":
            templ_dict["Known_Act"] = "—"
        t = Template(cprt.DETAILS_REF_ROW)
        templ_dict["Reference"] = t.substitute(templ_dict)
    else:
        templ_dict["Reference"] = ""
    well = rec["Metadata_Well"]
    for ch in range(1, 6):
        im = load_image(image_dir, well, ch)
        templ_dict["Img_{}_Cpd".format(ch)] = img_tag(im, options='style="width: 250px;"')
        templ_dict["Img_{}_Ctrl".format(ch)] = ctrl_images[ch]
    # Similar references only make sense for active, non-toxic compounds.
    if rec["Activity"] < LIMIT_ACTIVITY_L:
        templ_dict["Ref_Table"] = "Because of low induction (< {}%), no similarity was determined.".format(LIMIT_ACTIVITY_L)
    elif rec["Rel_Cell_Count"] < LIMIT_CELL_COUNT_L:
        templ_dict["Ref_Table"] = "Because of compound toxicity, no similarity was determined."
    else:
        similar = (sim_refs[sim_refs["Well_Id"] == well_id]
                   .sort_values("Similarity", ascending=False)
                   .reset_index())
        if len(similar) > 0:
            ref_tbl = sim_ref_table(similar)
            templ_dict["Ref_Table"] = ref_tbl
        else:
            templ_dict["Ref_Table"] = "No similar references found."
    t = Template(cprt.DETAILS_TEMPL)
    report = t.substitute(templ_dict)
    return report
def full_report(df, src_dir, report_name="report", plate=None,
                cutoff=0.6, act_cutoff=ACT_CUTOFF_PERC, highlight=False, mode="cpd"):
    """Write the complete HTML report: an overview page plus one detail
    page per sufficiently active compound.

    Returns an IPython HTML link to the generated overview page.
    NOTE(review): ``act_cutoff`` is accepted but never used in this body;
    the cutoff applied inside overview_report() depends on ``mode``.
    """
    overview_fn = op.join(report_name, "index.html")
    date = time.strftime("%d-%m-%Y %H:%M", time.localtime())
    cpt.create_dirs(op.join(report_name, "details"))
    if isinstance(df, cpp.DataSet):
        df = df.data
    print("* creating overview...")
    header = "{}\n<h2>Cell Painting Overview Report</h2>\n".format(cprt.LOGO)
    title = "Overview"
    if plate is not None:
        title = plate
        header += "<h3>Plate {}</h3>\n".format(plate)
    header += "<p>({})</p>\n".format(date)
    if highlight:
        highlight_legend = cprt.HIGHLIGHT_LEGEND
    else:
        highlight_legend = ""
    overview, detailed_cpds = overview_report(
        df, cutoff=cutoff, highlight=highlight, mode=mode)
    overview = header + overview + highlight_legend
    write_page(overview, title=title, fn=overview_fn, templ=cprt.OVERVIEW_HTML_INTRO)
    # print(detailed_cpds)
    print("* creating detailed reports...")
    print("  * loading control images...")
    ctrl_images = load_control_images(src_dir)
    print("  * writing individual reports...")
    # Only compounds flagged by overview_report() get a detail page.
    df_detailed = df[df["Well_Id"].isin(detailed_cpds)]
    for _, rec in df_detailed.iterrows():
        well_id = rec["Well_Id"]
        fn = op.join(report_name, "details", "{}.html".format(sanitize_filename(well_id)))
        title = "{} Details".format(well_id)
        # similar = detailed_cpds[well_id]
        details = detailed_report(rec, src_dir, ctrl_images)
        write_page(details, title=title, fn=fn, templ=cprt.DETAILS_HTML_INTRO)
    print("* done.")
    return HTML('<a href="{}">{}</a>'.format(overview_fn, "Overview"))
| apahl/cellpainting | cellpainting/reporting.py | Python | mit | 19,241 | [
"RDKit"
] | b2a15e9fbdc0c7a734dbc74372b4c8bac0ba9bc7fb3cb6083a02772658535a19 |
import cProfile
import pstats
import math
import string
import sys
import struct
import matplotlib
import numpy as np
import scipy.ndimage
import scipy.stats as ss
import scipy.signal
import scipy as sp
import scipy.odr as odr
import glob
import os
import gzip
import tarfile
import shutil
import congrid
import astropy.io.ascii as ascii
import warnings
import subprocess
import photutils
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import *
import astropy.io.fits as pyfits
import statmorph
import datetime
import setup_synthetic_images_mp as ssimp
def process_snapshot(subdirpath='.',mockimage_parameters=None,clobber=False, max=None, galaxy=None,seg_filter_label='NC-F200W',magsb_limits=[23.0,25.0,27.0,29.0],camindices=[0,1,2,3],do_idl=False,analyze=True,use_nonscatter=True,Np=2):
cwd = os.path.abspath(os.curdir)
os.chdir(subdirpath)
bbfile_list = np.sort(np.asarray(glob.glob('broadbandz.fits*'))) #enable reading .fits.gz files
print(bbfile_list)
if galaxy is not None:
thisbb = np.where(bbfile_list==galaxy)[0]
bbfile_list= bbfile_list[thisbb]
test_file = bbfile_list[0]
tf = pyfits.open(test_file)
print(tf.info())
print(tf['BROADBAND'].header.cards)
print(tf['SFRHIST'].header.get('star_adaptive_smoothing'))
print(tf['SFRHIST'].header.get('star_radius_factor'))
#this is critical for later
fils = tf['FILTERS'].data.field('filter')
print(fils)
filters_to_analyze = ['hst/acs_f435w','hst/acs_f606w','hst/acs_f775w','hst/acs_f850lp',
'hst/wfc3_f105w','hst/wfc3_f125w','hst/wfc3_f160w',
'jwst/nircam_f070w', 'jwst/nircam_f090w','jwst/nircam_f115w', 'jwst/nircam_f150w',
'jwst/nircam_f200w', 'jwst/nircam_f277w', 'jwst/nircam_f356w', 'jwst/nircam_f444w',
'hst/wfc3_f140w',
'hst/wfc3_f275w', 'hst/wfc3_f336w',
'hst/acs_f814w',
'jwst/miri_F560W','jwst/miri_F770W','jwst/miri_F1000W','jwst/miri_F1130W',
'jwst/miri_F1280W','jwst/miri_F1500W','jwst/miri_F1800W','jwst/miri_F2100W','jwst/miri_F2550W']
skip_filter_boolean = [False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,]
print(filters_to_analyze)
pixsize_arcsec = [0.03,0.03,0.03,0.03,0.06,0.06,0.06,0.032,0.032,0.032,0.032,0.032,0.065,0.065,0.065,0.06,0.03,0.03,0.03,
0.11,0.11,0.11,0.11,0.11,0.11,0.11,0.11,0.11]
filter_labels = ['ACS-F435W','ACS-F606W','ACS-F775W','ACS-F850LP','WFC3-F105W','WFC3-F125W','WFC3-F160W',
'NC-F070W','NC-F090W','NC-F115W','NC-F150W','NC-F200W','NC-F277W','NC-F356W','NC-F444W',
'WFC3-F140W','WFC3-F275W','WFC3-F336W','ACS-F814W',
'MIRI-F560W','MIRI-F770W','MIRI-F1000W','MIRI-F1130W',
'MIRI-F1280W','MIRI-F1500W','MIRI-F1800W','MIRI-F2100W','MIRI-F2550W']
filter_indices = []
print(len(filters_to_analyze), len(skip_filter_boolean), len(filter_labels))
for i,f in enumerate(filters_to_analyze):
fi = np.where(fils==f)
print(fi[0][0], f, fils[fi[0][0]], filter_labels[i]) #, filters_to_analyze[fi]
filter_indices.append(fi[0][0])
filter_indices = np.asarray(filter_indices)
print(filter_indices)
#order of filter_labels in wavelength space (i.e, F435W is in the "2" position)
filter_lambda_order = [2,3,4,6,7,8,10,
11,12,13,14,15,16,17,18,
9,0,1,5,
19,20,21,22,
23,24,25,26,27]
#photfnu units Jy; flux in 1 ct/s
photfnu_Jy = [1.96e-7,9.17e-8,1.97e-7,4.14e-7,
1.13e-7,1.17e-7,1.52e-7,
5.09e-8,3.72e-8,3.17e-8,2.68e-8,2.64e-8,2.25e-8,2.57e-8,2.55e-8,
9.52e-8,8.08e-7,4.93e-7,1.52e-7,
5.75e-8,3.10e-8,4.21e-8,1.39e-7,
4.65e-8,4.48e-8,5.88e-8,4.98e-8,1.15e-7]
morphcode_dir = "/Users/gsnyder/Documents/pro/morph_december2013/morph_pro/"
morphcode_files = np.asarray(glob.glob(os.path.join(morphcode_dir,"*.*")))
#se_dir = '/Users/gsnyder/Documents/Projects/Illustris_Morphology/Illustris-CANDELS/SE_scripts'
#se_files = np.asarray(glob.glob(os.path.join(se_dir,"*.*")))
psf_files = []
psf_dir = os.path.expandvars('$GFS_PYTHON_CODE/vela-yt-sunrise/kernels')
#psf_names = ['PSFSTD_ACSWFC_F435W.fits','PSFSTD_ACSWFC_F606W.fits','PSFSTD_ACSWFC_F775W_SM3.fits','PSFSTD_ACSWFC_F850L_SM3.fits',
# 'PSFSTD_WFC3IR_F105W.fits','PSFSTD_WFC3IR_F125W.fits','PSFSTD_WFC3IR_F160W.fits',
# 'PSF_NIRCam_F070W_revV-1.fits','PSF_NIRCam_F090W_revV-1.fits','PSF_NIRCam_F115W_revV-1.fits','PSF_NIRCam_F150W_revV-1.fits',
# 'PSF_NIRCam_F200W_revV-1.fits','PSF_NIRCam_F277W_revV-1.fits','PSF_NIRCam_F356W_revV-1.fits','PSF_NIRCam_F444W_revV-1.fits',
# 'PSFSTD_WFC3IR_F140W.fits','PSFSTD_WFC3UV_F275W.fits','PSFSTD_WFC3UV_F336W.fits','PSFSTD_ACSWFC_F814W.fits']
psf_names = ['TinyTim_IllustrisPSFs/F435W_rebin.fits','TinyTim_IllustrisPSFs/F606W_rebin.fits','TinyTim_IllustrisPSFs/F775W_rebin.fits','TinyTim_IllustrisPSFs/F850LP_rebin.fits',
'TinyTim_IllustrisPSFs/F105W_rebin.fits','TinyTim_IllustrisPSFs/F125W_rebin.fits','TinyTim_IllustrisPSFs/F160W_rebin.fits',
'WebbPSF_F070W_trunc.fits','WebbPSF_F090W_trunc.fits','WebbPSF_F115W_trunc.fits','WebbPSF_F150W_trunc.fits',
'WebbPSF_F200W_trunc.fits','WebbPSF_F277W_trunc.fits','WebbPSF_F356W_trunc.fits','WebbPSF_F444W_trunc.fits',
'TinyTim_IllustrisPSFs/F140W_rebin.fits','TinyTim_IllustrisPSFs/F275W_rebin.fits','TinyTim_IllustrisPSFs/F336W_rebin.fits','TinyTim_IllustrisPSFs/F814W_rebin.fits',
'WebbPSF_F560W_trunc.fits','WebbPSF_F770W_trunc.fits','WebbPSF_F1000W_trunc.fits','WebbPSF_F1130W_trunc.fits',
'WebbPSF_F1280W_trunc.fits','WebbPSF_F1500W_trunc.fits','WebbPSF_F1800W_trunc.fits','WebbPSF_F2100W_trunc.fits','WebbPSF_F2550W_trunc.fits']
#psf_pix_arcsec = [0.0125,0.0125,0.0125,0.0125,0.0325,0.0325,0.0325,0.007925,0.007925,0.007925,0.007925,0.007925,0.0162,0.0162,0.0162,0.0325,0.0100,0.0100,0.0125]
#switch to JWST detector sampling for efficiency. They're model psfs anyway, full accuracy not essential
psf_pix_arcsec = [0.03,0.03,0.03,0.03,0.06,0.06,0.06,0.0317,0.0317,0.0317,0.0317,0.0317,0.0648,0.0648,0.0648,0.06,0.03,0.03,0.03,0.11,0.11,0.11,0.11,0.11,0.11,0.11,0.11,0.11]
psf_truncate = [None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None]
psf_hdu_num = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
psf_fwhm = [0.10,0.11,0.12,0.13,0.14,0.17,0.20,0.11,0.11,0.11,0.11,0.12,0.15,0.18,0.25,0.18,0.07,0.08,0.13,
0.035*5.61,0.035*7.57,0.035*9.90,0.035*11.30,0.035*12.75,0.035*14.96,0.035*17.90,0.035*20.65,0.035*25.11]
#these settings yield full subhalo (4 cams) convolution in 0.92s! convolve_fft ftw!
for pname in psf_names:
psf_file = os.path.join(psf_dir,pname)
psf_files.append(psf_file)
print(psf_file, os.path.lexists(psf_file))
### PSFSTD; WFC3 = 0.06 arcsec, ACS = 0.03 arcsec... I think
### NIRCAM in header with keyword 'PIXELSCL'; short 0.07925 long 0.0162
## acs wfc 0.05 arcsec pixels... PSFSTD x4 oversample?
## wfc3 ir 0.13 arcsec
## wfc3 uv 0.04 arcsec
mockimage_parameters = ssimp.analysis_parameters('mockimage_default')
mockimage_parameters.filter_indices = filter_indices
mockimage_parameters.filter_labels = filter_labels
mockimage_parameters.pixsize_arcsec = pixsize_arcsec
mockimage_parameters.morphcode_base = morphcode_dir
mockimage_parameters.morphcode_files = morphcode_files
#mockimage_parameters.se_base = se_dir
#mockimage_parameters.se_files = se_files
mockimage_parameters.camera_indices = camindices #None #by default, do all
mockimage_parameters.psf_files = psf_files
mockimage_parameters.psf_pix_arcsec = psf_pix_arcsec
mockimage_parameters.psf_truncate = psf_truncate
mockimage_parameters.psf_hdu_num = psf_hdu_num
mockimage_parameters.magsb_limits = magsb_limits
mockimage_parameters.psf_fwhm_arcsec = psf_fwhm
mockimage_parameters.photfnu_Jy = photfnu_Jy
mockimage_parameters.filter_lambda_order = filter_lambda_order
mockimage_parameters.skip_filters = skip_filter_boolean
mockimage_parameters.use_nonscatter = use_nonscatter
#use exactly one detection and segmentation per object, depending on redshift
#enormous simplification
#observationally, go w deepest filter. here... ?
mockimage_parameters.segment_filter_label = seg_filter_label
mockimage_parameters.segment_filter_index = np.where(np.asarray(mockimage_parameters.filter_labels) == seg_filter_label)[0][0]
print(mockimage_parameters.segment_filter_label)
print(mockimage_parameters.segment_filter_index)
assert(len(psf_pix_arcsec)==len(pixsize_arcsec))
assert(len(filter_labels)==len(mockimage_parameters.psf_files))
bbdirs = []
for i,bbfile in enumerate(bbfile_list):
try:
bbdir = ssimp.process_single_broadband(bbfile,mockimage_parameters,clobber=clobber,do_idl=do_idl,analyze=analyze,bbase="broadbandz",Np=Np)
bbdirs.append(bbdir)
except (KeyboardInterrupt,NameError,AttributeError,KeyError,TypeError,IndexError) as e:
print(e)
raise
except:
print("Exception while processing broadband: ", bbfile)
print("Error:", sys.exc_info()[0])
else:
print("Successfully processed broadband: ", bbfile)
os.chdir(cwd)
return bbdirs
if __name__=="__main__":
#res = process_snapshot(subdirpath='.',clobber=False,seg_filter_label='NC-F200W',magsb_limits=[25.0,27.0],camindices=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],do_idl=False,analyze=True,use_nonscatter=False,Np=4)
res = process_snapshot(subdirpath='.',clobber=False,seg_filter_label='NC-F200W',magsb_limits=[25.0,27.0],camindices=[0,1,2,3,4,5,6,7,8,9],do_idl=False,analyze=True,use_nonscatter=True,Np=4)
| gsnyder206/synthetic-image-morph | candelize_enzo.py | Python | gpl-2.0 | 11,398 | [
"Galaxy"
] | 613027250d2d27b4f7fc5587f3ffcff9eb196b403376bb3977f5557e32b4288c |
"""
Copyright (c) 2015 Andreea Georgescu
Created on Sun Mar 1 19:51:42 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from experiment import *
from experiment_HaloIndep_er import *
from math import *
from globalfnc import *
import parallel_map as par
from scipy.linalg import det, inv
# from scipy.special import lambertw
from lambertw import *
from scipy.optimize import brentq
import os
from scipy.stats import poisson
import numpy.random as random
class Experiment_HaloIndep(Experiment):
    """ Base class that implements the halo-independent analysis common to all
    experiments, using vmin as independent variable in the integration.
    Input:
        exper_name: string
            Name of experiment.
        scattering_type: string
            Type of scattering. Can be
                - 'SI' (spin-independent)
                - 'SDAV' (spin-independent, axial-vector)
                - 'SDPS' (spin-independent, pseudo-scalar)
        mPhi: float, optional
            The mass of the mediator. If not given, it corresponds to contact interaction.
    """
    def __init__(self, exper_name, scattering_type, mPhi=mPhiRef):
        Experiment.__init__(self, exper_name, scattering_type, mPhi)
        # Dispatch the response integral on the detector's resolution model:
        # a Dirac-delta resolution collapses the Eee integral analytically,
        # any finite resolution requires numerical quadrature over Eee.
        if self.energy_resolution_type == "Dirac":
            self.Response = self._Response_Dirac
        else:
            self.Response = self._Response_Finite

    def DifferentialResponse(self, Eee, qER, const_factor):
        """ Differential response function d**2 R / (d Eee d ER)
        NOT including the velocity integral eta0
        Input:
            Eee: float or ndarray
                Measured energy (electron equivalent).
            qER: float or ndarray
                q * ER for quenching factor q and recoil energy ER.
            const_factor: ndarray
                Factors entering the differential response that do not depend on Eee.
        """
        # Profiling counter only; has no effect on the result.
        self.count_diffresponse_calls += 1
        # The final sum runs over the target isotopes (one entry per qER value).
        r_list = const_factor * self.Efficiency(Eee) * \
            np.array([self.ResolutionFunction(Eee, qer, self.EnergyResolution(qer))
                      for qer in qER])
        return r_list.sum()

    def ConstFactor(self, vmin, mx, fp, fn, delta, sign):
        """ Collects the factors that don't depend on the measured energy Eee,
        so they only need to be computed once in Response function.
        Input:
            vmin: float
                Minimum DM speed for the recoil branch.
            mx, fp, fn, delta: float
                DM mass, couplings to protons/neutrons, mass splitting.
            sign: int
                +1 or -1, selects the recoil-energy branch for inelastic
                scattering (delta != 0).
        Returns:
            (ER, qER, const_factor): tuple
        """
        ER = ERecoilBranch(vmin, self.mT, mx, delta, sign)
        q = self.QuenchingFactor(ER)
        qER = q * ER
        efficiencyER = self.Efficiency_ER(ER)
        # kilogram/SpeedOfLight**2 converts the rate to the units used downstream;
        # |dER/dvmin| is the Jacobian of the ER -> vmin change of variable.
        const_factor = kilogram/SpeedOfLight**2 * \
            self.CrossSectionFactors(ER, mx, fp, fn, delta) * \
            np.abs(dERecoildVmin(vmin, self.mT, mx, delta, sign)) * efficiencyER
        return (ER, qER, const_factor)

    def DifferentialResponse_Full(self, vmin, Eee, mx, fp, fn, delta, sign):
        """ Differential response function d**2 R / (d Eee d ER)
        NOT including the velocity integral eta0
        Same as DifferentialResponse, but computed given full input parameters,
        instead of the pre-computed const_factor.
        """
        (ER, qER, const_factor) = self.ConstFactor(vmin, mx, fp, fn, delta, sign)
        return self.DifferentialResponse(Eee, qER, const_factor)

    def _Response_Finite(self, vmin, Eee1, Eee2, mx, fp, fn, delta):
        """ Response function integral d**2 R / (d Eee d ER) between measured energies
        Eee1 and Eee2.
        NOT including eta0.
        For any finite resolution function (i.e. other than Dirac Delta).
        """
        self.count_response_calls += 1
        # Elastic scattering has a single recoil branch; inelastic has two.
        if delta == 0:
            branches = [1]
        else:
            branches = [1, -1]
        result = 0
        for sign in branches:
            (ER, qER, const_factor) = self.ConstFactor(vmin, mx, fp, fn, delta, sign)
            result += integrate.quad(self.DifferentialResponse, Eee1, Eee2,
                                     args=(qER, const_factor),
                                     epsrel=PRECISSION, epsabs=0)[0]
        # Guard against tiny negative values from the quadrature; the physical
        # response is non-negative.
        if result >= 0:
            return result
        return 0

    def _Response_Dirac(self, vmin, Eee1, Eee2, mx, fp, fn, delta):
        """ Response function integral d**2 R / (d Eee d ER) between measured energies
        Eee1 and Eee2.
        NOT including eta0.
        For Dirac Delta resolution function.
        """
        self.count_response_calls += 1
        if delta == 0:
            branches = [1]
        else:
            branches = [1, -1]
        r_list_sum = 0
        for sign in branches:
            ER = ERecoilBranch(vmin, self.mT, mx, delta, sign)
            q = self.QuenchingFactor(ER)
            qER = q * ER
            # Indicator: for a Dirac resolution the Eee integral picks out
            # exactly the isotopes whose quenched energy falls in [Eee1, Eee2).
            integrated_delta = np.array([1. if Eee1 <= i < Eee2 else 0. for i in qER])
            try:
                efficiencyEee = self.Efficiency(Eee1, qER)
            except TypeError:
                # Some experiments define Efficiency with a single argument;
                # fall back to unit efficiency in Eee.
                efficiencyEee = 1.
#            efficiencyER = self.Efficiency_ER(qER)
            efficiencyER = np.array(list(map(self.Efficiency_ER, qER)))
            r_list = kilogram/SpeedOfLight**2 * \
                self.CrossSectionFactors(ER, mx, fp, fn, delta) * \
                np.abs(dERecoildVmin(vmin, self.mT, mx, delta, sign)) * \
                efficiencyEee * efficiencyER * integrated_delta
            r_list_sum += r_list.sum()
        return r_list_sum

    def IntegratedResponse(self, vmin1, vmin2, Eee1, Eee2, mx, fp, fn, delta):
        """ Integrated Response Function between measured energies Eee1 and Eee2,
        and all recoil energies ER.
        NOT including eta0.
        For any finite resolution function (i.e. other than Dirac Delta).
        """
        midpoints = []
        integr = integrate.quad(self.Response, vmin1, vmin2,
                                args=(Eee1, Eee2, mx, fp, fn, delta), points=midpoints,
                                epsrel=PRECISSION, epsabs=0)
        return integr[0]

    def IntegratedResponseTable(self, vmin_list, E1, E2, mx, fp, fn, delta):
        # One integrated response per consecutive [vmin_a, vmin_{a+1}] interval.
        return np.array([self.IntegratedResponse(vmin_list[a], vmin_list[a+1],
                                                 E1, E2, mx, fp, fn, delta)
                         for a in range(0, vmin_list.size - 1)])
class MaxGapExperiment_HaloIndep(Experiment_HaloIndep):
    """ Class for experiments using the Maximum Gap Method.
    Input:
        exper_name: string
            Name of experiment.
        scattering_type: string
            Type of scattering.
        mPhi: float, optional
            Mass of the mediator.
        quenching_factor: float, optional
            Quenching factor. If not given, the default used is specified in the data
            modules.
    """
    def __init__(self, exper_name, scattering_type, mPhi=mPhiRef, quenching_factor=None):
        super().__init__(exper_name, scattering_type, mPhi)
        module = import_file(INPUT_DIR + exper_name + ".py")
        self.ERecoilList = module.ERecoilList
        # Energy interval edges for the maximum-gap construction: the observed
        # events inside the analysis window, bracketed by the threshold and
        # the maximum analyzed energy.
        self.ElistMaxGap = np.append(np.insert(
            np.array(list(filter(lambda x: self.Ethreshold < x < self.Emaximum,
                                 self.ERecoilList))),
            0, self.Ethreshold), self.Emaximum)

    def TabulateMaximumGapLimit(self, vmin1, vmin2, mx, fp, fn, delta):
        """ Integrated response (NOT including eta0) in each maximum-gap
        energy interval, for the vmin slice [vmin1, vmin2].
        Returns:
            ndarray of size self.ElistMaxGap.size - 1
        """
        print("vmin = ", vmin2)
        return np.array(list(map(lambda i, j:
                                 self.IntegratedResponse(vmin1, vmin2, i, j, mx, fp, fn, delta),
                                 self.ElistMaxGap[:-1], self.ElistMaxGap[1:])))

    def MaximumGapUpperBound(self, vmin_min, vmin_max, vmin_step, mx, fp, fn, delta,
                             output_file, processes=None):
        """ Maximum-gap upper bound on eta0 as a function of vmin.
        Appends log10([mx, bound]) rows to output_file and returns the table
        of bounds (one per vmin in the scan).
        """
        # np.linspace requires an integer num; the float expression
        # (vmin_max - vmin_min)/vmin_step + 1 is rejected by modern NumPy.
        num_steps = int(round((vmin_max - vmin_min) / vmin_step)) + 1
        vmin_list = np.linspace(vmin_min, vmin_max, num_steps)
        vmin_list0 = np.insert(vmin_list, 0, 0.)
        xtable = np.zeros(self.ElistMaxGap.size - 1)
        upperlimit_table = np.array([])
        kwargs = ({'vmin1': vmin_list0[v_index], 'vmin2': vmin_list[v_index],
                   'mx': mx, 'fp': fp, 'fn': fn, 'delta': delta}
                  for v_index in range(vmin_list.size))
        # Tabulate each vmin slice in parallel, then accumulate slices so that
        # xtable at step v is the response integrated from 0 to vmin_list[v].
        xtable_list = par.parmap(self.TabulateMaximumGapLimit, kwargs, processes)
        for v_index in range(vmin_list.size):
            xtable += xtable_list[v_index]
            mu_scaled = xtable.sum()
            x_scaled = np.max(xtable)
            if x_scaled == 0:
                mu_over_x = np.inf
                result = [np.inf]
            else:
                mu_over_x = mu_scaled / x_scaled
                # Solve C0(x, mu) = CL for the scaled bound; the Lambert-W
                # expression provides a good starting point for fsolve.
                y_guess = np.real(-lambertw(-0.1 / mu_over_x, -1))
                y = fsolve(lambda x: MaximumGapC0scaled(x, mu_over_x) - ConfidenceLevel,
                           y_guess)
                result = y / x_scaled / self.Exposure
            result = result[0]
            print("vmin = ", vmin_list[v_index], "   mu_over_x = ", mu_over_x)
            print("xtable = ", xtable)
            print("mu_over_x =", mu_over_x)
            print("y_guess =", y_guess)
            print("y =", y)
            print("x_scaled =", x_scaled)
            print("result = ", result)
            to_print = np.log10(np.array([[mx, result]]))
            with open(output_file, 'ab') as f_handle:
                np.savetxt(f_handle, to_print)
            upperlimit_table = np.append(upperlimit_table, [result])
        return upperlimit_table

    def UpperLimit(self, mx, fp, fn, delta, vmin_min, vmin_max, vmin_step,
                   output_file, processes=None, **unused_kwargs):
        """ Upper limit on log10(eta0) vs vmin; rows with an infinite bound
        are dropped from the returned array.
        """
        upper_limit = self.MaximumGapUpperBound(vmin_min, vmin_max, vmin_step, mx,
                                                fp, fn, delta, output_file,
                                                processes=processes)
        # Integer num for np.linspace (see MaximumGapUpperBound).
        num_steps = int(round((vmin_max - vmin_min) / vmin_step)) + 1
        vmin_list = np.linspace(vmin_min, vmin_max, num_steps)
        print("vmin_list = ", vmin_list)
        print("upper_limit = ", upper_limit)
        result = np.transpose([vmin_list, np.log10(upper_limit)])
        print("res = ", result)
        return result[result[:, 1] != np.inf]
class PoissonExperiment_HaloIndep(Experiment_HaloIndep):
    """ Class for experiments with Poisson analysis.
    Input:
        exper_name: string
            Name of experiment.
        scattering_type: string
            Type of scattering.
        mPhi: float, optional
            Mass of the mediator.
        quenching_factor: float, optional
            Quenching factor. If not given, the default used is specified in the data
            modules.
    """
    def __init__(self, exper_name, scattering_type, mPhi=mPhiRef, quenching_factor=None):
        super().__init__(exper_name, scattering_type, mPhi)
        module = import_file(INPUT_DIR + exper_name + ".py")
        # Poisson upper limit on the expected number of events.
        self.Expected_limit = module.Expected_limit

    def _PoissonUpperBound(self, vmin, mx, fp, fn, delta):
        """ Upper bound on log10(eta0) at a single vmin from the Poisson
        limit on the number of events.
        Returns:
            [vmin, log10(bound)] (bound is np.inf when the response vanishes)
        """
        print('vmin =', vmin)
        muT = self.mT * mx / (self.mT + mx)
        # Maximum quenched energy reachable at this vmin, over all isotopes.
        Eee_max = max(2e6 * muT**2 * (vmin/SpeedOfLight)**2 / self.mT)
        print("self.Ethreshold =", self.Ethreshold)
        print("Eee_max =", Eee_max)
        int_response = self.IntegratedResponse(0, vmin, self.Ethreshold, Eee_max,
                                               mx, fp, fn, delta)
        print("int_response =", int_response)
        if int_response > 0:
            result = np.log10(self.Expected_limit / self.Exposure / int_response)
        else:
            result = np.inf
        return [vmin, result]

    def UpperLimit(self, mx, fp, fn, delta, vmin_min, vmin_max, vmin_step,
                   output_file, processes=None, **unused_kwargs):
        """ Upper limit on log10(eta0) vs vmin, appended to output_file.
        Rows with an infinite bound are dropped.
        """
        # np.linspace requires an integer num; the original float expression
        # (vmin_max - vmin_min)/vmin_step + 1 is rejected by modern NumPy.
        num_steps = int(round((vmin_max - vmin_min) / vmin_step)) + 1
        vmin_list = np.linspace(vmin_min, vmin_max, num_steps)
        kwargs = ({'vmin': vmin, 'mx': mx, 'fp': fp, 'fn': fn, 'delta': delta}
                  for vmin in vmin_list)
        upper_limit = np.array(par.parmap(self._PoissonUpperBound, kwargs, processes))
        upper_limit = upper_limit[upper_limit[:, 1] != np.inf]
        print("upper_limit = ", upper_limit)
        with open(output_file, 'ab') as f_handle:
            np.savetxt(f_handle, upper_limit)
        return upper_limit
class GaussianExperiment_HaloIndep(Experiment_HaloIndep):
    """ Class for experiments with Gaussian analysis.
    Input:
        exper_name: string
            Name of experiment.
        scattering_type: string
            Type of scattering.
        mPhi: float, optional
            Mass of the mediator.
        quenching_factor: float, optional
            Quenching factor. If not given, the default used is specified in the data
            modules.
    """
    def __init__(self, exper_name, scattering_type, mPhi=mPhiRef, quenching_factor=None):
        super().__init__(exper_name, scattering_type, mPhi)
        module = import_file(INPUT_DIR + exper_name + ".py")
        self.BinEdges_left = module.BinEdges_left
        self.BinEdges_right = module.BinEdges_right
        self.BinData = module.BinData
        self.BinError = module.BinError
        self.BinSize = module.BinSize
        self.chiSquared = chi_squared(self.BinData.size)
        self.Expected_limit = module.Expected_limit * self.BinSize
        if quenching_factor is not None:
            # Override the energy-dependent quenching with a constant value.
            self.QuenchingFactor = lambda e: quenching_factor
        print('BinData', self.BinData)
        print('BinError', self.BinError)

# If you want to calculate the limit as is done in fig 2 of arxiv 1409.5446v2
#        self.BinData = module.BinData/module.Exposure
#        self.chiSquared = chi_squared(self.BinData.size)
#        self.Expected_limit = module.Expected_limit * self.BinSize

    def _GaussianUpperBound(self, vmin, mx, fp, fn, delta):
        """ Upper bound on log10(eta0) at a single vmin: the tightest positive
        bound over all energy bins.
        Returns:
            [vmin, log10(bound)]
        """
        int_response = \
            np.array(list(map(lambda i, j:
                              self.IntegratedResponse(0, vmin, i, j, mx, fp, fn, delta),
                              self.BinEdges_left, self.BinEdges_right)))
        result = [i for i in self.Expected_limit / int_response if i > 0]
        result = np.min(result)
        if result > 0:
            result = np.log10(result)
        else:
            result = np.inf
        print("(vmin, result) =", (vmin, result))
        return [vmin, result]

    def UpperLimit(self, mx, fp, fn, delta, vmin_min, vmin_max, vmin_step,
                   output_file, processes=None, **unused_kwargs):
        """ Upper limit on log10(eta0) vs vmin, appended to output_file.
        Rows with an infinite bound are dropped.
        """
        # np.linspace requires an integer num; the original float expression
        # (vmin_max - vmin_min)/vmin_step + 1 is rejected by modern NumPy.
        num_steps = int(round((vmin_max - vmin_min) / vmin_step)) + 1
        vmin_list = np.linspace(vmin_min, vmin_max, num_steps)
        kwargs = ({'vmin': vmin, 'mx': mx, 'fp': fp, 'fn': fn, 'delta': delta}
                  for vmin in vmin_list)
        upper_limit = np.array(par.parmap(self._GaussianUpperBound, kwargs, processes))
        upper_limit = upper_limit[upper_limit[:, 1] != np.inf]
        print("upper_limit = ", upper_limit)
        with open(output_file, 'ab') as f_handle:
            np.savetxt(f_handle, upper_limit)
        return upper_limit

    def _MinusLogLikelihood(self, vars_list, mx, fp, fn, delta,
                            vminStar=None, logetaStar=None, vminStar_index=None):
        """ Compute -log(L)
        Input:
            vars_list: ndarray
                List of variables [vmin_1, ..., vmin_No, log(eta_1), ..., log(eta_No)]
            vminStar, logetaStar: float, optional
                Values of fixed vmin^* and log(eta)^*.
        Returns:
            -2log(L): float
        """
        # NOTE: with `from __future__ import division`, vars_list.size/2 is a
        # float and NumPy rejects float indices; use integer floor division.
        if vminStar is None:
            vmin_list_w0 = vars_list[: vars_list.size // 2]
            logeta_list = vars_list[vars_list.size // 2:]
        else:
            vmin_list_w0 = np.insert(vars_list[: vars_list.size // 2],
                                     vminStar_index, vminStar)
            logeta_list = np.insert(vars_list[vars_list.size // 2:],
                                    vminStar_index, logetaStar)
        vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
        rate_partials = [None] * (self.BinEdges_left.size)
        for x in range(0, self.BinEdges_left.size):
            resp_integr = self.IntegratedResponseTable(vmin_list_w0,
                                                       self.BinEdges_left[x],
                                                       self.BinEdges_right[x],
                                                       mx, fp, fn, delta)
            # Predicted rate in bin x: sum over vmin steps weighted by eta.
            rate_partials[x] = np.dot(10**logeta_list, resp_integr)
        result = 0
        if self.energy_resolution_type == "Dirac":
            self.Response = self._Response_Dirac
        else:
            self.Response = self._Response_Finite
        # Gaussian chi-squared over the non-empty bins.
        for x in range(0, self.BinData.size):
            if self.BinData[x] != 0:
                result += ((rate_partials[x] - self.BinData[x] / self.Exposure) ** 2.0 /
                           (self.BinError[x] ** 2.0))
        return result

    def KKT_Condition_Q(self, vars_list, mx, fp, fn, delta,
                        vminStar=None, logetaStar=None, vminStar_index=None):
        """This is intended to calculate the contribution to q(vmin) from a particular experiment.
        It may currently have issues, has not been tested.
        """
        # Integer floor division for NumPy slicing (see _MinusLogLikelihood).
        if vminStar is None:
            vmin_list_w0 = vars_list[: vars_list.size // 2]
            logeta_list = vars_list[vars_list.size // 2:]
        else:
            # Previously missing branch: a call with a fixed (vminStar,
            # logetaStar) point raised NameError. Mirrors
            # Poisson_Likelihood.KKT_Condition_Q.
            vmin_list_w0 = np.insert(vars_list[: vars_list.size // 2],
                                     vminStar_index, vminStar)
            logeta_list = np.insert(vars_list[vars_list.size // 2:],
                                    vminStar_index, logetaStar)
        vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
        self.curly_H_tab = np.zeros((self.BinData.size, 1001))
        self.Q_contrib = np.zeros((self.BinData.size, 1001))
        rate_partials = [None] * (self.BinEdges_left.size)
        for x in range(0, self.BinEdges_left.size):
            resp_integr = self.IntegratedResponseTable(vmin_list_w0,
                                                       self.BinEdges_left[x],
                                                       self.BinEdges_right[x],
                                                       mx, fp, fn, delta)
            rate_partials[x] = np.dot(10**logeta_list, resp_integr)
        if self.energy_resolution_type == "Dirac":
            self.Response = self._Response_Dirac
        else:
            self.Response = self._Response_Finite
        result = np.zeros((self.BinData.size, 1001))
        # TODO parallelize this section of the code
        # When False, a previously computed table is read back from disk.
        calculate_Q = False
        if calculate_Q:
            for x in range(0, self.BinData.size):
                for v_dummy in range(1, 1001):
                    self.curly_H_tab[x, v_dummy] =\
                        integrate.quad(self.Response, min(VminDelta(self.mT, mx, delta)),
                                       v_dummy, args=(self.BinEdges_left[x],
                                                      self.BinEdges_right[x], mx,
                                                      fp, fn, delta),
                                       epsrel=PRECISSION, epsabs=0)[0]
                    if self.BinData[x] != 0:
                        result[x, v_dummy] = (2.0 * ((rate_partials[x] - self.BinData[x] /
                                                      self.Exposure) / self.BinError[x] ** 2.0)
                                              * self.curly_H_tab[x, v_dummy])
                        self.Q_contrib[x, v_dummy] = result[x, v_dummy]
            file = Output_file_name(self.name, self.scattering_type, self.mPhi, mx, fp, fn, delta,
                                    F, "_KKT_Cond_1", "../Output_Band/") + ".dat"
            f_handle = open(file, 'wb')   # clear the file first
            np.savetxt(f_handle, self.Q_contrib)
            f_handle.close()
        else:
            file = Output_file_name(self.name, self.scattering_type, self.mPhi, mx, fp, fn, delta,
                                    F, "_KKT_Cond_1", "../Output_Band/") + ".dat"
            f_handle = open(file, 'rb')
            self.Q_contrib = np.loadtxt(f_handle)
            f_handle.close()
        print('Obtained Variational of Likelihood')
        return self.Q_contrib
class Poisson_Likelihood(Experiment_HaloIndep):
    """ Class for multi EHI method experiments with binned Poisson analysis.
    Input:
        exper_name: string
            Name of experiment.
        scattering_type: string
            Type of scattering.
        mPhi: float, optional
            Mass of the mediator.
        quenching_factor: float, optional
            Quenching factor. If not given, the default used is specified in the data
            modules.
    """
    def __init__(self, exper_name, scattering_type, mPhi=mPhiRef, quenching_factor=None):
        super().__init__(exper_name, scattering_type, mPhi)
        module = import_file(INPUT_DIR + exper_name + ".py")
        self.BinEdges_left = module.BinEdges_left
        self.BinEdges_right = module.BinEdges_right
        self.BinData = module.BinData
        self.BinSize = module.BinSize
        self.BinBkgr = module.BinBkgr
        self.BinExposure = module.BinExposure
        # Expected-event limit converted to a rate limit per bin.
        self.Expected_limit = module.Expected_limit / self.BinExposure
        # Exposure-weighted average target mass.
        self.mT_avg = np.sum(module.target_nuclide_AZC_list[:, 2] * module.target_nuclide_mass_list)
        print('BinData', self.BinData)

    def UpperLimit(self, mx, fp, fn, delta, vmin_min, vmin_max, vmin_step,
                   output_file, processes=None, **unused_kwargs):
        """ Upper limit on log10(eta0) vs vmin, appended to output_file.
        Rows with an infinite bound are dropped.
        """
        # np.linspace requires an integer num; the original float expression
        # (vmin_max - vmin_min)/vmin_step + 1 is rejected by modern NumPy.
        num_steps = int(round((vmin_max - vmin_min) / vmin_step)) + 1
        vmin_list = np.linspace(vmin_min, vmin_max, num_steps)
        kwargs = ({'vmin': vmin, 'mx': mx, 'fp': fp, 'fn': fn, 'delta': delta}
                  for vmin in vmin_list)
        upper_limit = np.array(par.parmap(self._PoissonUpperBound, kwargs, processes))
        upper_limit = upper_limit[upper_limit[:, 1] != np.inf]
        print("upper_limit = ", upper_limit)
        with open(output_file, 'ab') as f_handle:
            np.savetxt(f_handle, upper_limit)
        return upper_limit

    def _PoissonUpperBound(self, vmin, mx, fp, fn, delta):
        """ Upper bound on log10(eta0) at a single vmin: the tightest positive
        bound over all energy bins.
        """
        int_response = \
            np.array(list(map(lambda i, j:
                              self.IntegratedResponse(0, vmin, i, j, mx, fp, fn, delta),
                              self.BinEdges_left, self.BinEdges_right)))
        result = [i for i in self.Expected_limit / int_response if i > 0]
        result = np.min(result)
        if result > 0:
            result = np.log10(result)
        else:
            result = np.inf
        print("(vmin, result) =", (vmin, result))
        return [vmin, result]

    def ExpectedNumEvents(self, minfunc, mx, fp, fn, delta):
        """ Expected number of signal events for the piecewise-constant eta
        described by minfunc = [vmin_1..vmin_No, logeta_1..logeta_No].
        """
        # Integer floor division: minfunc.size/2 is a float under
        # `from __future__ import division` and NumPy rejects float indices.
        vmin_list_w0 = minfunc[:(minfunc.size // 2)]
        logeta_list = minfunc[(minfunc.size // 2):]
        vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
        resp_integr = self.IntegratedResponseTable(vmin_list_w0,
                                                   self.BinEdges_left[0],
                                                   self.BinEdges_right[0],
                                                   mx, fp, fn, delta)
        Nsignal = self.BinExposure * np.dot(10**logeta_list, resp_integr)
        return Nsignal

    def Simulate_Events(self, Nexpected, minfunc, class_name, mx, fp, fn, delta):
        """ Draw a Poisson number of events (signal + background) and sample
        their vmin values from the normalized response; overwrites
        self.BinData with the simulated count and returns the sorted samples.
        """
        Totexpected = Nexpected + self.BinBkgr[0]
        Nevents = poisson.rvs(Totexpected)
        vdelta = min(VminDelta(self.mT, mx, delta))
        # Integer floor division for NumPy slicing (see ExpectedNumEvents).
        vmin_list_w0 = minfunc[:(minfunc.size // 2)]
        logeta_list = minfunc[(minfunc.size // 2):]
        # NOTE(review): the leading logeta value of -1 for vmin below the first
        # step looks like a placeholder -- confirm against the intended eta.
        eta_list = np.insert(logeta_list, 0, -1)
        vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
        vmin_grid = np.linspace(vdelta, vmin_list_w0[-1], 1000)
        x_run = 0
        resp_integr = np.zeros(len(vmin_grid))
        for vmin_ind in range(len(vmin_grid)):
            if vmin_grid[vmin_ind] < (vmin_list_w0[x_run+1]):
                resp_integr[vmin_ind] = 10**eta_list[x_run] *\
                    self.Response(vmin_grid[vmin_ind], self.BinEdges_left[0],
                                  self.BinEdges_right[0], mx, fp, fn, delta)
            else:
                x_run += 1
                resp_integr[vmin_ind] = 10**eta_list[x_run] *\
                    self.Response(vmin_grid[vmin_ind], self.BinEdges_left[0],
                                  self.BinEdges_right[0], mx, fp, fn, delta)
        if Nevents > 0:
            # Inverse-CDF sampling of vmin from the tabulated response.
            pdf = resp_integr / np.sum(resp_integr)
            cdf = pdf.cumsum()
            u = random.rand(Nevents)
            Q = np.zeros(Nevents)
            for i in np.arange(Nevents):
                Q[i] = vmin_grid[np.absolute(cdf - u[i]).argmin()]
            Q = np.sort(Q)
        else:
            Q = np.array([])
            Nevents = 0
            Nexpected = 0
        print('Events expected: ', Totexpected, 'Events Simulated: ', Nevents)
        print('Events: ', Q)
        self.BinData = np.array([Q.size])
        return Q

    def _MinusLogLikelihood(self, vars_list, mx, fp, fn, delta,
                            vminStar=None, logetaStar=None, vminStar_index=None):
        """ Compute -log(L)
        Input:
            vars_list: ndarray
                List of variables [vmin_1, ..., vmin_No, log(eta_1), ..., log(eta_No)]
            vminStar, logetaStar: float, optional
                Values of fixed vmin^* and log(eta)^*.
        Returns:
            -2log(L): float
        """
        # Integer floor division for NumPy slicing (see ExpectedNumEvents).
        if vminStar is None:
            vmin_list_w0 = vars_list[: vars_list.size // 2]
            logeta_list = vars_list[vars_list.size // 2:]
        else:
            vmin_list_w0 = np.insert(vars_list[: vars_list.size // 2],
                                     vminStar_index, vminStar)
            logeta_list = np.insert(vars_list[vars_list.size // 2:],
                                    vminStar_index, logetaStar)
        vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
        rate_partials = [None] * (self.BinEdges_left.size)
        for x in range(0, self.BinEdges_left.size):
            resp_integr = self.IntegratedResponseTable(vmin_list_w0,
                                                       self.BinEdges_left[x],
                                                       self.BinEdges_right[x],
                                                       mx, fp, fn, delta)
            rate_partials[x] = np.dot(10**logeta_list, resp_integr)
            # Clamp tiny negative quadrature artifacts to a physical rate.
            if rate_partials[x] < 0:
                rate_partials[x] = 0.0
        result = 0
        if self.energy_resolution_type == "Dirac":
            self.Response = self._Response_Dirac
        else:
            self.Response = self._Response_Finite
        # Binned Poisson -2 log L, including the constant log(N!) term.
        for x in range(0, self.BinData.size):
            result += 2.0 * (self.BinExposure[x] * rate_partials[x] + self.BinBkgr[x] +
                             log(factorial(self.BinData[x])) - self.BinData[x] *
                             log(self.BinExposure[x] * rate_partials[x] + self.BinBkgr[x]))
        return result

    def KKT_Condition_Q(self, vars_list, mx, fp, fn, delta,
                        vminStar=None, logetaStar=None, vminStar_index=None):
        """This is intended to calculate the contribution to q(vmin) from a particular experiment.
        It may currently have issues, has not been tested.
        """
        # Integer floor division for NumPy slicing (see ExpectedNumEvents).
        if vminStar is None:
            vmin_list_w0 = vars_list[: vars_list.size // 2]
            logeta_list = vars_list[vars_list.size // 2:]
        else:
            vmin_list_w0 = np.insert(vars_list[: vars_list.size // 2],
                                     vminStar_index, vminStar)
            logeta_list = np.insert(vars_list[vars_list.size // 2:],
                                    vminStar_index, logetaStar)
        vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
        self.curly_H_tab = np.zeros((self.BinData.size, 1001))
        self.Q_contrib = np.zeros((self.BinData.size, 1001))
        rate_partials = [None] * (self.BinEdges_left.size)
        for x in range(0, self.BinEdges_left.size):
            resp_integr = self.IntegratedResponseTable(vmin_list_w0,
                                                       self.BinEdges_left[x],
                                                       self.BinEdges_right[x],
                                                       mx, fp, fn, delta)
            rate_partials[x] = np.dot(10**logeta_list, resp_integr)
        if self.energy_resolution_type == "Dirac":
            self.Response = self._Response_Dirac
        else:
            self.Response = self._Response_Finite
        result = np.zeros((self.BinData.size, 1001))
        # TODO parallelize this section of the code
        # When False, a previously computed table is read back from disk.
        calculate_Q = False
        if calculate_Q:
            for x in range(0, self.BinData.size):
                for v_dummy in range(1, 1001):
                    self.curly_H_tab[x, v_dummy] = integrate.quad(self.Response,
                                                                  min(VminDelta(self.mT, mx,
                                                                                delta)),
                                                                  v_dummy,
                                                                  args=(self.BinEdges_left[x],
                                                                        self.BinEdges_right[x],
                                                                        mx, fp, fn, delta),
                                                                  epsrel=PRECISSION, epsabs=0)[0]
                    result[x, v_dummy] = (2.0 * ((self.BinExposure[x] * rate_partials[x] +
                                                  self.BinBkgr[x] - self.BinData[x]) /
                                                 (rate_partials[x] + self.BinBkgr[x] /
                                                  self.BinExposure[x])) *
                                          self.curly_H_tab[x, v_dummy])
                    self.Q_contrib[x, v_dummy] = result[x, v_dummy]
            file = Output_file_name(self.name, self.scattering_type, self.mPhi, mx, fp, fn, delta,
                                    F, "_KKT_Cond_1", "../Output_Band/") + ".dat"
            f_handle = open(file, 'wb')   # clear the file first
            np.savetxt(f_handle, self.Q_contrib)
            f_handle.close()
        else:
            file = Output_file_name(self.name, self.scattering_type, self.mPhi, mx, fp, fn, delta,
                                    F, "_KKT_Cond_1", "../Output_Band/") + ".dat"
            f_handle = open(file, 'rb')
            self.Q_contrib = np.loadtxt(f_handle)
            f_handle.close()
        print('Obtained Variational of Likelihood')
        return self.Q_contrib

    def Constrained_likelihood(self, mx, fp, fn, delta, vminStar, logetaStar):
        """ -2 log L profiled analytically over the signal strength mu,
        constrained by the fixed (vminStar, logetaStar) point.
        """
        # Lower edge of the accessible vmin range, and the branch-splitting
        # velocity vd for inelastic scattering.
        if delta == 0:
            vmin_low = min(VMin(self.BinEdges_left, self.mT, mx, delta))
            vd = 0.
        elif delta < 0:
            er_delta = np.abs(delta) * mx / (mx + np.mean(self.mT))
            vd = 0.
            if self.BinEdges_left > er_delta:
                vmin_low = min(VMin(self.BinEdges_left, self.mT, mx, delta))
            else:
                vmin_low = 0.
        elif delta > 0:
            er_delta = np.abs(delta) * mx / (mx + np.mean(self.mT))
            vd = np.sqrt(2. * delta * 10**(-6.) * (mx + np.mean(self.mT)) /
                         (mx * np.mean(self.mT))) * SpeedOfLight
            if self.BinEdges_left > er_delta:
                vmin_low = min(VMin(self.BinEdges_left, self.mT, mx, delta))
            else:
                vmin_low = vd
        # Bounds on mu imposed by the constraint point: a step at vminStar
        # forces at least (below vminStar) or at most (above it) this much rate.
        if vminStar > vmin_low:
            mu_max = np.inf
            mu_min = self.BinExposure * 10**logetaStar * self.IntegratedResponse(vd, vminStar,
                                                                                 self.BinEdges_left,
                                                                                 self.BinEdges_right,
                                                                                 mx, fp,
                                                                                 fn, delta)
        else:
            mu_max = self.BinExposure * 10**logetaStar * self.IntegratedResponse(vd, 1000.,
                                                                                 self.BinEdges_left,
                                                                                 self.BinEdges_right,
                                                                                 mx, fp,
                                                                                 fn, delta)
            mu_min = 0.
        # Analytic Poisson optimum mu = N - B, clipped to [mu_min, mu_max].
        if self.BinData > self.BinBkgr:
            optimal_mu = (self.BinData - self.BinBkgr)
            if mu_min < optimal_mu < mu_max:
                mu = optimal_mu
            elif optimal_mu <= mu_min:
                mu = mu_min
            else:
                mu = mu_max
        else:
            optimal_mu = 0.
            if mu_min > optimal_mu:
                mu = mu_min
            else:
                mu = optimal_mu
        mloglike = 2.0 * (mu + self.BinBkgr + np.log(factorial(self.BinData)) - self.BinData *
                          np.log(mu + self.BinBkgr))
        return mloglike[0]

    def Constrained_MC(self, mx, fp, fn, delta, vminStar, logetaStar, data=None):
        """ Same profiled -2 log L as Constrained_likelihood, but evaluated on
        a simulated event list `data` instead of the observed BinData.
        """
        nobs = len(data)
        if delta == 0:
            vmin_low = min(VMin(self.BinEdges_left, self.mT, mx, delta))
            vd = 0.
        elif delta < 0:
            er_delta = np.abs(delta) * mx / (mx + np.mean(self.mT))
            vd = 0.
            if self.BinEdges_left > er_delta:
                vmin_low = min(VMin(self.BinEdges_left, self.mT, mx, delta))
            else:
                vmin_low = 0.
        elif delta > 0:
            er_delta = np.abs(delta) * mx / (mx + np.mean(self.mT))
            vd = np.sqrt(2. * delta * 10**(-6.) * (mx + np.mean(self.mT)) /
                         (mx * np.mean(self.mT))) * SpeedOfLight
            if self.BinEdges_left > er_delta:
                vmin_low = min(VMin(self.BinEdges_left, self.mT, mx, delta))
            else:
                vmin_low = vd
        if vminStar > vmin_low:
            mu_max = np.inf
            mu_min = self.BinExposure * 10**logetaStar * self.IntegratedResponse(vd, vminStar,
                                                                                 self.BinEdges_left,
                                                                                 self.BinEdges_right,
                                                                                 mx, fp,
                                                                                 fn, delta)
        else:
            mu_max = self.BinExposure * 10**logetaStar * self.IntegratedResponse(vd, 1000.,
                                                                                 self.BinEdges_left,
                                                                                 self.BinEdges_right,
                                                                                 mx, fp,
                                                                                 fn, delta)
            mu_min = 0.
        if nobs > self.BinBkgr:
            optimal_mu = (nobs - self.BinBkgr)
            if mu_min < optimal_mu < mu_max:
                mu = optimal_mu
            elif optimal_mu <= mu_min:
                mu = mu_min
            else:
                mu = mu_max
        else:
            optimal_mu = 0.
            if mu_min > optimal_mu:
                mu = mu_min
            else:
                mu = optimal_mu
        mloglike = 2.0 * (mu + self.BinBkgr - nobs *
                          np.log(mu + self.BinBkgr) + np.log(factorial(self.BinData)))
        return mloglike[0]

    def GetLikelihoodTable(self, index, output_file_loc, mx, fp, fn, delta):
        """ Tabulate the constrained -2 log L over the logetaStar samples at
        the vminStar of the given index, resuming from an existing partial
        table on disk when possible.
        """
        print('index =', index)
        vminStar = self.call_table.vmin_logeta_sampling_table[index, 0, 0]
        logetaStar_list = self.call_table.vmin_logeta_sampling_table[index, :, 1]
        print("vminStar =", vminStar)
        temp_file = output_file_loc + str(self.name) + "_" + str(index) + \
            "_mx_" + str(mx) + "GeV_" + "fpfn" + str(fp) + "_" + str(fn) + \
            "_ConstrainedLogLikelihoodList" + ".dat"
        table = np.empty((0, 2))
        if os.path.exists(temp_file):
            size_of_file = len(np.loadtxt(temp_file))
            fileexists = True
        else:
            size_of_file = 0
            fileexists = False
        if size_of_file >= 30:
            # Table already complete for this index; nothing to do.
            pass
        else:
            if fileexists and size_of_file > 1 and np.loadtxt(temp_file).ndim == 2:
                # Resume: keep existing rows and only compute new logetaStar
                # points beyond the last tabulated value.
                table = np.loadtxt(temp_file)
                for logetaStar in logetaStar_list:
                    if logetaStar > table[-1, 0]:
                        print('V* =', vminStar, 'log(eta)* =', logetaStar)
                        constr_opt = self.Constrained_likelihood(mx, fp, fn, delta, vminStar, logetaStar)
                        if constr_opt < 0.:
                            pass
                        else:
                            print("index =", index, "; vminStar =", vminStar,
                                  "; logetaStar =", logetaStar, "; constr_opt =", constr_opt)
                            table = np.append(table, [[logetaStar, constr_opt]], axis=0)
                            print("vminStar =", vminStar, "; table =", table)
                            print(temp_file)
                            np.savetxt(temp_file, table)
            else:
                for logetaStar in logetaStar_list:
                    print('V* =', vminStar, 'log(eta)* =', logetaStar)
                    constr_opt = self.Constrained_likelihood(mx, fp, fn, delta, vminStar, logetaStar)
                    if constr_opt < 0.:
                        pass
                    else:
                        print("index =", index, "; vminStar =", vminStar,
                              "; logetaStar =", logetaStar, "; constr_opt =", constr_opt)
                        table = np.append(table, [[logetaStar, constr_opt]], axis=0)
                        print("vminStar =", vminStar, "; table =", table)
                        print(temp_file)
                        np.savetxt(temp_file, table)
        return

    def ConstrainedLikelihoodList(self, class_name, output_file_loc, mx, fp, fn, delta, processes=None):
        """ Run GetLikelihoodTable over every vminStar index of the sampling
        table of class_name[0], in parallel.
        """
        vmin_index_list = range(0, class_name[0].vmin_logeta_sampling_table.shape[0])
        print("vmin_index_list =", vmin_index_list)
        self.call_table = class_name[0]
        kwargs = ({'index': index,
                   'mx': mx,
                   'fp': fp,
                   'fn': fn,
                   'delta': delta,
                   'output_file_loc': output_file_loc
                   }
                  for index in vmin_index_list)
        par.parmap(self.GetLikelihoodTable, kwargs, processes)
        return
class Crosses_HaloIndep(Experiment_HaloIndep):
""" Class for finding the crosses for experients with potential signal and
binned data.
Input:
exper_name: string
Name of experiment.
scattering_type: string
Type of scattering.
mPhi: float, optional
Mass of the mediator.
quenching_factor: float, optional
Quenching factor. If not given, the default used is specified in the data
modules.
"""
    def __init__(self, exper_name, scattering_type, mPhi=mPhiRef, quenching_factor=None):
        """ Load the experiment's binned data from its input module.
        Input:
            exper_name: string
                Name of experiment (also the input module's file name).
            scattering_type: string
                Type of scattering.
            mPhi: float, optional
                Mass of the mediator.
            quenching_factor: float, optional
                If given, overrides the energy-dependent quenching factor
                with this constant value.
        """
        super().__init__(exper_name, scattering_type, mPhi)
        module = import_file(INPUT_DIR + exper_name + ".py")
        # Split the N+1 bin edges into left/right edges of the N bins.
        self.BinEdges = module.BinEdges
        self.BinEdges_left = self.BinEdges[:-1]
        self.BinEdges_right = self.BinEdges[1:]
        self.BinData = module.BinData
        self.BinError = module.BinError
        self.QuenchingFactorOfEee = module.QuenchingFactorOfEee
        if quenching_factor is not None:
            self.QuenchingFactor = lambda e: quenching_factor
        # Alias used by subclasses/helpers for the response integral.
        self._int_resp = self.IntegratedResponse
def _VminRange(self, E1, E2, mT, mx, delta):
E_delta = - delta * mx / (mT + mx)
vmin_of_E1 = VMin(E1, mT, mx, delta)
vmin_of_E2 = VMin(E2, mT, mx, delta)
print(vmin_of_E1, vmin_of_E2)
if E1 <= E_delta and E2 >= E_delta:
vmin_min = 0
else:
vmin_min = min(vmin_of_E1, vmin_of_E2)
vmin_max = max(vmin_of_E1, vmin_of_E2)
return (vmin_min, vmin_max)
def _AverageOverNuclides(self, quantity):
return np.sum(quantity * self.mass_fraction) / np.sum(self.mass_fraction)
def _Box(self, Eee1, Eee2, mT_avg, mx, fp, fn, delta, vmax, output_file=None):
print(self.name)
print('Eee1 =', Eee1, ' Eee2 =', Eee2)
dvmin = 1
if delta <= 0:
vmin_list = np.linspace(0, vmax, (vmax + dvmin)/dvmin)
resp_list = [self.Response(vmin1, Eee1, Eee2, mx, fp, fn, delta)
for vmin1 in vmin_list[:-1]] + [0.001]
else:
vdelta = min(VminDelta(self.mT, mx, delta))
print('vdelta =', vdelta)
vdelta = max(0, vdelta // dvmin * dvmin - dvmin)
vmin_list = np.linspace(vdelta, vmax, (vmax - vdelta + dvmin)/dvmin)
resp_list = [self.IntegratedResponse(vmin1, vmin2, Eee1, Eee2,
mx, fp, fn, delta)/dvmin
for vmin1, vmin2 in zip(vmin_list[:-1], vmin_list[1:])] + [0.001]
plt.close()
plt.plot(vmin_list, resp_list, '-')
int_resp = sum(resp_list) * dvmin
index_center = np.argmax(resp_list)
vmin_center = vmin_list[index_center]
resp_max = resp_list[index_center]
resp_min = max(resp_list[0], resp_list[-1])
if output_file is not None:
output_file = output_file.replace('temp.dat', self.name + '_' + str(Eee1) +
'_' + str(Eee2) + '.dat')
print(output_file)
np.savetxt(output_file,
np.transpose([vmin_list, np.array(resp_list)/int_resp]))
output_file = output_file.replace('.dat', '_notnorm.dat')
print(output_file)
np.savetxt(output_file, np.transpose([vmin_list, resp_list]))
if index_center > 0:
int_resp_left = \
interpolate.interp1d(resp_list[index_center::-1],
dvmin * np.cumsum(resp_list[index_center::-1]))
else:
def int_resp_left(r): return 0
if index_center < len(resp_list) - 1:
int_resp_right = \
interpolate.interp1d(resp_list[index_center:],
dvmin * np.cumsum(resp_list[index_center:]))
else:
def int_resp_right(r): return 0
print('resp_max =', resp_max)
print('resp_min =', resp_min)
print('int_resp =', int_resp)
def integrated_response(r):
return int_resp_left(r) + int_resp_right(r) - resp_max -\
ConfidenceLevel * int_resp
print(integrated_response(resp_min * 1.1), integrated_response(resp_max * 0.9))
response_CL = brentq(integrated_response, resp_min * 1.1, resp_max * 0.9)
print('response_CL =', response_CL)
plt.plot(vmin_list, response_CL * np.ones_like(vmin_list), '-')
vmin_interp_left = interpolate.interp1d(resp_list[:index_center + 1],
vmin_list[:index_center + 1])
vmin_interp_right = interpolate.interp1d(resp_list[index_center:],
vmin_list[index_center:])
vmin_error_left = - vmin_interp_left(response_CL) + vmin_center
vmin_error_right = vmin_interp_right(response_CL) - vmin_center
print('vmin_edges =',
VMin(Eee1/self.QuenchingFactor(Eee1), self.mT, mx, delta)[0],
VMin(Eee2/self.QuenchingFactor(Eee2), self.mT, mx, delta)[0])
print('vmin_interp =', vmin_interp_left(response_CL),
vmin_interp_right(response_CL))
print('vmin_center =', vmin_center)
print('vmin_error =', vmin_error_left, vmin_error_right)
# os.system("say 'Plot'")
# plt.show()
return (int_resp, vmin_center, vmin_error_left, vmin_error_right)
def _Boxes(self, mx, fp, fn, delta, vmax=2000, processes=None, output_file=None):
mT_avg = np.sum(self.mT * self.mass_fraction) / np.sum(self.mass_fraction)
print("mT_avg =", mT_avg)
print('vmax =', vmax)
arraybox = np.array([])
for Eee1, Eee2 in zip(self.BinEdges_left, self.BinEdges_right):
kwargs = {'Eee1': Eee1, 'Eee2': Eee2, 'mT_avg': mT_avg,
'mx': mx, 'fp': fp, 'fn': fn, 'delta': delta, 'vmax': vmax,
'output_file': output_file}
if Eee1 == self.BinEdges_left[0]:
arraybox = np.append(arraybox, self._Box(**kwargs))
else:
arraybox = np.column_stack((arraybox, np.array(self._Box(**kwargs))))
return arraybox
def _Rebin(self, index=9):
self.BinEdges = np.append(self.BinEdges[:index + 1], self.BinEdges[-1])
data, error = Rebin_data(self.BinData[index:], self.BinError[index:])
self.BinData = np.append(self.BinData[:index], data)
self.BinError = np.append(self.BinError[:index], error)
print('BinEdges =', self.BinEdges)
print('BinData =', self.BinData)
print('BinError =', self.BinError)
self.BinEdges_left = self.BinEdges[:-1]
self.BinEdges_right = self.BinEdges[1:]
def UpperLimit(self, mx, fp, fn, delta, vmin_min, vmin_max, vmin_step,
output_file, rebin=False, processes=None, **unused_kwargs):
if rebin:
self._Rebin()
box_table = self._Boxes(mx, fp, fn, delta, vmax=vmin_max, processes=processes)
int_resp_list = box_table[0, :]
vmin_center_list = box_table[1, :]
vmin_error_left_list = box_table[2, :]
vmin_error_right_list = box_table[3, :]
eta_list = self.BinData / int_resp_list
eta_error_list = self.BinError / int_resp_list
print('Bin Data', self.BinData)
print('Bin Error', self.BinError)
print('eta error', eta_list)
print('eta error list', eta_error_list)
result = np.array([int_resp_list, vmin_center_list, vmin_error_left_list,
vmin_error_right_list, eta_list, eta_error_list])
print(result)
with open(output_file, 'ab') as f_handle:
np.savetxt(f_handle, result)
return result
def IntResponseMatrix(self, mx, fp, fn, delta, vmin_min, vmin_max, vmin_step,
output_file, processes=None):
np.set_printoptions(threshold=np.nan)
vmin_list = np.linspace(vmin_min, vmin_max, (vmin_max - vmin_min)/vmin_step + 1)
kwargs = ({'vmin1': vmin1, 'vmin2': vmin2,
'Eee1': Eee1, 'Eee2': Eee2,
'mx': mx, 'fp': fp, 'fn': fn, 'delta': delta}
for vmin1, vmin2 in zip(vmin_list[:-1], vmin_list[1:])
for Eee1, Eee2 in zip(self.BinEdges_left, self.BinEdges_right))
matr = [self._int_resp(**k) for k in kwargs]
matr = np.reshape(matr, (len(vmin_list)-1, len(self.BinEdges_left)))
print('matrix =')
print(matr)
print(np.shape(matr))
print('determinant =', det(matr))
print('inverse =')
print(inv(matr))
with open(output_file, 'ab') as f_handle:
np.savetxt(f_handle, matr)
return matr
class Crosses_HaloIndep_Combined(Crosses_HaloIndep, Experiment_HaloIndep):
    """ Class for finding the best-fit regions for the DAMA experiment
    when considering the combined analysis of Na and I.
    Constructor:
        A list or tuple of 2 experiment names must be given, and, if not None, then
        a list or tuple of 2 quenching_factors, one for Na and one for I.
    """
    def __init__(self, exper_name, scattering_type, mPhi=mPhiRef, quenching_factor=None):
        # exper_name is a whitespace-separated pair, e.g. "DAMA_Na DAMA_I".
        exper_name = exper_name.split()
        super().__init__(exper_name[0], scattering_type, mPhi)
        # self.other handles the second target, built from the first base class
        # (Crosses_HaloIndep).
        self.other = self.__class__.__bases__[0](exper_name[1], scattering_type, mPhi)
        if quenching_factor is not None:
            self.QuenchingFactor = lambda e: quenching_factor[0]
            self.other.QuenchingFactor = lambda e: quenching_factor[1]

    def _int_resp(self, vmin1, vmin2, Eee1, Eee2, mx, fp, fn, delta):
        """Sum of the integrated responses of the two targets."""
        return self.IntegratedResponse(vmin1, vmin2, Eee1, Eee2, mx, fp, fn, delta) \
            + self.other.IntegratedResponse(vmin1, vmin2, Eee1, Eee2, mx, fp, fn, delta)

    def _Rebin(self, initial_energy_bin, vmax, mx, num_rebinned_bins=19):
        """Rebin the data into geometrically growing energy bins so that both
        targets cover the same vmin range, padding with averaged "fake" bins
        above the measured range when needed."""
        # build the new self.BinEdges_left and self.BinEdges_right
        self.BinEdges_left = [initial_energy_bin[0]]
        self.BinEdges_right = [initial_energy_bin[1]]
        # Ratio of recoil energies giving the same vmin on the two targets.
        ratio = ERecoil_ratio(self.mT, self.other.mT, mx,
                              self.QuenchingFactor(0), self.other.QuenchingFactor(0))
        ratio = round(ratio[0], 1)
        print('ratio =', ratio)
        vmin_left_edge = VMin(self.BinEdges_left[-1]/self.QuenchingFactor(0),
                              self.mT[0], mx, 0)
        print('vmax =', vmax)
        print('vmin_left_edge =', vmin_left_edge)
        # Grow bins geometrically until the left edge passes vmax.
        while vmin_left_edge < vmax:
            self.BinEdges_left.append(self.BinEdges_left[-1] * ratio)
            self.BinEdges_right.append(self.BinEdges_right[-1] * ratio)
            vmin_left_edge = VMin(self.BinEdges_left[-1]/self.QuenchingFactor(0),
                                  self.mT[0], mx, 0)
            print('vmin_left_edge =', vmin_left_edge)
        self.other.BinEdges_left = self.BinEdges_left
        self.other.BinEdges_right = self.BinEdges_right
        print('BinEdges_left =', self.BinEdges_left)
        print('BinEdges_right =', self.BinEdges_right)
        if self.BinEdges_right[-1] > self.BinEdges[-1]:
            # add fake bins at higher energies
            index = len(self.BinData) - num_rebinned_bins
            data, error = Rebin_data(self.BinData[index:], self.BinError[index:])
            num_added_bins = round((self.BinEdges_right[-1] - self.BinEdges[-1]) /
                                   (self.BinEdges[-1] - self.BinEdges[-2]))
            added_edges = np.linspace(self.BinEdges[-1], self.BinEdges_right[-1],
                                      num_added_bins + 1)
            self.BinEdges = np.append(self.BinEdges, added_edges)
            # Spread the averaged high-energy data/error over the added bins.
            self.BinData = np.append(self.BinData,
                                     [data/num_rebinned_bins] * num_added_bins)
            self.BinError = np.append(self.BinError,
                                      [error/np.sqrt(num_rebinned_bins)] * num_added_bins)
            print('BinEdges =', self.BinEdges)
            print('BinData =', self.BinData)
            print('BinError =', self.BinError)
        # combine multiple bins to fit the edges from self.BinEdges_left and _right
        self.BinData_rebinned = []
        self.BinError_rebinned = []
        for index in range(len(self.BinEdges_left)):
            data = np.array([d for i, d in enumerate(self.BinData)
                             if self.BinEdges[i] >= self.BinEdges_left[index] and
                             self.BinEdges[i + 1] <= self.BinEdges_right[index]])
            error = np.array([d for i, d in enumerate(self.BinError)
                              if self.BinEdges[i] >= self.BinEdges_left[index] and
                              self.BinEdges[i + 1] <= self.BinEdges_right[index]])
            print('data =', data)
            print('error =', error)
            data_rebinned, error_rebinned = Rebin_data(data, error)
            self.BinData_rebinned.append(data_rebinned)
            self.BinError_rebinned.append(error_rebinned)
        print('BinData_rebinned =', self.BinData_rebinned)
        print('BinError_rebinned =', self.BinError_rebinned)

    def UpperLimit(self, mx, fp, fn, delta, vmin_min, vmin_max, vmin_step,
                   output_file, initial_energy_bin=[2, 4], vmax=None, processes=None,
                   **unused_kwargs):
        """Combined Na+I analysis (elastic scattering only): solves the linear
        system relating rebinned data to per-bin eta values."""
        if delta != 0:
            raise ValueError('delta has to be zero for DAMA halo-independent ' +
                             'combined analysis!')
        if vmax is None:
            vmax = vmin_step
        self._Rebin(initial_energy_bin, vmax, mx)
        box_table = self._Boxes(mx, fp, fn, delta, vmax=vmin_max, processes=processes,
                                output_file=output_file)
        box_table_other = self.other._Boxes(mx, fp, fn, delta, vmax=vmin_max,
                                            processes=processes, output_file=output_file)
        print('box_table =')
        print(repr(box_table))
        print('box_table_other =')
        print(repr(box_table_other))
        # NOTE(review): rows/columns are indexed as [:, k] here while the base
        # class UpperLimit uses [k, :] -- presumably the _Boxes orientation
        # differs with a single bin; confirm against the callers.
        int_resp_list = box_table[:, 0]
        int_resp_list_other = box_table_other[:, 0]
        vmin_center_list = box_table_other[:, 1]
        vmin_error_left_list = box_table_other[:, 2]
        vmin_error_right_list = box_table_other[:, 3]
        size = len(int_resp_list)
        # Band matrix: each rebinned datum mixes one Na bin with the next I bin.
        int_resp_matrix = np.vstack((np.hstack((np.zeros((size - 1, 1)),
                                                np.diag(int_resp_list[:-1]))),
                                     np.zeros(size)))
        int_resp_matrix += np.diag(int_resp_list_other)
        print('int_resp_matrix =', int_resp_matrix)
        int_resp_inverse = np.linalg.inv(int_resp_matrix)
        eta_list = np.dot(int_resp_inverse, self.BinData_rebinned)
        eta_error_list = np.sqrt(np.dot(int_resp_inverse ** 2,
                                        np.array(self.BinError_rebinned) ** 2))
        result = np.array([int_resp_list + int_resp_list_other,
                           vmin_center_list, vmin_error_left_list, vmin_error_right_list,
                           eta_list, eta_error_list])
        print(result)
        with open(output_file, 'ab') as f_handle:
            np.savetxt(f_handle, result)
        return result
class Standard_Halo_Model:
    """ Standard Halo Model curve at a fixed cross-section.

    Evaluates a Maxwellian eta (eta0 or eta1, chosen from the experiment
    name) on a vmin grid and converts it to log10(eta) at the given
    log10(sigma_p), for overlay on halo-independent limit plots.

    Input:
        exper_name: string
            Name used to pick the eta function ("eta0" selects eta0Maxwellian,
            anything else eta1Maxwellian).
        log_sigma_p: float
            log10 of the reference proton cross-section.
    """
    def __init__(self, exper_name, log_sigma_p):
        self.name = exper_name
        self.log_sigma_p = log_sigma_p

    def UpperLimit(self, mx, fp, fn, delta, vmin_min, vmin_max, vmin_step,
                   output_file, **unused_kwargs):
        """Tabulate (vmin, log10 eta) on a grid, append it to output_file
        and return it; rows where eta <= 0 are dropped.
        """
        if "eta0" in self.name:
            eta = eta0Maxwellian
        else:
            eta = eta1Maxwellian
        # np.linspace requires an integer num (a float raises TypeError on
        # NumPy >= 1.18); truncate as old NumPy versions implicitly did.
        num_points = int((vmin_max - vmin_min) / vmin_step + 1)
        vmin_list = np.linspace(vmin_min, vmin_max, num_points)
        eta_list = eta(vmin_list, vobs, v0bar, vesc)
        # Non-positive eta would give log10 = nan/-inf; mark with inf so the
        # corresponding rows can be filtered out below.
        eta_list = np.array([i if i > 0 else np.inf for i in eta_list])
        log_eta_list = self.log_sigma_p + np.log10(conversion_factor / mx * eta_list)
        result = np.transpose([vmin_list, log_eta_list])
        result = result[result[:, 1] != np.inf]
        print(result)
        with open(output_file, 'ab') as f_handle:
            np.savetxt(f_handle, result)
        return result
| SamWitte/Codds_DarkMatter | src/experiment_HaloIndep.py | Python | gpl-2.0 | 55,784 | [
"DIRAC",
"Gaussian"
] | 9279561fd4389c7722f36c41bd2eab0677169cca985d1beadb3f40fdc3d9f1b3 |
"""Handle extraction of final files from processing pipelines into storage.
"""
import datetime
import os
import toolz as tz
from bcbio import log, utils
from bcbio.upload import shared, filesystem, galaxy, s3
from bcbio.pipeline import run_info
import bcbio.pipeline.datadict as dd
# Registry mapping the "method" value of a sample's upload configuration to
# the module implementing that storage backend (default: "filesystem").
_approaches = {"filesystem": filesystem,
               "galaxy": galaxy,
               "s3": s3}
def project_from_sample(sample):
    """Upload project-wide output files referenced from a processed sample.

    Dispatches to the configured upload backend; files are uploaded without
    per-sample context (sample argument of update_file is None).
    """
    upload_config = sample.get("upload")
    if not upload_config:
        return [[sample]]
    backend = _approaches[upload_config.get("method", "filesystem")]
    for file_info in _get_files_project(sample, upload_config):
        backend.update_file(file_info, None, upload_config)
    return [[sample]]
def from_sample(sample):
    """Upload per-sample results of processing from an analysis pipeline.

    Dispatches each collected file to the configured upload backend with the
    sample as context. No-op when the sample has no upload configuration.
    """
    upload_config = sample.get("upload")
    if not upload_config:
        return [[sample]]
    backend = _approaches[upload_config.get("method", "filesystem")]
    for file_info in _get_files(sample):
        backend.update_file(file_info, sample, upload_config)
    return [[sample]]
# ## File information from sample
def _get_files(sample):
"""Retrieve files for the sample, dispatching by analysis type.
Each file is a dictionary containing the path plus associated
metadata about the file and pipeline versions.
"""
analysis = sample.get("analysis")
if analysis.lower() in ["variant", "snp calling", "variant2", "standard"]:
return _get_files_variantcall(sample)
elif analysis in ["RNA-seq"]:
return _get_files_rnaseq(sample)
elif analysis.lower() in ["smallrna-seq"]:
return _get_files_srnaseq(sample)
elif analysis.lower() in ["chip-seq"]:
return _get_files_chipseq(sample)
else:
return []
def _get_files_rnaseq(sample):
    """Collect upload file dictionaries for the RNA-seq pipeline."""
    algorithm = sample["config"]["algorithm"]
    collectors = (_maybe_add_summary,
                  _maybe_add_alignment,
                  _maybe_add_disambiguate,
                  _maybe_add_counts,
                  _maybe_add_cufflinks,
                  _maybe_add_oncofuse,
                  _maybe_add_rnaseq_variant_file,
                  _maybe_add_sailfish_files)
    out = []
    # Each collector appends its files (if present) to the running list.
    for collect in collectors:
        out = collect(algorithm, sample, out)
    return _add_meta(out, sample)
def _get_files_srnaseq(sample):
    """Collect upload file dictionaries for the small RNA-seq pipeline."""
    algorithm = sample["config"]["algorithm"]
    out = []
    for collect in (_maybe_add_summary, _maybe_add_trimming, _maybe_add_seqbuster):
        out = collect(algorithm, sample, out)
    return _add_meta(out, sample)
def _get_files_chipseq(sample):
    """Collect upload file dictionaries for the ChIP-seq pipeline."""
    algorithm = sample["config"]["algorithm"]
    out = []
    for collect in (_maybe_add_summary, _maybe_add_alignment):
        out = collect(algorithm, sample, out)
    return _add_meta(out, sample)
def _add_meta(xs, sample=None, config=None):
out = []
for x in xs:
x["mtime"] = shared.get_file_timestamp(x["path"])
if sample and "sample" not in x:
if isinstance(sample["name"], (tuple, list)):
name = sample["name"][-1]
else:
name = "%s-%s" % (sample["name"],
run_info.clean_name(sample["description"]))
x["sample"] = name
if config:
if "fc_name" in config and "fc_date" in config:
x["run"] = "%s_%s" % (config["fc_date"], config["fc_name"])
else:
x["run"] = "project_%s" % datetime.datetime.now().strftime("%Y-%m-%d")
out.append(x)
return out
def _get_files_variantcall(sample):
    """Return output files for the variant calling pipeline."""
    algorithm = sample["config"]["algorithm"]
    collectors = (_maybe_add_summary,
                  _maybe_add_alignment,
                  _maybe_add_disambiguate,
                  _maybe_add_variant_file,
                  _maybe_add_sv,
                  _maybe_add_validate)
    out = []
    for collect in collectors:
        out = collect(algorithm, sample, out)
    return _add_meta(out, sample)
def _maybe_add_validate(algorithm, sample, out):
    """Add validation grading plots to the upload list, one entry per plot.

    The upload extension encodes the plot index: ``validate`` for the first
    plot, ``validate-N`` for subsequent ones. (Parameter renamed from the
    misspelled ``algorith`` for consistency with the sibling helpers; all
    call sites pass positionally.)
    """
    for i, plot in enumerate(tz.get_in(("validate", "grading_plots"), sample, [])):
        # Use the file extension (without the dot) as the upload type.
        ptype = os.path.splitext(plot)[-1].replace(".", "")
        out.append({"path": plot,
                    "type": ptype,
                    "ext": "validate%s" % ("" if i == 0 else "-%s" % (i + 1))})
    return out
def _maybe_add_rnaseq_variant_file(algorithm, sample, out):
if sample.get("vrn_file"):
out.append({"path": sample.get("vrn_file"),
"type": "vcf",
"ext": "vcf"})
return out
def _maybe_add_variant_file(algorithm, sample, out):
if sample.get("align_bam") is not None and sample.get("vrn_file"):
for x in sample["variants"]:
out.extend(_get_variant_file(x, ("vrn_file",)))
if x.get("bed_file"):
out.append({"path": x["bed_file"],
"type": "bed",
"ext": "%s-callregions" % x["variantcaller"],
"variantcaller": x["variantcaller"]})
if x.get("vrn_stats"):
for extra, fname in x["vrn_stats"].items():
ext = utils.splitext_plus(fname)[-1].replace(".", "")
out.append({"path": fname,
"type": ext,
"ext": "%s-%s" % (x["variantcaller"], extra),
"variantcaller": x["variantcaller"]})
return out
def _maybe_add_sv(algorithm, sample, out):
    """Add structural variant outputs and SV validation files to the upload list.

    Collects per-caller files (VCFs, CNV ratio/segment files, BED outputs),
    diagnostic plots, and project-level sv-validate summaries.
    """
    if sample.get("align_bam") is not None and sample.get("sv"):
        for svcall in sample["sv"]:
            # Every known per-caller output key; _get_variant_file skips
            # keys the caller did not produce.
            for key in ["vrn_file", "cnr", "cns", "cnr_bed", "cnr_bedgraph", "seg",
                        "gainloss", "segmetrics", "vrn_bed", "vrn_bedpe"]:
                out.extend(_get_variant_file(svcall, (key,)))
            if "plot" in svcall:
                for plot_name, fname in svcall["plot"].items():
                    # File extension doubles as the upload "type".
                    ext = os.path.splitext(fname)[-1].replace(".", "")
                    out.append({"path": fname,
                                "type": ext,
                                "ext": "%s-%s" % (svcall["variantcaller"], plot_name),
                                "variantcaller": svcall["variantcaller"]})
    if "sv-validate" in sample:
        for vkey in ["csv", "plot", "df"]:
            vfile = tz.get_in(["sv-validate", vkey], sample)
            if vfile:
                to_u = []
                if isinstance(vfile, dict):
                    # Files keyed by SV type: upload each with a -svtype suffix.
                    for svtype, fname in vfile.items():
                        to_u.append((fname, "-%s" % svtype))
                else:
                    # Only the dataframe gets a distinguishing -df suffix.
                    to_u.append((vfile, "-%s" % vkey if vkey in ["df"] else ""))
                for vfile, ext in to_u:
                    vext = os.path.splitext(vfile)[-1].replace(".", "")
                    out.append({"path": vfile,
                                "type": vext,
                                "ext": "sv-validate%s" % ext})
    return out
def _get_variant_file(x, key):
    """Retrieve VCF file with the given key if it exists, handling bgzipped.

    Returns a list of upload dictionaries: the file itself plus, for
    bgzipped VCFs, the tabix index when present. Honors a per-file
    "do_upload" opt-out flag stored alongside the file key.
    """
    out = []
    fname = utils.get_in(x, key)
    # The opt-out flag lives at the same nesting level as the file key.
    upload_key = list(key)
    upload_key[-1] = "do_upload"
    do_upload = tz.get_in(tuple(upload_key), x, True)
    if fname and do_upload:
        if fname.endswith(".vcf.gz"):
            out.append({"path": fname,
                        "type": "vcf.gz",
                        "ext": x["variantcaller"],
                        "variantcaller": x["variantcaller"]})
            # Ship the tabix index alongside the compressed VCF when generated.
            if utils.file_exists(fname + ".tbi"):
                out.append({"path": fname + ".tbi",
                            "type": "vcf.gz.tbi",
                            "index": True,
                            "ext": x["variantcaller"],
                            "variantcaller": x["variantcaller"]})
        elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn", ".txt")):
            ftype = utils.splitext_plus(fname)[-1][1:]
            if ftype == "txt":
                # Generic text outputs carry their real type as a -suffix
                # in the file name (e.g. sample-gainloss.txt -> gainloss.txt).
                ftype = fname.split("-")[-1]
            out.append({"path": fname,
                        "type": ftype,
                        "ext": x["variantcaller"],
                        "variantcaller": x["variantcaller"]})
    return out
def _maybe_add_sailfish_files(algorithm, sample, out):
    """Add the per-sample sailfish quantification directory when sailfish ran."""
    if "sailfish" in dd.get_expression_caller(sample):
        sailfish_entry = {"path": dd.get_sailfish_dir(sample),
                          "type": "directory",
                          "ext": os.path.join("sailfish", dd.get_sample_name(sample))}
        out.append(sailfish_entry)
    return out
def _maybe_add_summary(algorithm, sample, out):
out = []
if "summary" in sample:
if sample["summary"].get("pdf"):
out.append({"path": sample["summary"]["pdf"],
"type": "pdf",
"ext": "summary"})
if sample["summary"].get("qc"):
out.append({"path": sample["summary"]["qc"],
"type": "directory",
"ext": "qc"})
if utils.get_in(sample, ("summary", "researcher")):
out.append({"path": sample["summary"]["researcher"],
"type": "tsv",
"sample": run_info.clean_name(utils.get_in(sample, ("upload", "researcher"))),
"ext": "summary"})
return out
def _maybe_add_alignment(algorithm, sample, out):
    """Add prepared alignment files (BAM/CRAM plus indexes) to the upload list.

    Also includes the discordant ("disc") and split-read ("sr") extractions
    from work_bam-plus, marked with "plus": True.
    """
    if _has_alignment_file(algorithm, sample):
        for (fname, ext, isplus) in [(sample.get("work_bam"), "ready", False),
                                     (utils.get_in(sample, ("work_bam-plus", "disc")), "disc", True),
                                     (utils.get_in(sample, ("work_bam-plus", "sr")), "sr", True)]:
            if fname and os.path.exists(fname):
                # Pick the matching index suffix for the alignment format.
                if fname.endswith("bam"):
                    ftype, fext = "bam", ".bai"
                elif fname.endswith("cram"):
                    ftype, fext = "cram", ".crai"
                else:
                    raise ValueError("Unexpected alignment file type %s" % fname)
                out.append({"path": fname,
                            "type": ftype,
                            "plus": isplus,
                            "ext": ext})
                # Upload the index too, when it has been generated.
                if utils.file_exists(fname + fext):
                    out.append({"path": fname + fext,
                                "type": ftype + fext,
                                "plus": isplus,
                                "index": True,
                                "ext": ext})
    return out
def _maybe_add_disambiguate(algorithm, sample, out):
if "disambiguate" in sample:
for extra_name, fname in sample["disambiguate"].items():
ftype = os.path.splitext(fname)[-1].replace(".", "")
fext = ".bai" if ftype == "bam" else ""
if fname and os.path.exists(fname):
out.append({"path": fname,
"type": ftype,
"plus": True,
"ext": "disambiguate-%s" % extra_name})
if fext and utils.file_exists(fname + fext):
out.append({"path": fname + fext,
"type": ftype + fext,
"plus": True,
"index": True,
"ext": "disambiguate-%s" % extra_name})
return out
def _maybe_add_counts(algorithm, sample, out):
    """Add the expression count file plus its .stats companion when present."""
    count_file = sample["count_file"]
    out.append({"path": count_file,
                "type": "counts",
                "ext": "ready"})
    stats_file = "%s.stats" % os.path.splitext(count_file)[0]
    if utils.file_exists(stats_file):
        out.append({"path": stats_file,
                    "type": "count_stats",
                    "ext": "ready"})
    return out
def _maybe_add_oncofuse(algorithm, sample, out):
if sample.get("oncofuse_file", None) is not None:
out.append({"path": sample["oncofuse_file"],
"type": "oncofuse_outfile",
"ext": "ready"})
return out
def _maybe_add_cufflinks(algorithm, sample, out):
if "cufflinks_dir" in sample:
out.append({"path": sample["cufflinks_dir"],
"type": "directory",
"ext": "cufflinks"})
return out
def _maybe_add_trimming(algorithm, sample, out):
    """Add small RNA-seq collapse/trimming size statistics when generated."""
    stats_fname = "%s_size_stats" % sample["collapse"]
    if utils.file_exists(stats_fname):
        out.append({"path": stats_fname,
                    "type": "trimming_stats",
                    "ext": "ready"})
    return out
def _maybe_add_seqbuster(algorithm, sample, out):
    """Add seqbuster miRNA count output when it has been generated."""
    seqbuster_file = sample["seqbuster"]
    if utils.file_exists(seqbuster_file):
        out.append({"path": seqbuster_file,
                    "type": "counts",
                    "ext": "ready"})
    return out
def _has_alignment_file(algorithm, sample):
return (((algorithm.get("aligner") or algorithm.get("realign")
or algorithm.get("recalibrate") or algorithm.get("bam_clean")
or algorithm.get("mark_duplicates")) and
algorithm.get("merge_bamprep", True)) and
sample.get("work_bam") is not None)
# ## File information from full project
def _get_files_project(sample, upload_config):
    """Retrieve output files associated with an entire analysis project.

    Collects provenance, logs, project summaries, population/coverage
    databases, combined expression outputs and validation summaries, then
    attaches shared metadata via _add_meta.
    """
    # Provenance (programs/versions) is always included.
    out = [{"path": sample["provenance"]["programs"]}]
    for fname in ["bcbio-nextgen.log", "bcbio-nextgen-commands.log"]:
        if os.path.exists(os.path.join(log.get_log_dir(sample["config"]), fname)):
            out.append({"path": os.path.join(log.get_log_dir(sample["config"]), fname),
                        "type": "external_command_log",
                        "ext": ""})
    if "summary" in sample and sample["summary"].get("project"):
        out.append({"path": sample["summary"]["project"]})
    mixup_check = tz.get_in(["summary", "mixup_check"], sample)
    if mixup_check:
        out.append({"path": sample["summary"]["mixup_check"],
                    "type": "directory", "ext": "mixup_check"})
    if sample.get("seqcluster", None):
        out.append({"path": sample["seqcluster"],
                    "type": "directory", "ext": "seqcluster"})
    # Population databases: older layout with a top-level "pop_db" key ...
    for x in sample.get("variants", []):
        if "pop_db" in x:
            out.append({"path": x["pop_db"],
                        "type": "sqlite",
                        "variantcaller": x["variantcaller"]})
    # ... and newer layout nested under "population".
    for x in sample.get("variants", []):
        if "population" in x:
            pop_db = tz.get_in(["population", "db"], x)
            if pop_db:
                out.append({"path": pop_db,
                            "type": "sqlite",
                            "variantcaller": x["variantcaller"]})
            out.extend(_get_variant_file(x, ("population", "vcf")))
    # Only one grading summary is uploaded, from the first caller that has one.
    for x in sample.get("variants", []):
        if x.get("validate") and x["validate"].get("grading_summary"):
            out.append({"path": x["validate"]["grading_summary"]})
            break
    if "coverage" in sample:
        cov_db = tz.get_in(["coverage", "summary"], sample)
        if cov_db:
            out.append({"path": cov_db, "type": "sqlite", "ext": "coverage"})
        all_coverage = tz.get_in(["coverage", "all"], sample)
        if all_coverage:
            out.append({"path": all_coverage, "type": "bed", "ext": "coverage"})
    # Combined RNA-seq project outputs, included when the pipeline set them.
    if dd.get_combined_counts(sample):
        out.append({"path": dd.get_combined_counts(sample)})
    if dd.get_annotated_combined_counts(sample):
        out.append({"path": dd.get_annotated_combined_counts(sample)})
    if dd.get_combined_fpkm(sample):
        out.append({"path": dd.get_combined_fpkm(sample)})
    if dd.get_combined_fpkm_isoform(sample):
        out.append({"path": dd.get_combined_fpkm_isoform(sample)})
    if dd.get_assembled_gtf(sample):
        out.append({"path": dd.get_assembled_gtf(sample)})
    if dd.get_dexseq_counts(sample):
        out.append({"path": dd.get_dexseq_counts(sample)})
    if dd.get_express_counts(sample):
        out.append({"path": dd.get_express_counts(sample)})
    if dd.get_express_fpkm(sample):
        out.append({"path": dd.get_express_fpkm(sample)})
    if dd.get_express_tpm(sample):
        out.append({"path": dd.get_express_tpm(sample)})
    if dd.get_isoform_to_gene(sample):
        out.append({"path": dd.get_isoform_to_gene(sample)})
    if dd.get_square_vcf(sample):
        out.append({"path": dd.get_square_vcf(sample)})
    return _add_meta(out, config=upload_config)
| guillermo-carrasco/bcbio-nextgen | bcbio/upload/__init__.py | Python | mit | 16,723 | [
"Galaxy"
] | f1ca5d9b102f997856b23788df7c7565f24141c5bad4e6e7d33a1f07edc557c1 |
#! /usr/bin/env python
########################################################################
# File : dirac-wms-jobs-select-output-search
# Author : Vladimir Romanovsky
########################################################################
"""
Retrieve output sandbox for DIRAC Jobs for the given selection and search for a string in their std.out
"""
import os
from shutil import rmtree
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
    """Select DIRAC jobs matching the given switches, download each job's
    output sandbox and print the lines of the requested file (default
    std.out) containing the search string given as the only argument.
    """
    Script.registerSwitch("", "Status=", "Primary status")
    Script.registerSwitch("", "MinorStatus=", "Secondary status")
    Script.registerSwitch("", "ApplicationStatus=", "Application status")
    Script.registerSwitch("", "Site=", "Execution site")
    Script.registerSwitch("", "Owner=", "Owner (DIRAC nickname)")
    Script.registerSwitch("", "JobGroup=", "Select jobs for specified job group")
    Script.registerSwitch("", "Date=", "Date in YYYY-MM-DD format, if not specified default is today")
    Script.registerSwitch("", "File=", "File name,if not specified default is std.out ")
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument("String: string to search for")
    _, args = Script.parseCommandLine(ignoreErrors=True)

    # Default values
    status = None
    minorStatus = None
    appStatus = None
    site = None
    owner = None
    jobGroup = None
    date = None
    filename = "std.out"

    if len(args) != 1:
        Script.showHelp()

    searchstring = str(args[0])

    for switch in Script.getUnprocessedSwitches():
        name = switch[0].lower()
        value = switch[1]
        if name == "status":
            status = value
        elif name == "minorstatus":
            minorStatus = value
        elif name == "applicationstatus":
            appStatus = value
        elif name == "site":
            site = value
        elif name == "owner":
            owner = value
        elif name == "jobgroup":
            jobGroup = value
        elif name == "date":
            date = value
        elif name == "file":
            filename = value

    from DIRAC.Interfaces.API.Dirac import Dirac

    dirac = Dirac()
    exitCode = 0
    errorList = []
    resultDict = {}

    # Select the matching jobs; date=None means "today" server-side.
    result = dirac.selectJobs(
        status=status,
        minorStatus=minorStatus,
        applicationStatus=appStatus,
        site=site,
        owner=owner,
        jobGroup=jobGroup,
        date=date,
    )
    if result["OK"]:
        jobs = result["Value"]
    else:
        print("Error in selectJob", result["Message"])
        DIRAC.exit(2)

    for job in jobs:
        result = dirac.getOutputSandbox(job)
        if result["OK"]:
            if os.path.exists("%s" % job):
                lines = []
                try:
                    # Context manager closes the handle; the previous bare
                    # open() leaked a file descriptor per job.
                    with open(os.path.join(job, filename)) as job_output:
                        lines = job_output.readlines()
                except Exception as x:
                    errorList.append((job, x))
                for line in lines:
                    if searchstring in line:
                        resultDict[job] = line
                # Remove the downloaded sandbox directory once scanned.
                rmtree("%s" % (job))
        else:
            errorList.append((job, result["Message"]))
            exitCode = 2

    for result in resultDict.items():
        print(result)
    # Report per-job errors; previously these were collected but never shown.
    for job, error in errorList:
        print("Error for job %s: %s" % (job, error))
    DIRAC.exit(exitCode)


if __name__ == "__main__":
    main()
| ic-hep/DIRAC | src/DIRAC/Interfaces/scripts/dirac_wms_jobs_select_output_search.py | Python | gpl-3.0 | 3,510 | [
"DIRAC"
] | 2f00c829becab1d5b7bb5d1f205ef8f5b2d608248b684d7fb4313a7517c879b9 |
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
# pylint: disable=E1111
import unittest
import json
import requests
import datetime
import ddt
import random
from urllib import quote
from django.test import TestCase
from nose.tools import raises
from mock import Mock, patch
from django.conf import settings
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
from django.core import mail
from django.utils.timezone import utc
from django.test import RequestFactory
from django.contrib.auth.models import User
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory
from student.roles import CourseBetaTesterRole
from microsite_configuration import microsite
from instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.models import StudentModule
# modules which are mocked in test cases.
import instructor_task.api
import instructor.views.api
from instructor.views.api import _split_input_list, common_exceptions_400
from instructor_task.api_helper import AlreadyRunningError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from shoppingcart.models import (
RegistrationCodeRedemption, Order,
PaidCourseRegistration, Coupon, Invoice, CourseRegistrationCode
)
from course_modes.models import CourseMode
from .test_tools import msk_from_problem_urlname
from ..views.tools import get_extended_due
# Expected CSV header rows for the registration-code and coupon report
# downloads checked by the tests below.
EXPECTED_CSV_HEADER = '"code","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser","customer_reference_number","internal_reference"'
EXPECTED_COUPON_CSV_HEADER = '"course_id","percentage_discount","code_redeemed_count","description"'

# ddt data for test cases involving reports
REPORTS_DATA = (
    {
        'report_type': 'grade',
        'instructor_api_endpoint': 'calculate_grades_csv',
        'task_api_endpoint': 'instructor_task.api.submit_calculate_grades_csv',
        'extra_instructor_api_kwargs': {}
    },
    {
        'report_type': 'enrolled student profile',
        'instructor_api_endpoint': 'get_students_features',
        'task_api_endpoint': 'instructor_task.api.submit_calculate_students_features_csv',
        'extra_instructor_api_kwargs': {'csv': '/csv'}
    }
)
@common_exceptions_400
def view_success(request):  # pylint: disable=W0613
    "A dummy view for testing that the decorator passes successful responses through"
    return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request):  # pylint: disable=W0613
    "A dummy view that raises a User.DoesNotExist exception for the decorator to convert"
    raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request):  # pylint: disable=W0613
    "A dummy view that raises an AlreadyRunningError exception for the decorator to convert"
    raise AlreadyRunningError()
class TestCommonExceptions400(unittest.TestCase):
    """
    Testing the common_exceptions_400 decorator.

    Verifies that wrapped views convert known exceptions into HTTP 400
    responses, with a JSON body for AJAX requests and plain text otherwise.
    """
    def setUp(self):
        # Minimal mocked request; META must exist for the decorator.
        self.request = Mock(spec=HttpRequest)
        self.request.META = {}

    def test_happy_path(self):
        # A view that succeeds is passed through unchanged.
        resp = view_success(self.request)
        self.assertEqual(resp.status_code, 200)

    def test_user_doesnotexist(self):
        # Non-AJAX: plain-text 400 response.
        self.request.is_ajax.return_value = False
        resp = view_user_doesnotexist(self.request)
        self.assertEqual(resp.status_code, 400)
        self.assertIn("User does not exist", resp.content)

    def test_user_doesnotexist_ajax(self):
        # AJAX: JSON body with the message under "error".
        self.request.is_ajax.return_value = True
        resp = view_user_doesnotexist(self.request)
        self.assertEqual(resp.status_code, 400)
        result = json.loads(resp.content)
        self.assertIn("User does not exist", result["error"])

    def test_alreadyrunningerror(self):
        self.request.is_ajax.return_value = False
        resp = view_alreadyrunningerror(self.request)
        self.assertEqual(resp.status_code, 400)
        self.assertIn("Task is already running", resp.content)

    def test_alreadyrunningerror_ajax(self):
        self.request.is_ajax.return_value = True
        resp = view_alreadyrunningerror(self.request)
        self.assertEqual(resp.status_code, 400)
        result = json.loads(resp.content)
        self.assertIn("Task is already running", result["error"])
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Ensure that users cannot access endpoints they shouldn't be able to.

    Each access-level test sweeps the staff-only and instructor-only endpoint
    lists built in setUp and asserts the expected HTTP status for that role.
    """

    def setUp(self):
        self.course = CourseFactory.create()
        self.user = UserFactory.create()
        CourseEnrollment.enroll(self.user, self.course.id)

        self.problem_location = msk_from_problem_urlname(
            self.course.id,
            'robot-some-problem-urlname'
        )
        self.problem_urlname = self.problem_location.to_deprecated_string()
        # Created for its side effect only: gives self.user some problem
        # state so endpoints that reset/rescore attempts have data to act on.
        _module = StudentModule.objects.create(
            student=self.user,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )

        # Endpoints that only Staff or Instructors can access
        # (FIX: removed a duplicated ('get_students_features', {}) entry that
        # appeared twice in this list.)
        self.staff_level_endpoints = [
            ('students_update_enrollment', {'identifiers': 'foo@example.org', 'action': 'enroll'}),
            ('get_grading_config', {}),
            ('get_students_features', {}),
            ('get_distribution', {}),
            ('get_student_progress_url', {'unique_student_identifier': self.user.username}),
            ('reset_student_attempts',
             {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
            ('update_forum_role_membership',
             {'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
            ('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
            ('proxy_legacy_analytics', {'aname': 'ProblemGradeDistribution'}),
            ('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
            ('list_instructor_tasks', {}),
            ('list_background_email_tasks', {}),
            ('list_report_downloads', {}),
            ('calculate_grades_csv', {}),
        ]
        # Endpoints that only Instructors can access
        self.instructor_level_endpoints = [
            ('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
            ('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
            ('list_course_role_members', {'rolename': 'beta'}),
            ('rescore_problem',
             {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
        ]

    def _access_endpoint(self, endpoint, args, status_code, msg):
        """
        Asserts that accessing the given `endpoint` gets a response of `status_code`.

        endpoint: string, endpoint for instructor dash API
        args: dict, kwargs for `reverse` call
        status_code: expected HTTP status code response
        msg: message to display if assertion fails.
        """
        url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
        # These three endpoints only accept POST; everything else is a GET.
        if endpoint in ['send_email', 'students_update_enrollment', 'bulk_beta_modify_access']:
            response = self.client.post(url, args)
        else:
            response = self.client.get(url, args)
        self.assertEqual(
            response.status_code,
            status_code,
            msg=msg
        )

    def test_student_level(self):
        """
        Ensure that an enrolled student can't access staff or instructor endpoints.
        """
        self.client.login(username=self.user.username, password='test')

        for endpoint, args in self.staff_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Student should not be allowed to access endpoint " + endpoint
            )

        for endpoint, args in self.instructor_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Student should not be allowed to access endpoint " + endpoint
            )

    def test_staff_level(self):
        """
        Ensure that a staff member can't access instructor endpoints.
        """
        staff_member = StaffFactory(course_key=self.course.id)
        CourseEnrollment.enroll(staff_member, self.course.id)
        self.client.login(username=staff_member.username, password='test')
        # Try to promote to forums admin - not working
        # update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')

        for endpoint, args in self.staff_level_endpoints:
            # TODO: make these work
            if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics', 'list_forum_members']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Staff member should be allowed to access endpoint " + endpoint
            )

        for endpoint, args in self.instructor_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Staff member should not be allowed to access endpoint " + endpoint
            )

    def test_instructor_level(self):
        """
        Ensure that an instructor member can access all endpoints.
        """
        inst = InstructorFactory(course_key=self.course.id)
        CourseEnrollment.enroll(inst, self.course.id)
        self.client.login(username=inst.username, password='test')

        for endpoint, args in self.staff_level_endpoints:
            # TODO: make these work
            if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Instructor should be allowed to access endpoint " + endpoint
            )

        for endpoint, args in self.instructor_level_endpoints:
            # TODO: make this work
            if endpoint in ['rescore_problem']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Instructor should be allowed to access endpoint " + endpoint
            )
@ddt.ddt
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test enrollment modification endpoint.
    This test does NOT exhaustively test state changes, that is the
    job of test_enrollment. This tests the response and action switch.
    """

    def setUp(self):
        self.request = RequestFactory().request()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

        self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
        CourseEnrollment.enroll(
            self.enrolled_student,
            self.course.id
        )
        self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
                                               last_name='Student')

        # Create invited, but not registered, user
        cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
        cea.save()
        self.allowed_email = 'robot-allowed@robot.org'

        # An address with no corresponding User row (asserted below).
        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)

        # Email URL values
        self.site_name = microsite.get_value(
            'SITE_NAME',
            settings.SITE_NAME
        )
        self.about_path = '/courses/{}/about'.format(self.course.id)
        self.course_path = '/courses/{}/'.format(self.course.id)

        # uncomment to enable enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (comment because pylint C0103)
        # self.maxDiff = None

    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()

    def test_missing_params(self):
        """ Test missing all query parameters. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 400)

    def test_bad_action(self):
        """ Test with an invalid action. """
        action = 'robot-not-an-action'
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
        self.assertEqual(response.status_code, 400)

    def test_invalid_email(self):
        """A malformed email is reported back as an invalid identifier."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius@',
                    "invalidIdentifier": True,
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_invalid_username(self):
        """An unknown username is reported back as an invalid identifier."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius',
                    "invalidIdentifier": True,
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_enroll_with_username(self):
        """Enrolling by username returns the expected before/after state."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": self.notenrolled_student.username,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_enroll_without_email(self):
        """Enrolling with email_students=False enrolls the user and sends no mail."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': False})
        print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    @ddt.data('http', 'https')
    def test_enroll_with_email(self, protocol):
        """Enrolling with email_students=True enrolls the user and sends the enrollment email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)

        print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been enrolled in {}'.format(self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear NotEnrolled Student\n\nYou have been enrolled in {} "
            "at edx.org by a member of the course staff. "
            "The course should now appear on your edx.org dashboard.\n\n"
            "To start accessing course materials, please visit "
            "{proto}://{site}{course_path}\n\n----\n"
            "This email was automatically sent from edx.org to NotEnrolled Student".format(
                self.course.display_name,
                proto=protocol, site=self.site_name, course_path=self.course_path
            )
        )

    @ddt.data('http', 'https')
    def test_enroll_with_email_not_registered(self, protocol):
        """Enrolling an unregistered email sends a registration invitation."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been invited to register for {}'.format(self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the "
            "registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account, "
            "visit {proto}://{site}{about_path} to join the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                self.course.display_name, proto=protocol, site=self.site_name, about_path=self.about_path
            )
        )

    @ddt.data('http', 'https')
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_enroll_email_not_registered_mktgsite(self, protocol):
        """Same as above but with the marketing site enabled (different email body)."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "You can then enroll in {display_name}.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                display_name=self.course.display_name, proto=protocol, site=self.site_name
            )
        )

    @ddt.data('http', 'https')
    def test_enroll_with_email_not_registered_autoenroll(self, protocol):
        """Auto-enroll of an unregistered email changes the invitation wording."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
                  'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been invited to register for {}'.format(self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account, you will see {display_name} listed on your dashboard.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, display_name=self.course.display_name
            )
        )

    def test_unenroll_without_email(self):
        """Unenrolling with email_students=False unenrolls and sends no mail."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': False})
        print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_unenroll_with_email(self):
        """Unenrolling with email_students=True unenrolls and sends the un-enrollment email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': True})
        print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Enrolled Student\n\nYou have been un-enrolled in {display_name} "
            "at edx.org by a member of the course staff. "
            "The course will no longer appear on your edx.org dashboard.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to Enrolled Student".format(
                display_name=self.course.display_name,
            )
        )

    def test_unenroll_with_email_allowed_student(self):
        """Unenrolling an invited-but-unregistered email revokes the allowance and notifies."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
        print "type(self.allowed_email): {}".format(type(self.allowed_email))
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.allowed_email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": True,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Student,\n\nYou have been un-enrolled from course {display_name} by a member of the course staff. "
            "Please disregard the invitation previously sent.\n\n----\n"
            "This email was automatically sent from edx.org to robot-allowed@robot.org".format(
                display_name=self.course.display_name,
            )
        )

    @ddt.data('http', 'https')
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
        """With shib auth the invitation omits the registration-form instructions."""
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, about_path=self.about_path,
                display_name=self.course.display_name,
            )
        )

    @patch('instructor.enrollment.uses_shib')
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
        # Try with marketing site enabled and shib on
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Try with marketing site enabled
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
            response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                self.course.display_name,
            )
        )

    @ddt.data('http', 'https')
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
        """Shib + auto-enroll invitation points straight at the course and login."""
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
                  'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                display_name=self.course.display_name,
                proto=protocol, site=self.site_name, course_path=self.course_path
            )
        )
@ddt.ddt
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIBulkBetaEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
def setUp(self):
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
self.request = RequestFactory().request()
# Email URL values
self.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
self.about_path = '/courses/{}/about'.format(self.course.id)
self.course_path = '/courses/{}/'.format(self.course.id)
# uncomment to enable enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103)
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
Asserts the reponse returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
@ddt.data('http', 'https')
def test_add_notenrolled_with_email(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"Visit {proto}://{site}{about_path} to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
about_path=self.about_path
)
)
@ddt.data('http', 'https')
def test_add_notenrolled_with_email_autoenroll(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
course_path=self.course_path
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {}\n\nYou have been invited to be a beta tester "
"for {} at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {}".format(
self.notenrolled_student.profile.name,
self.course.display_name,
self.notenrolled_student.email,
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
    def test_remove_with_email(self):
        """
        Removing a beta tester with ``email_students=True`` revokes the role,
        reports success for the identifier, and sends exactly one removal
        email whose subject and body are pinned here verbatim.
        """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True})
        self.assertEqual(response.status_code, 200)
        # Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched from the email above which had its cache cleared
        if hasattr(self.beta_tester, '_roles'):
            del self.beta_tester._roles
        self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been removed from a beta test for {display_name}'.format(display_name=self.course.display_name,)
        )
        # The removal email must reassure the student that the course stays on
        # their dashboard and that their other courses are unaffected.
        self.assertEqual(
            mail.outbox[0].body,
            "Dear {full_name}\n\nYou have been removed as a beta tester for "
            "{display_name} at edx.org by a member of the course staff. "
            "The course will remain on your dashboard, but you will no longer "
            "be part of the beta testing group.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to {email_address}".format(
                display_name=self.course.display_name,
                full_name=self.beta_tester.profile.name,
                email_address=self.beta_tester.email
            )
        )
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change permissions
    of other users.

    This test does NOT test whether the actions had an effect on the
    database, that is the job of test_access.
    This tests the response and action switch.
    Actually, modify_access does not have a very meaningful
    response yet, so only the status code is tested.
    """
    def setUp(self):
        # ModuleStoreTestCase.setUp performs required modulestore setup and
        # teardown registration; omitting it can leak state between tests.
        # (The other test classes in this module call it as well.)
        super(TestInstructorAPILevelsAccess, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

        self.other_instructor = InstructorFactory(course_key=self.course.id)
        self.other_staff = StaffFactory(course_key=self.course.id)
        self.other_user = UserFactory()

    def test_modify_access_noparams(self):
        """ Test missing all query parameters. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_modify_access_bad_action(self):
        """ Test with an invalid action parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'staff',
            'action': 'robot-not-an-action',
        })
        self.assertEqual(response.status_code, 400)

    def test_modify_access_bad_role(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'robot-not-a-roll',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 400)

    def test_modify_access_allow(self):
        """ Granting a role to a user identified by email succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_user.email,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_allow_with_uname(self):
        """ Granting a role to a user identified by username succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_instructor.username,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke(self):
        """ Revoking a role from a user identified by email succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke_with_username(self):
        """ Revoking a role from a user identified by username succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.username,
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_with_fake_user(self):
        """ An unknown identifier yields 200 with a userDoesNotExist payload. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': 'GandalfTheGrey',
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
        expected = {
            'unique_student_identifier': 'GandalfTheGrey',
            'userDoesNotExist': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_modify_access_with_inactive_user(self):
        """ An inactive user yields 200 with an inactiveUser payload. """
        self.other_user.is_active = False
        self.other_user.save()  # pylint: disable=no-member
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_user.username,
            'rolename': 'beta',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)
        expected = {
            'unique_student_identifier': self.other_user.username,
            'inactiveUser': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_modify_access_revoke_not_allowed(self):
        """ Test revoking access that a user does not have. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke_self(self):
        """
        Test that an instructor cannot remove instructor privileges from themselves.
        """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.instructor.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
        # check response content
        expected = {
            'unique_student_identifier': self.instructor.username,
            'rolename': 'instructor',
            'action': 'revoke',
            'removingSelfAsInstructor': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_list_course_role_members_noparams(self):
        """ Test missing all query parameters. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_list_course_role_members_bad_rolename(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'rolename': 'robot-not-a-rolename',
        })
        self.assertEqual(response.status_code, 400)

    def test_list_course_role_members_staff(self):
        """ Listing the staff role returns the one staff member from setUp. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'rolename': 'staff',
        })
        self.assertEqual(response.status_code, 200)

        # check response content
        expected = {
            'course_id': self.course.id.to_deprecated_string(),
            'staff': [
                {
                    'username': self.other_staff.username,
                    'email': self.other_staff.email,
                    'first_name': self.other_staff.first_name,
                    'last_name': self.other_staff.last_name,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_list_course_role_members_beta(self):
        """ Listing the beta role returns an empty list (no testers added). """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'rolename': 'beta',
        })
        self.assertEqual(response.status_code, 200)

        # check response content
        expected = {
            'course_id': self.course.id.to_deprecated_string(),
            'beta': []
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_update_forum_role_membership(self):
        """
        Test update forum role membership with user's email and username.
        """
        # Seed forum roles for course.
        seed_permissions_roles(self.course.id)

        # Exercise every combination of identifier type, rolename and action.
        for user in [self.instructor, self.other_user]:
            for identifier_attr in [user.email, user.username]:
                for rolename in ["Administrator", "Moderator", "Community TA"]:
                    for action in ["allow", "revoke"]:
                        self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)

    def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
        """
        Test update forum role membership.
        Get unique_student_identifier, rolename and action and update forum role.
        """
        url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(
            url,
            {
                'unique_student_identifier': identifier,
                'rolename': rolename,
                'action': action,
            }
        )

        # Status code should be 200.
        self.assertEqual(response.status_code, 200)

        # Verify the role assignment actually changed in the forum role table.
        user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
        if action == 'allow':
            self.assertIn(rolename, user_roles)
        elif action == 'revoke':
            self.assertNotIn(rolename, user_roles)
@ddt.ddt
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints that show data without side effects.
    """
    def setUp(self):
        super(TestInstructorAPILevelsDataDump, self).setUp()
        self.course = CourseFactory.create()
        # A paid honor mode so the e-commerce (cart / coupon / invoice)
        # endpoints have something to report on.
        self.course_mode = CourseMode(course_id=self.course.id,
                                      mode_slug="honor",
                                      mode_display_name="honor cert",
                                      min_price=40)
        self.course_mode.save()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.cart = Order.get_cart_for_user(self.instructor)
        self.coupon_code = 'abcde'
        self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
                             percentage_discount=10, created_by=self.instructor, is_active=True)
        self.coupon.save()

        # create testing invoice 1
        self.sale_invoice_1 = Invoice.objects.create(
            total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
            recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
            internal_reference="A", course_id=self.course.id, is_valid=True
        )

        self.students = [UserFactory() for _ in xrange(6)]
        for student in self.students:
            CourseEnrollment.enroll(student, self.course.id)

    def test_invalidate_sale_record(self):
        """
        Testing the sale invalidating scenario.
        """
        for i in range(2):
            course_registration_code = CourseRegistrationCode(
                code='sale_invoice{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor, invoice=self.sale_invoice_1
            )
            course_registration_code.save()

        data = {'invoice_number': self.sale_invoice_1.id, 'event_type': "invalidate"}
        url = reverse('sale_validation', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url, method="POST", data=data)

        # Now try to fetch data against a non-existing invoice number
        test_data_1 = {'invoice_number': 100, 'event_type': "invalidate"}
        self.assert_request_status_code(404, url, method="POST", data=test_data_1)

        # Now invalidate the same invoice number and expect a Bad request
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("The sale associated with this invoice has already been invalidated.", response.content)

        # now re_validate the invoice number
        data['event_type'] = "re_validate"
        self.assert_request_status_code(200, url, method="POST", data=data)

        # Now re_validate the same active invoice number and expect a Bad request
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("This invoice is already active.", response.content)

        # Missing / malformed parameters each produce a 400 with a clear message.
        test_data_2 = {'invoice_number': self.sale_invoice_1.id}
        response = self.assert_request_status_code(400, url, method="POST", data=test_data_2)
        self.assertIn("Missing required event_type parameter", response.content)

        test_data_3 = {'event_type': "re_validate"}
        response = self.assert_request_status_code(400, url, method="POST", data=test_data_3)
        self.assertIn("Missing required invoice_number parameter", response.content)

        # submitting invalid invoice number
        data['invoice_number'] = 'testing'
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("invoice_number must be an integer, {value} provided".format(value=data['invoice_number']), response.content)

    def test_get_ecommerce_purchase_features_csv(self):
        """
        Test that the response from get_purchase_transaction is in csv format.
        """
        PaidCourseRegistration.add_to_order(self.cart, self.course.id)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url + '/csv', {})
        self.assertEqual(response['Content-Type'], 'text/csv')

    def test_get_sale_records_features_csv(self):
        """
        Test that the response from get_sale_records is in csv format.
        """
        for i in range(2):
            course_registration_code = CourseRegistrationCode(
                code='sale_invoice{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor, invoice=self.sale_invoice_1
            )
            course_registration_code.save()

        url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url + '/csv', {})
        self.assertEqual(response['Content-Type'], 'text/csv')

    def test_get_sale_records_features_json(self):
        """
        Test that the response from get_sale_records is in json format.
        """
        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='sale_invoice{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor, invoice=self.sale_invoice_1
            )
            course_registration_code.save()

        url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('sale', res_json)

        for res in res_json['sale']:
            self.validate_sale_records_response(res, course_registration_code, self.sale_invoice_1, 0)

    def test_get_sale_records_features_with_used_code(self):
        """
        Test that the response from get_sale_records is in json format and using one of the registration codes.
        """
        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='qwerty{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor, invoice=self.sale_invoice_1
            )
            course_registration_code.save()

        PaidCourseRegistration.add_to_order(self.cart, self.course.id)

        # now using registration code
        self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'qwerty0'})

        url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('sale', res_json)

        # One of the five codes has been redeemed, so total_used_codes == 1.
        for res in res_json['sale']:
            self.validate_sale_records_response(res, course_registration_code, self.sale_invoice_1, 1)

    def test_get_sale_records_features_with_multiple_invoices(self):
        """
        Test that the response from get_sale_records is in json format for multiple invoices
        """
        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='qwerty{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor, invoice=self.sale_invoice_1
            )
            course_registration_code.save()

        # create test invoice 2
        sale_invoice_2 = Invoice.objects.create(
            total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
            recipient_name='Testw_2', recipient_email='test2@test.com', customer_reference_number='2Fwe23S',
            internal_reference="B", course_id=self.course.id
        )

        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='xyzmn{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor, invoice=sale_invoice_2
            )
            course_registration_code.save()

        url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('sale', res_json)

        # course_registration_code is only consulted for created_by.username,
        # which is the same instructor for both invoices.
        self.validate_sale_records_response(res_json['sale'][0], course_registration_code, self.sale_invoice_1, 0)
        self.validate_sale_records_response(res_json['sale'][1], course_registration_code, sale_invoice_2, 0)

    def validate_sale_records_response(self, res, course_registration_code, invoice, used_codes):
        """
        validate sale records attribute values with the response object
        """
        self.assertEqual(res['total_amount'], invoice.total_amount)
        self.assertEqual(res['recipient_email'], invoice.recipient_email)
        self.assertEqual(res['recipient_name'], invoice.recipient_name)
        self.assertEqual(res['company_name'], invoice.company_name)
        self.assertEqual(res['company_contact_name'], invoice.company_contact_name)
        self.assertEqual(res['company_contact_email'], invoice.company_contact_email)
        self.assertEqual(res['internal_reference'], invoice.internal_reference)
        self.assertEqual(res['customer_reference_number'], invoice.customer_reference_number)
        self.assertEqual(res['invoice_number'], invoice.id)
        self.assertEqual(res['created_by'], course_registration_code.created_by.username)
        self.assertEqual(res['course_id'], invoice.course_id.to_deprecated_string())
        self.assertEqual(res['total_used_codes'], used_codes)
        self.assertEqual(res['total_codes'], 5)

    def test_get_ecommerce_purchase_features_with_coupon_info(self):
        """
        Test that some minimum of information is formatted
        correctly in the response to get_purchase_transaction.
        """
        PaidCourseRegistration.add_to_order(self.cart, self.course.id)
        url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})

        # using coupon code
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')

        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('students', res_json)
        for res in res_json['students']:
            self.validate_purchased_transaction_response(res, self.cart, self.instructor, self.coupon_code)

    def test_get_ecommerce_purchases_features_without_coupon_info(self):
        """
        Test that some minimum of information is formatted
        correctly in the response to get_purchase_transaction.
        """
        url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})
        carts, instructors = [], []

        # purchasing the course by different users
        for _ in xrange(3):
            test_instructor = InstructorFactory(course_key=self.course.id)
            self.client.login(username=test_instructor.username, password='test')
            cart = Order.get_cart_for_user(test_instructor)
            carts.append(cart)
            instructors.append(test_instructor)
            PaidCourseRegistration.add_to_order(cart, self.course.id)
            cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')

        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('students', res_json)
        # No coupon was used, so the report shows the literal string 'None'.
        for res, i in zip(res_json['students'], xrange(3)):
            self.validate_purchased_transaction_response(res, carts[i], instructors[i], 'None')

    def validate_purchased_transaction_response(self, res, cart, user, code):
        """
        validate purchased transactions attribute values with the response object
        """
        item = cart.orderitem_set.all().select_subclasses()[0]

        self.assertEqual(res['coupon_code'], code)
        self.assertEqual(res['username'], user.username)
        self.assertEqual(res['email'], user.email)
        self.assertEqual(res['list_price'], item.list_price)
        self.assertEqual(res['unit_cost'], item.unit_cost)
        self.assertEqual(res['order_id'], cart.id)
        self.assertEqual(res['orderitem_id'], item.id)

    def test_get_students_features(self):
        """
        Test that some minimum of information is formatted
        correctly in the response to get_students_features.
        """
        url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('students', res_json)
        for student in self.students:
            student_json = [
                x for x in res_json['students']
                if x['username'] == student.username
            ][0]
            self.assertEqual(student_json['username'], student.username)
            self.assertEqual(student_json['email'], student.email)

    @ddt.data(True, False)
    def test_get_students_features_cohorted(self, is_cohorted):
        """
        Test that get_students_features includes cohort info when the course is
        cohorted, and does not when the course is not cohorted.
        """
        url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
        self.course.cohort_config = {'cohorted': is_cohorted}
        self.store.update_item(self.course, self.instructor.id)

        response = self.client.get(url, {})
        res_json = json.loads(response.content)

        self.assertEqual('cohort' in res_json['feature_names'], is_cohorted)

    @patch.object(instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
    @patch.object(instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
    def test_get_anon_ids(self):
        """
        Test the CSV output for the anonymized user ids.
        """
        url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(
            '"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
            '\n"3","41","42"\n'
        ))
        self.assertTrue(body.endswith('"8","41","42"\n'))

    def test_list_report_downloads(self):
        url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
            mock_links_for.return_value = [
                ('mock_file_name_1', 'https://1.mock.url'),
                ('mock_file_name_2', 'https://2.mock.url'),
            ]
            response = self.client.get(url, {})

        expected_response = {
            "downloads": [
                {
                    "url": "https://1.mock.url",
                    "link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
                    "name": "mock_file_name_1"
                },
                {
                    "url": "https://2.mock.url",
                    "link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
                    "name": "mock_file_name_2"
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected_response)

    @ddt.data(*REPORTS_DATA)
    @ddt.unpack
    def test_calculate_report_csv_success(self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs):
        kwargs = {'course_id': unicode(self.course.id)}
        kwargs.update(extra_instructor_api_kwargs)
        url = reverse(instructor_api_endpoint, kwargs=kwargs)

        with patch(task_api_endpoint):
            response = self.client.get(url, {})
        success_status = "Your {report_type} report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section.".format(report_type=report_type)
        self.assertIn(success_status, response.content)

    @ddt.data(*REPORTS_DATA)
    @ddt.unpack
    def test_calculate_report_csv_already_running(self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs):
        kwargs = {'course_id': unicode(self.course.id)}
        kwargs.update(extra_instructor_api_kwargs)
        url = reverse(instructor_api_endpoint, kwargs=kwargs)

        with patch(task_api_endpoint) as mock:
            mock.side_effect = AlreadyRunningError()
            response = self.client.get(url, {})
        already_running_status = "{report_type} report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below.".format(report_type=report_type)
        self.assertIn(already_running_status, response.content)

    def test_get_distribution_no_feature(self):
        """
        Test that get_distribution lists available features
        when supplied no feature parameter.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertEqual(type(res_json['available_features']), list)

        # An empty feature parameter behaves the same as no parameter.
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url + u'?feature=')
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertEqual(type(res_json['available_features']), list)

    def test_get_distribution_unavailable_feature(self):
        """
        Test that get_distribution fails gracefully with
        an unavailable feature.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})
        self.assertEqual(response.status_code, 400)

    def test_get_distribution_gender(self):
        """
        Test that get_distribution returns the expected gender distribution
        for the enrolled students.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'feature': 'gender'})
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        # All six factory-created students default to gender 'm'.
        self.assertEqual(res_json['feature_results']['data']['m'], 6)
        self.assertEqual(res_json['feature_results']['choices_display_names']['m'], 'Male')
        self.assertEqual(res_json['feature_results']['data']['no_data'], 0)
        self.assertEqual(res_json['feature_results']['choices_display_names']['no_data'], 'No Data')

    def test_get_student_progress_url(self):
        """ Test that progress_url is in the successful response. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        url += "?unique_student_identifier={}".format(
            quote(self.students[0].email.encode("utf-8"))
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertIn('progress_url', res_json)

    def test_get_student_progress_url_from_uname(self):
        """ Test that progress_url is in the successful response. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        url += "?unique_student_identifier={}".format(
            quote(self.students[0].username.encode("utf-8"))
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertIn('progress_url', res_json)

    def test_get_student_progress_url_noparams(self):
        """ Test that the endpoint 400's without the required query params. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_get_student_progress_url_nostudent(self):
        """ Test that the endpoint 400's when requesting an unknown email. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Previously this test sent no identifier at all, which only
        # re-tested the missing-parameter path; actually pass an
        # identifier that matches no user.
        response = self.client.get(url, {'unique_student_identifier': 'robot-not-a-user@example.com'})
        self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change student grades.
    This includes resetting attempts and starting rescore tasks.

    This test does NOT test whether the actions had an effect on the
    database, that is the job of task tests and test_enrollment.
    """
    def setUp(self):
        # ModuleStoreTestCase.setUp performs required modulestore setup and
        # teardown registration; omitting it can leak state between tests.
        # (The other test classes in this module call it as well.)
        super(TestInstructorAPIRegradeTask, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)

        self.problem_location = msk_from_problem_urlname(
            self.course.id,
            'robot-some-problem-urlname'
        )
        self.problem_urlname = self.problem_location.to_deprecated_string()

        # Give the student prior state (10 attempts) so resets are observable.
        self.module_to_reset = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )

    def test_reset_student_attempts_deletall(self):
        """ Make sure no one can delete all students state on a problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 400)

    def test_reset_student_attempts_single(self):
        """ Test reset single student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        # make sure problem attempts have been reset.
        changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
        self.assertEqual(
            json.loads(changed_module.state)['attempts'],
            0
        )

    # mock out the function which should be called to execute the action.
    @patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
    def test_reset_student_attempts_all(self, act):
        """ Test reset all student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    def test_reset_student_attempts_missingmodule(self):
        """ Test reset for non-existent problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': 'robot-not-a-real-module',
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)

    def test_reset_student_attempts_delete(self):
        """ Test delete single student state. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 200)
        # make sure the module has been deleted
        self.assertEqual(
            StudentModule.objects.filter(
                student=self.module_to_reset.student,
                course_id=self.module_to_reset.course_id,
                # module_id=self.module_to_reset.module_id,
            ).count(),
            0
        )

    def test_reset_student_attempts_nonsense(self):
        """ Test failure with both unique_student_identifier and all_students. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 400)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single(self, act):
        """ Test rescoring of a single student. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single_from_uname(self, act):
        """ Test rescoring of a single student identified by username. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.username,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
    def test_rescore_problem_all(self, act):
        """ Test rescoring for all students. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Checks that only instructors have access to email endpoints, and that
    these endpoints are only accessible with courses that actually exist,
    only with valid email messages.
    """
    def setUp(self):
        """Create a course, log in as its instructor, and build a valid email payload."""
        # Run the modulestore test-case setup. This was missing; other test
        # classes in this module (e.g. TestDueDateExtensions) call it.
        super(TestInstructorSendEmail, self).setUp()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Non-ASCII subject/message exercise unicode handling in the endpoint.
        test_subject = u'\u1234 test subject'
        test_message = u'\u6824 test message'
        self.full_test_message = {
            'send_to': 'staff',
            'subject': test_subject,
            'message': test_message,
        }

    def test_send_email_as_logged_in_instructor(self):
        """An authenticated instructor can send a well-formed email."""
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 200)

    def test_send_email_but_not_logged_in(self):
        """An anonymous request to the email endpoint is forbidden."""
        self.client.logout()
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)

    def test_send_email_but_not_staff(self):
        """A logged-in non-staff user may not send email."""
        self.client.logout()
        student = UserFactory()
        self.client.login(username=student.username, password='test')
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)

    def test_send_email_but_course_not_exist(self):
        """Sending to a nonexistent course must not succeed."""
        url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
        response = self.client.post(url, self.full_test_message)
        self.assertNotEqual(response.status_code, 200)

    def test_send_email_no_sendto(self):
        """Omitting 'send_to' is a bad request."""
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'subject': 'test subject',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)

    def test_send_email_no_subject(self):
        """Omitting 'subject' is a bad request."""
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': 'staff',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)

    def test_send_email_no_message(self):
        """Omitting 'message' is a bad request."""
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': 'staff',
            'subject': 'test subject',
        })
        self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
    """Stand-in for get_task_completion_info that alternates failure/success."""
    times_called = 0

    def mock_get_task_completion_info(self, *args):  # pylint: disable=unused-argument
        """Return (success, message); odd calls fail, even calls succeed."""
        self.times_called += 1
        if self.times_called % 2:
            return False, 'Task Errored In Some Way'
        return True, 'Task Completed'
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor task list endpoint.
    """

    class FakeTask(object):
        """ Fake task object """
        # Attributes the endpoint serializes for each task.
        FEATURES = [
            'task_type',
            'task_input',
            'task_id',
            'requester',
            'task_state',
            'created',
            'status',
            'task_message',
            'duration_sec'
        ]

        def __init__(self, completion):
            """
            Build a fake task; ``completion`` is a callable returning a
            (success, message) pair used to populate 'status'/'task_message'.
            """
            for feature in self.FEATURES:
                setattr(self, feature, 'expected')
            # created needs to be a datetime
            self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
            # set 'status' and 'task_message' attrs
            success, task_message = completion()
            if success:
                self.status = "Complete"
            else:
                self.status = "Incomplete"
            self.task_message = task_message
            # Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
            self.task_output = '{"duration_ms": 1035000}'
            self.duration_sec = 1035000 / 1000.0

        def make_invalid_output(self):
            """Munge task_output to be invalid json"""
            self.task_output = 'HI MY NAME IS INVALID JSON'
            # This should be given the value of 'unknown' if the task output
            # can't be properly parsed
            self.duration_sec = 'unknown'

        def to_dict(self):
            """ Convert fake task to dictionary representation. """
            attr_dict = {key: getattr(self, key) for key in self.FEATURES}
            attr_dict['created'] = attr_dict['created'].isoformat()
            return attr_dict

    def setUp(self):
        """Create a course, instructor, enrolled student, and a fake task list."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        self.problem_location = msk_from_problem_urlname(
            self.course.id,
            'robot-some-problem-urlname'
        )
        self.problem_urlname = self.problem_location.to_deprecated_string()
        self.module = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )
        # Seven fake tasks with alternating success/failure; the last one gets
        # unparseable task_output to exercise the 'unknown duration' path.
        mock_factory = MockCompletionInfo()
        self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
        self.tasks[-1].make_invalid_output()

    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()

    @patch.object(instructor_task.api, 'get_running_instructor_tasks')
    def test_list_instructor_tasks_running(self, act):
        """ Test list of all running tasks. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        # Compare task-by-task first (readable diff), then the whole list.
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_background_email_tasks(self, act):
        """Test list of background email tasks."""
        act.return_value = self.tasks
        url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem(self, act):
        """ Test list task history for problem. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {
                'problem_location_str': self.problem_urlname,
            })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem_student(self, act):
        """ Test list task history for problem AND student. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {
                'problem_location_str': self.problem_urlname,
                'unique_student_identifier': self.student.email,
            })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
# Class-level patch: every test method receives the mocked
# get_instructor_task_history as its 'task_history_request' argument.
@patch.object(instructor_task.api, 'get_instructor_task_history')
class TestInstructorEmailContentList(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test the instructor email content history endpoint.
    """
    def setUp(self):
        """Create a course/instructor and empty per-test fake email stores."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Parallel dicts keyed by fake email id; filled by setup_fake_email_info.
        self.tasks = {}
        self.emails = {}
        self.emails_info = {}

    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()

    def setup_fake_email_info(self, num_emails, with_failures=False):
        """ Initialize the specified number of fake emails """
        for email_id in range(num_emails):
            num_sent = random.randint(1, 15401)
            if with_failures:
                failed = random.randint(1, 15401)
            else:
                failed = 0
            self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
            self.emails[email_id] = FakeEmail(email_id)
            self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)

    def get_matching_mock_email(self, **kwargs):
        """ Returns the matching mock emails for the given id """
        email_id = kwargs.get('id', 0)
        return self.emails[email_id]

    def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
        """ Calls the list_email_content endpoint and returns the response """
        self.setup_fake_email_info(num_emails, with_failures)
        task_history_request.return_value = self.tasks.values()
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
            mock_email_info.side_effect = self.get_matching_mock_email
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        return response

    def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
        """ Tests sending emails with or without failures """
        response = self.get_email_content_response(num_emails, task_history_request, with_failures)
        self.assertTrue(task_history_request.called)
        expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
        actual_email_info = json.loads(response.content)['emails']
        self.assertEqual(len(actual_email_info), num_emails)
        for exp_email, act_email in zip(expected_email_info, actual_email_info):
            self.assertDictEqual(exp_email, act_email)
        self.assertEqual(expected_email_info, actual_email_info)

    def test_content_list_one_email(self, task_history_request):
        """ Test listing of bulk emails when email list has one email """
        response = self.get_email_content_response(1, task_history_request)
        self.assertTrue(task_history_request.called)
        email_info = json.loads(response.content)['emails']
        # Emails list should have one email
        self.assertEqual(len(email_info), 1)
        # Email content should be what's expected
        expected_message = self.emails[0].html_message
        returned_email_info = email_info[0]
        received_message = returned_email_info[u'email'][u'html_message']
        self.assertEqual(expected_message, received_message)

    def test_content_list_no_emails(self, task_history_request):
        """ Test listing of bulk emails when email list empty """
        response = self.get_email_content_response(0, task_history_request)
        self.assertTrue(task_history_request.called)
        email_info = json.loads(response.content)['emails']
        # Emails list should be empty
        self.assertEqual(len(email_info), 0)

    def test_content_list_email_content_many(self, task_history_request):
        """ Test listing of bulk emails when a large number of emails was sent """
        self.check_emails_sent(50, task_history_request)

    def test_list_email_content_error(self, task_history_request):
        """ Test handling of error retrieving email """
        invalid_task = FakeContentTask(0, 0, 0, 'test')
        invalid_task.make_invalid_input()
        task_history_request.return_value = [invalid_task]
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(task_history_request.called)
        returned_email_info = json.loads(response.content)['emails']
        self.assertEqual(len(returned_email_info), 1)
        returned_info = returned_email_info[0]
        # A task with unparseable input yields a row of all-None fields.
        for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
            self.assertEqual(returned_info[info], None)

    def test_list_email_with_failure(self, task_history_request):
        """ Test the handling of email task that had failures """
        self.check_emails_sent(1, task_history_request, True)

    def test_list_many_emails_with_failures(self, task_history_request):
        """ Test the handling of many emails with failures """
        self.check_emails_sent(50, task_history_request, True)

    def test_list_email_with_no_successes(self, task_history_request):
        """Test an email task that delivered nothing successfully."""
        task_info = FakeContentTask(0, 0, 10, 'expected')
        email = FakeEmail(0)
        email_info = FakeEmailInfo(email, 0, 10)
        task_history_request.return_value = [task_info]
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
            mock_email_info.return_value = email
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(task_history_request.called)
        returned_info_list = json.loads(response.content)['emails']
        self.assertEqual(len(returned_info_list), 1)
        returned_info = returned_info_list[0]
        expected_info = email_info.to_dict()
        self.assertDictEqual(expected_info, returned_info)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(ANALYTICS_SERVER_URL="http://robotanalyticsserver.netbot:900/")
@override_settings(ANALYTICS_API_KEY="robot_api_key")
class TestInstructorAPIAnalyticsProxy(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor analytics proxy endpoint.
    """

    class FakeProxyResponse(object):
        """ Fake successful requests response object. """
        def __init__(self):
            self.status_code = requests.status_codes.codes.OK
            self.content = '{"test_content": "robot test content"}'

    class FakeBadProxyResponse(object):
        """ Fake strange-failed requests response object. """
        def __init__(self):
            # Deliberately not an int status code, so the proxy treats it as failure.
            self.status_code = 'notok.'
            self.content = '{"test_content": "robot test content"}'

    def setUp(self):
        """Create a course and log in as its instructor."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_url(self, act):
        """ Test legacy analytics proxy url generation. """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 200)
        # check request url
        expected_url = "{url}get?aname={aname}&course_id={course_id!s}&apikey={api_key}".format(
            url="http://robotanalyticsserver.netbot:900/",
            aname="ProblemGradeDistribution",
            course_id=self.course.id.to_deprecated_string(),
            api_key="robot_api_key",
        )
        act.assert_called_once_with(expected_url)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy(self, act):
        """
        Test legacy analytics content proxying.
        """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_res = {'test_content': "robot test content"}
        self.assertEqual(json.loads(response.content), expected_res)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_reqfailed(self, act):
        """ Test proxy when server responds with failure. """
        act.return_value = self.FakeBadProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 500)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_missing_param(self, act):
        """ Test proxy when missing the aname query parameter. """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        self.assertEqual(response.status_code, 400)
        # The upstream analytics server must not even be contacted.
        self.assertFalse(act.called)
class TestInstructorAPIHelpers(TestCase):
    """ Test helpers for instructor.api """

    def test_split_input_list(self):
        """A mix of comma / newline / carriage-return separators splits cleanly."""
        raw = (
            "Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r"
            " convallis@at.lacus\r, ut@lacinia.Sed"
        )
        expected = [
            'Lorem@ipsum.dolor',
            'sit@amet.consectetur',
            'adipiscing@elit.Aenean',
            'convallis@at.lacus',
            'ut@lacinia.Sed',
        ]
        self.assertEqual(_split_input_list(raw), expected)

    def test_split_input_list_unicode(self):
        """Byte-string and unicode inputs split identically."""
        for raw in ('robot@robot.edu, robot2@robot.edu',
                    u'robot@robot.edu, robot2@robot.edu'):
            self.assertEqual(_split_input_list(raw),
                             ['robot@robot.edu', 'robot2@robot.edu'])
        self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
                         [u'robot@robot.edu', 'robot2@robot.edu'])
        # Exotic code points should pass through as a single untouched entry.
        scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
        self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])

    def test_msk_from_problem_urlname(self):
        """A course key plus a short urlname maps to the i4x module key string."""
        course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
        mapped = msk_from_problem_urlname(course_id, 'L2Node1')
        self.assertEqual(mapped.to_deprecated_string(),
                         'i4x://MITx/6.002x/problem/L2Node1')

    @raises(ValueError)
    def test_msk_from_problem_urlname_error(self):
        """A malformed course id raises ValueError."""
        msk_from_problem_urlname('notagoodcourse', 'L2Node1')
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestDueDateExtensions(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test data dumps for reporting.
    """
    def setUp(self):
        """
        Fixtures: a course with three week sections (week3 has no due date),
        a homework unit under week1, and three users with StudentModule state.
        """
        super(TestDueDateExtensions, self).setUp()
        due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
        course = CourseFactory.create()
        week1 = ItemFactory.create(due=due)
        week2 = ItemFactory.create(due=due)
        week3 = ItemFactory.create()  # No due date
        course.children = [week1.location.to_deprecated_string(), week2.location.to_deprecated_string(),
                           week3.location.to_deprecated_string()]
        homework = ItemFactory.create(
            parent_location=week1.location,
            due=due
        )
        week1.children = [homework.location.to_deprecated_string()]
        # user1 has state on all three weeks plus the homework unit.
        user1 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week1.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week2.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week3.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=homework.location).save()
        # user2 and user3 only have state on week1 and its homework.
        user2 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=course.id,
            module_state_key=week1.location).save()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=course.id,
            module_state_key=homework.location).save()
        # user3 is intentionally not kept as an attribute; only its rows matter.
        user3 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=course.id,
            module_state_key=week1.location).save()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=course.id,
            module_state_key=homework.location).save()
        self.course = course
        self.week1 = week1
        self.homework = homework
        self.week2 = week2
        self.week3 = week3
        self.user1 = user1
        self.user2 = user2
        self.instructor = InstructorFactory(course_key=course.id)
        self.client.login(username=self.instructor.username, password='test')

    def test_change_due_date(self):
        """An extension moves a student's due date for a unit."""
        url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
            'due_datetime': '12/30/2013 00:00'
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
                         get_extended_due(self.course, self.week1, self.user1))

    def test_change_to_invalid_due_date(self):
        """An extension earlier than the original due date is rejected."""
        url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
            'due_datetime': '01/01/2009 00:00'
        })
        self.assertEqual(response.status_code, 400, response.content)
        self.assertEqual(
            None,
            get_extended_due(self.course, self.week1, self.user1)
        )

    def test_change_nonexistent_due_date(self):
        """A unit with no due date (week3) cannot be extended."""
        url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week3.location.to_deprecated_string(),
            'due_datetime': '12/30/2013 00:00'
        })
        self.assertEqual(response.status_code, 400, response.content)
        self.assertEqual(
            None,
            get_extended_due(self.course, self.week3, self.user1)
        )

    def test_reset_date(self):
        """Resetting removes a previously granted extension."""
        self.test_change_due_date()
        url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(
            None,
            get_extended_due(self.course, self.week1, self.user1)
        )

    def test_reset_nonexistent_extension(self):
        """Resetting when no extension exists is a bad request."""
        url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
        })
        self.assertEqual(response.status_code, 400, response.content)

    def test_reset_extension_to_deleted_date(self):
        """
        Test that we can delete a due date extension after deleting the normal
        due date, without causing an error.
        """
        self.test_change_due_date()
        self.week1.due = None
        self.week1 = self.store.update_item(self.week1, self.user1.id)
        # Now, week1's normal due date is deleted but the extension still exists.
        url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(
            None,
            get_extended_due(self.course, self.week1, self.user1)
        )

    def test_show_unit_extensions(self):
        """The per-unit report lists every user with an extension on the unit."""
        self.test_change_due_date()
        url = reverse('show_unit_extensions',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'url': self.week1.location.to_deprecated_string()})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(json.loads(response.content), {
            u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
                       u'Full Name': self.user1.profile.name,
                       u'Username': self.user1.username}],
            u'header': [u'Username', u'Full Name', u'Extended Due Date'],
            u'title': u'Users with due date extensions for %s' %
            self.week1.display_name})

    def test_show_student_extensions(self):
        """The per-student report lists every unit the student has extended."""
        self.test_change_due_date()
        url = reverse('show_student_extensions',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'student': self.user1.username})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(json.loads(response.content), {
            u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
                       u'Unit': self.week1.display_name}],
            u'header': [u'Unit', u'Extended Due Date'],
            u'title': u'Due date extensions for %s (%s)' % (
                self.user1.profile.name, self.user1.username)})
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(ModuleStoreTestCase):
"""
Test data dumps for E-commerce Course Registration Codes.
"""
    def setUp(self):
        """
        Fixtures: pre-generate 12 registration codes for the course, create
        five purchased orders, and redeem the first five codes.
        """
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {
            'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'sale_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
            'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }
        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        for i in range(5):
            order = Order(user=self.instructor, status='purchased')
            order.save()
        # Spent(used) Registration Codes
        for i in range(5):
            # order/registration-code ids are 1-based, hence the shift.
            i += 1
            registration_code_redemption = RegistrationCodeRedemption(
                order_id=i, registration_code_id=i, redeemed_by=self.instructor
            )
            registration_code_redemption.save()
    def test_user_invoice_copy_preference(self):
        """
        Test to remember user invoice copy preference
        """
        url_reg_code = reverse('generate_registration_codes',
                               kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {
            'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'sale_price': 121.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
        }
        # user invoice copy preference will be saved in api user preference; model
        response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        # get user invoice copy preference.
        url_user_invoice_preference = reverse('get_user_invoice_preference',
                                              kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url_user_invoice_preference, data)
        result = json.loads(response.content)
        # 'invoice': 'True' above should have been remembered as True.
        self.assertEqual(result['invoice_copy'], True)
        # updating the user invoice copy preference during code generation flow
        data['invoice'] = ''
        response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        # get user invoice copy preference.
        url_user_invoice_preference = reverse('get_user_invoice_preference',
                                              kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url_user_invoice_preference, data)
        result = json.loads(response.content)
        # An empty 'invoice' field flips the stored preference back to False.
        self.assertEqual(result['invoice_copy'], False)
    def test_generate_course_registration_codes_csv(self):
        """
        Test to generate a response of all the generated course registration codes
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {
            'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'sale_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }
        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # 17 lines = 1 header + 15 codes + 1 trailing newline.
        self.assertEqual(len(body.split('\n')), 17)
    # 'first' will collide with the coupon created below, forcing the view to
    # draw again from the side_effect sequence; 4 values cover 3 codes + 1 retry.
    @patch.object(instructor.views.api, 'random_code_generator',
                  Mock(side_effect=['first', 'second', 'third', 'fourth']))
    def test_generate_course_registration_codes_matching_existing_coupon_code(self):
        """
        Test the generated course registration code is already in the Coupon Table
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
        coupon.save()
        data = {
            'total_registration_codes': 3, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'sale_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }
        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 5)  # 1 for headers, 1 for new line at the end and 3 for the actual data
@patch.object(instructor.views.api, 'random_code_generator',
              Mock(side_effect=['first', 'first', 'second', 'third']))
def test_generate_course_registration_codes_integrity_error(self):
    """
    Test for the Integrity error against the generated code.

    The patched generator returns 'first' twice; saving the duplicate code
    raises an IntegrityError which the view must swallow and retry with the
    next generated value, still yielding the 2 requested codes.
    """
    url = reverse('generate_registration_codes',
                  kwargs={'course_id': self.course.id.to_deprecated_string()})

    data = {
        'total_registration_codes': 2, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
        'company_contact_email': 'Test@company.com', 'sale_price': 122.45, 'recipient_name': 'Test123',
        'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
        'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
        'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
    }

    response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
    # 1 header line + 2 data rows + trailing newline -> 4 lines after split.
    self.assertEqual(len(body.split('\n')), 4)
def test_spent_course_registration_codes_csv(self):
    """
    Test to generate a response of all the spent course registration codes.

    First downloads the spent codes created by the shared fixture, then
    generates 9 new codes, redeems them all, and checks the company-filtered
    download contains exactly those 9.
    """
    url = reverse('spent_registration_codes',
                  kwargs={'course_id': self.course.id.to_deprecated_string()})

    data = {'spent_company_name': ''}
    response = self.client.post(url, data)
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
    # NOTE(review): 7 = header + trailing newline + 5 pre-existing spent
    # codes — presumably created in setUp; confirm if the fixture changes.
    self.assertEqual(len(body.split('\n')), 7)

    generate_code_url = reverse(
        'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
    )

    data = {
        'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
        'sale_price': 122.45, 'company_contact_email': 'Test@company.com', 'recipient_name': 'Test123',
        'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
        'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
        'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
    }

    response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
    self.assertEqual(response.status_code, 200, response.content)

    # One purchased order per redemption created below.
    for i in range(9):
        order = Order(user=self.instructor, status='purchased')
        order.save()

    # Spent(used) Registration Codes
    for i in range(9):
        # NOTE(review): relies on the new orders/registration codes having
        # primary keys 13..21 — fragile if earlier fixtures change; TODO
        # confirm, or look the ids up instead of hard-coding the offset.
        i += 13
        registration_code_redemption = RegistrationCodeRedemption(
            order_id=i, registration_code_id=i, redeemed_by=self.instructor
        )
        registration_code_redemption.save()

    data = {'spent_company_name': 'Group Alpha'}
    response = self.client.post(url, data)
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
    # header + 9 redeemed 'Group Alpha' codes + trailing newline.
    self.assertEqual(len(body.split('\n')), 11)
def test_active_course_registration_codes_csv(self):
    """
    Test to generate a response of all the active course registration codes.

    Downloads the fixture's active codes first, then generates 9 codes for
    'Group Alpha' and checks the company-filtered download returns them.
    """
    url = reverse('active_registration_codes',
                  kwargs={'course_id': self.course.id.to_deprecated_string()})

    data = {'active_company_name': ''}
    response = self.client.post(url, data)
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
    # NOTE(review): 9 = header + trailing newline + 7 pre-existing active
    # codes from the shared fixture — confirm against setUp.
    self.assertEqual(len(body.split('\n')), 9)

    generate_code_url = reverse(
        'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
    )

    data = {
        'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
        'company_contact_email': 'Test@company.com', 'sale_price': 122.45, 'recipient_name': 'Test123',
        'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
        'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
        'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
    }

    response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
    self.assertEqual(response.status_code, 200, response.content)

    data = {'active_company_name': 'Group Alpha'}
    response = self.client.post(url, data)
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
    # header + 9 newly generated 'Group Alpha' codes + trailing newline.
    self.assertEqual(len(body.split('\n')), 11)
def test_get_all_course_registration_codes_csv(self):
    """
    Test to generate a response of all the course registration codes.

    Downloads the unfiltered code list first, then generates 9 codes for
    'Group Alpha' and checks the company-filtered download returns them.
    """
    url = reverse(
        'get_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
    )

    data = {'download_company_name': ''}
    response = self.client.post(url, data)
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
    # NOTE(review): 14 = header + trailing newline + 12 pre-existing codes
    # from the shared fixture — confirm against setUp.
    self.assertEqual(len(body.split('\n')), 14)

    generate_code_url = reverse(
        'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
    )

    data = {
        'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
        'company_contact_email': 'Test@company.com', 'sale_price': 122.45, 'recipient_name': 'Test123',
        'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
        'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
        'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
    }

    response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
    self.assertEqual(response.status_code, 200, response.content)

    data = {'download_company_name': 'Group Alpha'}
    response = self.client.post(url, data)
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
    # header + 9 'Group Alpha' codes + trailing newline.
    self.assertEqual(len(body.split('\n')), 11)
def test_get_codes_with_sale_invoice(self):
    """
    Test downloading registration codes that were generated with
    ``invoice=True`` (sale invoice attached).
    """
    generate_code_url = reverse(
        'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
    )

    data = {
        # NOTE(review): 5.5 is a non-integer code count — presumably testing
        # that the view coerces/handles float input; confirm intent.
        'total_registration_codes': 5.5, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
        'company_contact_email': 'Test@company.com', 'sale_price': 122.45, 'recipient_name': 'Test123',
        'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
        'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
        'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
    }

    response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
    self.assertEqual(response.status_code, 200, response.content)

    url = reverse('get_registration_codes',
                  kwargs={'course_id': self.course.id.to_deprecated_string()})
    data = {'download_company_name': 'Group Invoice'}
    response = self.client.post(url, data)
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
def test_get_historical_coupon_codes(self):
    """
    Test to download a response of all the active coupon codes.

    Creates 10 active coupons, then checks the coupon-codes endpoint
    returns a CSV with the expected header.
    """
    get_coupon_code_url = reverse(
        'get_coupon_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
    )
    # Seed 10 active coupons with distinct codes/discounts.
    for i in range(10):
        coupon = Coupon(
            code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
            percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
        )
        coupon.save()

    response = self.client.get(get_coupon_code_url)
    self.assertEqual(response.status_code, 200, response.content)
    self.assertEqual(response['Content-Type'], 'text/csv')
    body = response.content.replace('\r', '')
    self.assertTrue(body.startswith(EXPECTED_COUPON_CSV_HEADER))
| adlnet-archive/edx-platform | lms/djangoapps/instructor/tests/test_api.py | Python | agpl-3.0 | 132,870 | [
"VisIt"
] | 51500d8bab4c3e52c183e5798571811649808112e13d3da0de23ad1d8d990983 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides class BaseVisualStim and mixins; subclass to get visual stimuli
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from pathlib import Path
from statistics import mean
from psychopy.colors import Color, colorSpaces
from psychopy.layout import Vector, Position, Size, Vertices, unitTypes
# Ensure setting pyglet.options['debug_gl'] to False is done prior to any
# other calls to pyglet or pyglet submodules, otherwise it may not get picked
# up by the pyglet GL engine and have no effect.
# Shaders will work but require OpenGL2.0 drivers AND PyOpenGL3.0+
import pyglet
pyglet.options['debug_gl'] = False
GL = pyglet.gl
try:
from PIL import Image
except ImportError:
from . import Image
import copy
import sys
import os
from psychopy import logging
# tools must only be imported *after* event or MovieStim breaks on win32
# (JWP has no idea why!)
from psychopy.tools.arraytools import val2array
from psychopy.tools.attributetools import (attributeSetter, logAttrib,
setAttribute)
from psychopy.tools.monitorunittools import (cm2pix, deg2pix, pix2cm,
pix2deg, convertToPix)
from psychopy.visual.helpers import (pointInPolygon, polygonsOverlap,
setColor, findImageFile)
from psychopy.tools.typetools import float_uint8
from psychopy.tools.arraytools import makeRadialMatrix, createLumPattern
from psychopy.tools.colorspacetools import dkl2rgb, lms2rgb # pylint: disable=W0611
from . import globalVars
import numpy
from numpy import pi
from psychopy.constants import NOT_STARTED, STARTED, STOPPED
reportNImageResizes = 5 # permitted number of resizes
"""
There are several base and mix-in visual classes for multiple inheritance:
- MinimalStim: non-visual house-keeping code common to all visual stim
RatingScale inherits only from MinimalStim.
- WindowMixin: attributes/methods about the stim relative to
a visual.Window.
- LegacyVisualMixin: deprecated visual methods (eg, setRGB) added
to BaseVisualStim
- ColorMixin: for Stim that need color methods (most, not Movie)
color-related methods and attribs
- ContainerMixin: for stim that need polygon .contains() methods.
Most need this, but not Text. .contains(), .overlaps()
- TextureMixin: for texture methods namely _createTexture
(Grating, not Text)
seems to work; caveat: There were issues in earlier (non-MI) versions
of using _createTexture so it was pulled out of classes.
Now it's inside classes again. Should be watched.
- BaseVisualStim: = Minimal + Window + Legacy. Furthermore adds common
  attributes like orientation, opacity, contrast etc.
Typically subclass BaseVisualStim to create new visual stim classes, and add
mixin(s) as needed to add functionality.
"""
class MinimalStim:
    """Non-visual methods and attributes for BaseVisualStim and RatingScale.

    Includes: name, autoDraw, autoLog, status, __str__
    """

    def __init__(self, name=None, autoLog=None):
        # Bypass the attributeSetter for `name` (write straight to __dict__)
        # so no log entry is generated during construction.
        if name not in (None, ''):
            self.__dict__['name'] = name
        else:
            self.__dict__['name'] = 'unnamed %s' % self.__class__.__name__
        self.status = NOT_STARTED
        self.autoLog = autoLog
        super(MinimalStim, self).__init__()
        # Warn subclass authors: autoLog=True this early would log every
        # attribute assignment made later in the subclass __init__.
        if self.autoLog:
            msg = ("%s is calling MinimalStim.__init__() with autolog=True. "
                   "Set autoLog to True only at the end of __init__())")
            logging.warning(msg % self.__class__.__name__)

    def __str__(self, complete=False):
        """Return a constructor-like representation, e.g. ``TextStim(win=...)``.

        Long parameter values are abbreviated to ``ClassName(...)`` unless
        `complete` is True. Falls back to ``object.__repr__`` when the
        stimulus recorded no ``_initParams``.
        """
        if hasattr(self, '_initParams'):
            className = self.__class__.__name__
            paramStrings = []
            for param in self._initParams:
                if hasattr(self, param):
                    val = getattr(self, param)
                    valStr = repr(getattr(self, param))
                    # NOTE(review): len(repr(valStr)) takes the repr of a
                    # string that is already a repr — the threshold is
                    # therefore applied to the escaped form; presumably
                    # len(valStr) was intended. TODO confirm.
                    if len(repr(valStr)) > 50 and not complete:
                        if val.__class__.__name__ == 'attributeSetter':
                            _name = val.__getattribute__.__class__.__name__
                        else:
                            _name = val.__class__.__name__
                        valStr = "%s(...)" % _name
                else:
                    valStr = 'UNKNOWN'
                paramStrings.append("%s=%s" % (param, valStr))
            # this could be used if all params are known to exist:
            # paramStrings = ["%s=%s" %(param, getattr(self, param))
            #                for param in self._initParams]
            params = ", ".join(paramStrings)
            s = "%s(%s)" % (className, params)
        else:
            s = object.__repr__(self)
        return s

    # Might seem simple at first, but this ensures that "name" attribute
    # appears in docs and that name setting and updating is logged.
    @attributeSetter
    def name(self, value):
        """The name (`str`) of the object to be using during logged messages
        about this stim. If you have multiple stimuli in your experiment this
        really helps to make sense of log files!

        If name = None your stimulus will be called "unnamed <type>", e.g.
        visual.TextStim(win) will be called "unnamed TextStim" in the logs.
        """
        self.__dict__['name'] = value

    @attributeSetter
    def autoDraw(self, value):
        """Determines whether the stimulus should be automatically drawn
        on every frame flip.

        Value should be: `True` or `False`. You do NOT need to set this
        on every frame flip!
        """
        self.__dict__['autoDraw'] = value
        toDraw = self.win._toDraw
        toDrawDepths = self.win._toDrawDepths
        beingDrawn = (self in toDraw)
        if value == beingDrawn:
            return  # nothing to do
        elif value:
            # work out where to insert the object in the autodraw list
            depthArray = numpy.array(toDrawDepths)
            # all indices where true:
            iis = numpy.where(depthArray < self.depth)[0]
            if len(iis):  # we featured somewhere before the end of the list
                toDraw.insert(iis[0], self)
                toDrawDepths.insert(iis[0], self.depth)
            else:
                toDraw.append(self)
                toDrawDepths.append(self.depth)
            # Add to editable list (if needed)
            self.win.addEditable(self)
            # Mark as started
            self.status = STARTED
        elif value == False:
            # remove from autodraw lists
            toDrawDepths.pop(toDraw.index(self))  # remove from depths
            toDraw.remove(self)  # remove from draw list
            # Remove from editable list (if needed)
            self.win.removeEditable(self)
            # Mark as stopped
            self.status = STOPPED

    def setAutoDraw(self, value, log=None):
        """Sets autoDraw. Usually you can use 'stim.attribute = value'
        syntax instead, but use this method to suppress the log message.
        """
        setAttribute(self, 'autoDraw', value, log)

    @attributeSetter
    def autoLog(self, value):
        """Whether every change in this stimulus should be auto logged.

        Value should be: `True` or `False`. Set to `False` if your stimulus is
        updating frequently (e.g. updating its position every frame) and you
        want to avoid swamping the log file with messages that aren't likely to
        be useful.
        """
        self.__dict__['autoLog'] = value

    def setAutoLog(self, value=True, log=None):
        """Usually you can use 'stim.attribute = value' syntax instead,
        but use this method if you need to suppress the log message.
        """
        setAttribute(self, 'autoLog', value, log)
class LegacyVisualMixin:
    """Class to hold deprecated visual methods and attributes.

    Intended only for use as a mixin class for BaseVisualStim, to maintain
    backwards compatibility while reducing clutter in class BaseVisualStim.
    """
    # def __init__(self):
    #     super(LegacyVisualMixin, self).__init__()

    def _calcSizeRendered(self):
        """DEPRECATED in 1.80.00. This functionality is now handled
        by _updateVertices() and verticesPix
        """
        # raise DeprecationWarning, "_calcSizeRendered() was deprecated in
        # 1.80.00. This functionality is now handled by _updateVertices()
        # and verticesPix"
        if self.units in ['norm', 'pix', 'height']:
            self._sizeRendered = copy.copy(self.size)
        elif self.units in ['deg', 'degs']:
            self._sizeRendered = deg2pix(self.size, self.win.monitor)
        elif self.units == 'cm':
            self._sizeRendered = cm2pix(self.size, self.win.monitor)
        else:
            logging.error("Stimulus units should be 'height', 'norm', "
                          "'deg', 'cm' or 'pix', not '%s'" % self.units)

    def _calcPosRendered(self):
        """DEPRECATED in 1.80.00. This functionality is now handled
        by _updateVertices() and verticesPix.
        """
        # raise DeprecationWarning, "_calcSizeRendered() was deprecated
        # in 1.80.00. This functionality is now handled by
        # _updateVertices() and verticesPix"
        if self.units in ['norm', 'pix', 'height']:
            self._posRendered = copy.copy(self.pos)
        elif self.units in ['deg', 'degs']:
            self._posRendered = deg2pix(self.pos, self.win.monitor)
        elif self.units == 'cm':
            self._posRendered = cm2pix(self.pos, self.win.monitor)
        # NOTE(review): unlike _calcSizeRendered there is no error branch
        # for unrecognised units here — unknown units silently leave
        # _posRendered unset. Confirm whether that is intentional.

    def _getPolyAsRendered(self):
        """DEPRECATED. Return a list of vertices as rendered.
        """
        # Rotate the rendered vertices by -ori, then translate to position.
        oriRadians = numpy.radians(self.ori)
        sinOri = numpy.sin(-oriRadians)
        cosOri = numpy.cos(-oriRadians)
        x = (self._verticesRendered[:, 0] * cosOri -
             self._verticesRendered[:, 1] * sinOri)
        y = (self._verticesRendered[:, 0] * sinOri +
             self._verticesRendered[:, 1] * cosOri)
        return numpy.column_stack((x, y)) + self._posRendered

    @attributeSetter
    def depth(self, value):
        """DEPRECATED, depth is now controlled simply by drawing order.
        """
        self.__dict__['depth'] = value
class LegacyForeColorMixin:
    """
    Mixin class to give an object all of the legacy functions for setting foreground color
    """
    def setDKL(self, color, operation=''):
        """DEPRECATED since v1.60.05: Please use the `color` attribute
        """
        self.setForeColor(color, 'dkl', operation)

    def setLMS(self, color, operation=''):
        """DEPRECATED since v1.60.05: Please use the `color` attribute
        """
        self.setForeColor(color, 'lms', operation)

    @property
    def foreRGB(self):
        """
        DEPRECATED: Legacy property for setting the foreground color of a stimulus in RGB, instead use `obj._foreColor.rgb`
        """
        return self._foreColor.rgb

    @foreRGB.setter
    def foreRGB(self, value):
        # Replaces the whole Color object; colorSpace is forced to 'rgb'.
        self.foreColor = Color(value, 'rgb')

    @property
    def RGB(self):
        """
        DEPRECATED: Legacy property for setting the foreground color of a stimulus in RGB, instead use `obj._foreColor.rgb`
        """
        return self.foreRGB

    @RGB.setter
    def RGB(self, value):
        self.foreRGB = value

    def setRGB(self, color, operation='', log=None):
        """
        DEPRECATED: Legacy setter for foreground RGB, instead set `obj._foreColor.rgb`
        """
        self.setForeColor(color, 'rgb', operation, log)

    def setForeRGB(self, color, operation='', log=None):
        """
        DEPRECATED: Legacy setter for foreground RGB, instead set `obj._foreColor.rgb`
        """
        self.setForeColor(color, 'rgb', operation, log)

    @property
    def foreColorSpace(self):
        """Deprecated, please use colorSpace to set color space for the entire
        object.
        """
        return self.colorSpace

    @foreColorSpace.setter
    def foreColorSpace(self, value):
        # Redirects to the object-wide colorSpace with a deprecation warning.
        logging.warning(
            "Setting color space by attribute rather than by object is deprecated. Value of foreColorSpace has been assigned to colorSpace.")
        self.colorSpace = value
class LegacyFillColorMixin:
    """
    Mixin class to give an object all of the legacy functions for setting fill color
    """
    @property
    def fillRGB(self):
        """
        DEPRECATED: Legacy property for setting the fill color of a stimulus in RGB, instead use `obj._fillColor.rgb`
        """
        return self._fillColor.rgb

    @fillRGB.setter
    def fillRGB(self, value):
        # Replaces the whole Color object; colorSpace is forced to 'rgb'.
        self.fillColor = Color(value, 'rgb')

    @property
    def backRGB(self):
        """
        DEPRECATED: Legacy property for setting the fill color of a stimulus in RGB, instead use `obj._fillColor.rgb`
        """
        return self.fillRGB

    @backRGB.setter
    def backRGB(self, value):
        self.fillRGB = value

    def setFillRGB(self, color, operation='', log=None):
        """
        DEPRECATED: Legacy setter for fill RGB, instead set `obj._fillColor.rgb`
        """
        self.setFillColor(color, 'rgb', operation, log)

    def setBackRGB(self, color, operation='', log=None):
        """
        DEPRECATED: Legacy setter for fill RGB, instead set `obj._fillColor.rgb`
        """
        self.setFillColor(color, 'rgb', operation, log)

    @property
    def fillColorSpace(self):
        """Deprecated, please use colorSpace to set color space for the entire
        object.
        """
        return self.colorSpace

    @fillColorSpace.setter
    def fillColorSpace(self, value):
        # Redirects to the object-wide colorSpace with a deprecation warning.
        logging.warning("Setting color space by attribute rather than by object is deprecated. Value of fillColorSpace has been assigned to colorSpace.")
        self.colorSpace = value

    @property
    def backColorSpace(self):
        """Deprecated, please use colorSpace to set color space for the entire
        object.
        """
        return self.colorSpace

    @backColorSpace.setter
    def backColorSpace(self, value):
        logging.warning(
            "Setting color space by attribute rather than by object is deprecated. Value of backColorSpace has been assigned to colorSpace.")
        self.colorSpace = value
class LegacyBorderColorMixin:
    """
    Mixin class to give an object all of the legacy functions for setting border color
    """
    @property
    def borderRGB(self):
        """
        DEPRECATED: Legacy property for setting the border color of a stimulus in RGB, instead use `obj._borderColor.rgb`
        """
        return self._borderColor.rgb

    @borderRGB.setter
    def borderRGB(self, value):
        # Replaces the whole Color object; colorSpace is forced to 'rgb'.
        self.borderColor = Color(value, 'rgb')

    @property
    def lineRGB(self):
        """
        DEPRECATED: Legacy property for setting the border color of a stimulus in RGB, instead use `obj._borderColor.rgb`
        """
        return self.borderRGB

    @lineRGB.setter
    def lineRGB(self, value):
        self.borderRGB = value

    def setBorderRGB(self, color, operation='', log=None):
        """
        DEPRECATED: Legacy setter for border RGB, instead set `obj._borderColor.rgb`
        """
        self.setBorderColor(color, 'rgb', operation, log)

    def setLineRGB(self, color, operation='', log=None):
        """
        DEPRECATED: Legacy setter for border RGB, instead set `obj._borderColor.rgb`
        """
        self.setBorderColor(color, 'rgb', operation, log)

    @property
    def borderColorSpace(self):
        """Deprecated, please use colorSpace to set color space for the entire
        object
        """
        return self.colorSpace

    @borderColorSpace.setter
    def borderColorSpace(self, value):
        # Redirects to the object-wide colorSpace with a deprecation warning.
        logging.warning(
            "Setting color space by attribute rather than by object is deprecated. Value of borderColorSpace has been assigned to colorSpace.")
        self.colorSpace = value

    @property
    def lineColorSpace(self):
        """Deprecated, please use colorSpace to set color space for the entire
        object
        """
        return self.colorSpace

    @lineColorSpace.setter
    def lineColorSpace(self, value):
        logging.warning(
            "Setting color space by attribute rather than by object is deprecated. Value of lineColorSpace has been assigned to colorSpace.")
        self.colorSpace = value
class LegacyColorMixin(LegacyForeColorMixin, LegacyFillColorMixin, LegacyBorderColorMixin):
    """
    Mixin class to give an object all of the legacy functions for setting all colors (fore, fill and border)
    """
class BaseColorMixin:
    """
    Mixin class giving base color methods (e.g. colorSpace) which are needed for any color stuff.
    """
    @property
    def colorSpace(self):
        """The name of the color space currently being used

        Value should be: a string or None

        For strings and hex values this is not needed.
        If None the default colorSpace for the stimulus is
        used (defined during initialisation).

        Please note that changing colorSpace does not change stimulus
        parameters. Thus you usually want to specify colorSpace before
        setting the color. Example::

            # A light green text
            stim = visual.TextStim(win, 'Color me!',
                                   color=(0, 1, 0), colorSpace='rgb')
            # An almost-black text
            stim.colorSpace = 'rgb255'
            # Make it light green again
            stim.color = (128, 255, 128)
        """
        # Fall back to 'rgba' until a colorSpace has been assigned.
        if hasattr(self, '_colorSpace'):
            return self._colorSpace
        else:
            return 'rgba'

    @colorSpace.setter
    def colorSpace(self, value):
        # Only accept names registered in psychopy.colors.colorSpaces.
        if value in colorSpaces:
            self._colorSpace = value
        else:
            logging.error(f"'{value}' is not a valid color space")

    @property
    def contrast(self):
        """A value that is simply multiplied by the color.

        Value should be: a float between -1 (negative) and 1 (unchanged).
            :ref:`Operations <attrib-operations>` supported.

        Set the contrast of the stimulus, i.e. scales how far the stimulus
        deviates from the middle grey. You can also use the stimulus
        `opacity` to control contrast, but that cannot be negative.

        Examples::

            stim.contrast = 1.0  # unchanged contrast
            stim.contrast = 0.5  # decrease contrast
            stim.contrast = 0.0  # uniform, no contrast
            stim.contrast = -0.5  # slightly inverted
            stim.contrast = -1.0  # totally inverted

        Setting contrast outside range -1 to 1 is permitted, but may
        produce strange results if color values exceeds the monitor limits.::

            stim.contrast = 1.2  # increases contrast
            stim.contrast = -1.2  # inverts with increased contrast
        """
        # The foreground color holds the canonical contrast value; returns
        # None if no foreground color has been set yet.
        if hasattr(self, '_foreColor'):
            return self._foreColor.contrast

    @contrast.setter
    def contrast(self, value):
        # Propagate to whichever color attributes this stim actually has.
        if hasattr(self, '_foreColor'):
            self._foreColor.contrast = value
        if hasattr(self, '_fillColor'):
            self._fillColor.contrast = value
        if hasattr(self, '_borderColor'):
            self._borderColor.contrast = value

    def setContrast(self, newContrast, operation='', log=None):
        """Usually you can use 'stim.attribute = value' syntax instead,
        but use this method if you need to suppress the log message.

        Bug fix: the previous implementation assigned ``newContrast``
        unconditionally *before* applying ``operation``, so '+' and '-'
        discarded the old contrast and applied the delta twice, and a
        ``None`` value could be assigned directly.
        """
        if newContrast is None:
            return  # nothing to set
        if operation in ('', '='):
            self.contrast = newContrast
        elif operation == '+':
            self.contrast += newContrast
        elif operation == '-':
            self.contrast -= newContrast
        else:
            logging.error(f"Operation '{operation}' not recognised.")

    def _getDesiredRGB(self, rgb, colorSpace, contrast):
        """ Convert color to RGB while adding contrast.
        Requires self.rgb, self.colorSpace and self.contrast
        """
        col = Color(rgb, colorSpace)
        # `contrast or 0` maps a None contrast to 0 (uniform grey).
        col.contrast *= contrast or 0
        return col.render('rgb')

    def updateColors(self):
        """Placeholder method to update colours when set externally, for example updating the `pallette` attribute of
        a textbox"""
        return
class ForeColorMixin(BaseColorMixin, LegacyForeColorMixin):
    """
    Mixin class for visual stim that need fore color.
    """
    @property
    def foreColor(self):
        """Foreground color of the stimulus

        Value should be one of:
            + string: to specify a :ref:`colorNames`. Any of the standard
              html/X11 `color names
              <http://www.w3schools.com/html/html_colornames.asp>`
              can be used.
            + :ref:`hexColors`
            + numerically: (scalar or triplet) for DKL, RGB or
                other :ref:`colorspaces`. For
                these, :ref:`operations <attrib-operations>` are supported.

        When color is specified using numbers, it is interpreted with
        respect to the stimulus' current colorSpace. If color is given as a
        single value (scalar) then this will be applied to all 3 channels.

        Examples
        --------
        For whatever stim you have::

            stim.color = 'white'
            stim.color = 'RoyalBlue'  # (the case is actually ignored)
            stim.color = '#DDA0DD'  # DDA0DD is hexadecimal for plum
            stim.color = [1.0, -1.0, -1.0]  # if stim.colorSpace='rgb':
                            # a red color in rgb space
            stim.color = [0.0, 45.0, 1.0]  # if stim.colorSpace='dkl':
                            # DKL space with elev=0, azimuth=45
            stim.color = [0, 0, 255]  # if stim.colorSpace='rgb255':
                            # a blue stimulus using rgb255 space
            stim.color = 255  # interpreted as (255, 255, 255)
                              # which is white in rgb255.

        :ref:`Operations <attrib-operations>` work as normal for all numeric
        colorSpaces (e.g. 'rgb', 'hsv' and 'rgb255') but not for strings, like
        named and hex. For example, assuming that colorSpace='rgb'::

            stim.color += [1, 1, 1]  # increment all guns by 1 value
            stim.color *= -1  # multiply the color by -1 (which in this
                              # space inverts the contrast)
            stim.color *= [0.5, 0, 1]  # decrease red, remove green, keep blue

        You can use `setColor` if you want to set color and colorSpace in one
        line. These two are equivalent::

            stim.setColor((0, 128, 255), 'rgb255')
            # ... is equivalent to
            stim.colorSpace = 'rgb255'
            stim.color = (0, 128, 255)
        """
        # Render the stored Color object into the stim's current color space.
        if hasattr(self, '_foreColor'):
            return self._foreColor.render(self.colorSpace)

    @foreColor.setter
    def foreColor(self, value):
        if isinstance(value, Color):
            # If supplied with a Color object, set as that
            self._foreColor = value
        else:
            # Otherwise, make a new Color object
            self._foreColor = Color(value, self.colorSpace, contrast=self.contrast)
        if not self._foreColor:
            # Invalid colors are replaced by a blank Color and logged.
            self._foreColor = Color()
            logging.error(f"'{value}' is not a valid {self.colorSpace} color")

    @property
    def color(self):
        """Alternative way of setting `foreColor`."""
        return self.foreColor

    @color.setter
    def color(self, value):
        self.foreColor = value

    def setForeColor(self, color, colorSpace=None, operation='', log=None):
        """Hard setter for foreColor, allows suppression of the log message,
        simultaneous colorSpace setting and calls update methods.
        """
        setColor(obj=self, colorAttrib="foreColor", color=color, colorSpace=colorSpace or self.colorSpace, operation=operation)
        # Trigger color update for components like Textbox which have different behaviours for a hard setter
        self.updateColors()

    def setColor(self, color, colorSpace=None, operation='', log=None):
        # Alias of setForeColor, kept for backwards compatibility.
        self.setForeColor(color, colorSpace=colorSpace, operation=operation, log=log)
class FillColorMixin(BaseColorMixin, LegacyFillColorMixin):
    """
    Mixin class for visual stim that need fill color.
    """
    @property
    def fillColor(self):
        """Set the fill color for the shape."""
        # Render the stored Color object into the stim's current color space.
        if hasattr(self, '_fillColor'):
            return getattr(self._fillColor, self.colorSpace)  # return self._fillColor.render(self.colorSpace)

    @fillColor.setter
    def fillColor(self, value):
        if isinstance(value, Color):
            # If supplied with a color object, set as that
            self._fillColor = value
        else:
            # Otherwise, make a new Color object
            self._fillColor = Color(value, self.colorSpace, contrast=self.contrast)
        if not self._fillColor:
            # If given an invalid color, set as transparent and log error
            self._fillColor = Color()
            logging.error(f"'{value}' is not a valid {self.colorSpace} color")

    @property
    def backColor(self):
        """Alternative way of setting fillColor"""
        return self.fillColor

    @backColor.setter
    def backColor(self, value):
        self.fillColor = value

    def setFillColor(self, color, colorSpace=None, operation='', log=None):
        """Hard setter for fillColor, allows suppression of the log message,
        simultaneous colorSpace setting and calls update methods.
        """
        setColor(obj=self, colorAttrib="fillColor", color=color, colorSpace=colorSpace or self.colorSpace, operation=operation)
        # Trigger color update for components like Textbox which have different behaviours for a hard setter
        self.updateColors()

    def setBackColor(self, color, colorSpace=None, operation='', log=None):
        """Alias of :meth:`setFillColor`.

        Bug fix: the supplied ``colorSpace``, ``operation`` and ``log``
        arguments are now forwarded; previously they were silently replaced
        by the literal defaults (None, '', None).
        """
        self.setFillColor(color, colorSpace=colorSpace, operation=operation, log=log)
class BorderColorMixin(BaseColorMixin, LegacyBorderColorMixin):
    """
    Mixin class for visual stim that need border color.
    """
    @property
    def borderColor(self):
        # Render the stored Color object into the stim's current color space.
        if hasattr(self, '_borderColor'):
            return self._borderColor.render(self.colorSpace)

    @borderColor.setter
    def borderColor(self, value):
        if isinstance(value, Color):
            # If supplied with a color object, set as that
            self._borderColor = value
        else:
            # If supplied with a valid color, use it to make a color object
            self._borderColor = Color(value, self.colorSpace, contrast=self.contrast)
        if not self._borderColor:
            # If given an invalid color, set as transparent and log error
            self._borderColor = Color()
            logging.error(f"'{value}' is not a valid {self.colorSpace} color")

    @property
    def lineColor(self):
        """Alternative way of setting `borderColor`."""
        return self.borderColor

    @lineColor.setter
    def lineColor(self, value):
        self.borderColor = value

    def setBorderColor(self, color, colorSpace=None, operation='', log=None):
        """Hard setter for `borderColor`, allows suppression of the log
        message, simultaneous colorSpace setting and calls update methods.
        """
        setColor(obj=self, colorAttrib="borderColor", color=color, colorSpace=colorSpace or self.colorSpace, operation=operation)
        # Trigger color update for components like Textbox which have different behaviours for a hard setter
        self.updateColors()

    def setLineColor(self, color, colorSpace=None, operation='', log=None):
        """Alias of :meth:`setBorderColor`.

        Bug fix: the supplied ``colorSpace``, ``operation`` and ``log``
        arguments are now forwarded; previously they were silently replaced
        by the literal defaults (None, '', None).
        """
        self.setBorderColor(color, colorSpace=colorSpace, operation=operation, log=log)
class ColorMixin(ForeColorMixin, FillColorMixin, BorderColorMixin):
    """
    Mixin class for visual stim that need fill, fore and border color.
    """
class ContainerMixin:
"""Mixin class for visual stim that have verticesPix attrib
and .contains() methods.
"""
def __init__(self):
    super(ContainerMixin, self).__init__()
    # Unit-square vertices/border in stimulus coordinates; subclasses
    # override these for other shapes.
    self._verticesBase = numpy.array(
        [[0.5, -0.5], [-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5]])  # sqr
    self._borderBase = numpy.array(
        [[0.5, -0.5], [-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5]])  # sqr
    self._rotationMatrix = [[1., 0.], [0., 1.]]  # no rotation by default
@property
def verticesPix(self):
"""This determines the coordinates of the vertices for the
current stimulus in pixels, accounting for size, ori, pos and units
"""
# because this is a property getter we can check /on-access/ if it
# needs updating :-)
if self._needVertexUpdate:
self._updateVertices()
return self.__dict__['verticesPix']
@property
def _borderPix(self):
"""Allows for a dynamic border that differs from self.vertices, gets
updated dynamically with identical transformations.
"""
if not hasattr(self, 'border'):
msg = "%s._borderPix requested without .border" % self.name
logging.error(msg)
raise AttributeError(msg)
if self._needVertexUpdate:
self._updateVertices()
return self.__dict__['_borderPix']
def _updateVertices(self):
"""Sets Stim.verticesPix and ._borderPix from pos, size, ori,
flipVert, flipHoriz
"""
verts = numpy.dot(self.vertices, self._rotationMatrix)
# If needed, sub in missing values for flip and anchor
flip = None
if hasattr(self, "flip"):
flip = self.flip
anchor = None
if hasattr(self, "anchor"):
anchor = self.anchor
# Convert to a vertices object if not already
verts = Vertices(verts, obj=self, flip=flip, anchor=anchor).pix
self.__dict__['verticesPix'] = self.__dict__['_borderPix'] = verts
if hasattr(self, '_tesselVertices'): # Shapes need to render from this
tesselVerts = self._tesselVertices
tesselVerts = numpy.dot(tesselVerts, self._rotationMatrix)
# Convert to a vertices object if not already
tesselVerts = Vertices(tesselVerts, obj=self, flip=self.flip, anchor=self.anchor).pix
self.__dict__['verticesPix'] = tesselVerts
self._needVertexUpdate = False
self._needUpdate = True # but we presumably need to update the list
def contains(self, x, y=None, units=None):
"""Returns True if a point x,y is inside the stimulus' border.
Can accept variety of input options:
+ two separate args, x and y
+ one arg (list, tuple or array) containing two vals (x,y)
+ an object with a getPos() method that returns x,y, such
as a :class:`~psychopy.event.Mouse`.
Returns `True` if the point is within the area defined either by its
`border` attribute (if one defined), or its `vertices` attribute if
there is no .border. This method handles
complex shapes, including concavities and self-crossings.
Note that, if your stimulus uses a mask (such as a Gaussian) then
this is not accounted for by the `contains` method; the extent of the
stimulus is determined purely by the size, position (pos), and
orientation (ori) settings (and by the vertices for shape stimuli).
See Coder demos: shapeContains.py
See Coder demos: shapeContains.py
"""
# get the object in pixels
if hasattr(x, 'border'):
xy = x._borderPix # access only once - this is a property
units = 'pix' # we can forget about the units
elif hasattr(x, 'verticesPix'):
# access only once - this is a property (slower to access)
xy = x.verticesPix
units = 'pix' # we can forget about the units
elif hasattr(x, 'getPos'):
xy = x.getPos()
units = x.units
elif type(x) in [list, tuple, numpy.ndarray]:
xy = numpy.array(x)
else:
xy = numpy.array((x, y))
# try to work out what units x,y has
if units is None:
if hasattr(xy, 'units'):
units = xy.units
else:
units = self.units
if units != 'pix':
xy = convertToPix(xy, pos=(0, 0), units=units, win=self.win)
# ourself in pixels
if hasattr(self, 'border'):
poly = self._borderPix # e.g., outline vertices
elif hasattr(self, 'boundingBox'):
if abs(self.ori) > 0.1:
raise RuntimeError("TextStim.contains() doesn't currently "
"support rotated text.")
w, h = self.boundingBox # e.g., outline vertices
x, y = self.posPix
poly = numpy.array([[x+w/2, y-h/2], [x-w/2, y-h/2],
[x-w/2, y+h/2], [x+w/2, y+h/2]])
else:
poly = self.verticesPix # e.g., tessellated vertices
return pointInPolygon(xy[0], xy[1], poly=poly)
def overlaps(self, polygon):
"""Returns `True` if this stimulus intersects another one.
If `polygon` is another stimulus instance, then the vertices
and location of that stimulus will be used as the polygon.
Overlap detection is typically very good, but it
can fail with very pointy shapes in a crossed-swords configuration.
Note that, if your stimulus uses a mask (such as a Gaussian blob)
then this is not accounted for by the `overlaps` method; the extent
of the stimulus is determined purely by the size, pos, and
orientation settings (and by the vertices for shape stimuli).
See coder demo, shapeContains.py
"""
return polygonsOverlap(self, polygon)
class TextureMixin:
    """Mixin class for visual stim that have textures.
    Could move visual.helpers.setTexIfNoShaders() into here.
    Parameters
    ----------
    tex : Any
        Texture data. Value can be anything that resembles image data.
    id : int or :class:`~pyglet.gl.GLint`
        Texture ID.
    pixFormat : :class:`~pyglet.gl.GLenum` or int
        Pixel format to use, values can be `GL_ALPHA` or `GL_RGB`.
    stim : Any
        Stimulus object using the texture.
    res : int
        The resolution of the texture (unless a bitmap image is used).
    maskParams : dict or None
        Additional parameters to configure the mask used with this texture.
    forcePOW2 : bool
        Force the texture to be stored in a square memory area. For grating
        stimuli (anything that needs multiple cycles) `forcePOW2` should be
        set to be `True`. Otherwise the wrapping of the texture will not
        work.
    dataType : class:`~pyglet.gl.GLenum`, int or None
        None, `GL_UNSIGNED_BYTE`, `GL_FLOAT`. Only affects image files
        (numpy arrays will be float).
    wrapping : bool
        Enable wrapping of the texture. A texture will be set to repeat (or
        tile).
    """
    def _createTexture(self, tex, id, pixFormat, stim, res=128, maskParams=None,
                       forcePOW2=True, dataType=None, wrapping=True):
        """Build an OpenGL 2D texture from `tex` (array, named pattern,
        image file, or PIL image) and upload it to texture `id`.
        Returns True if the data was treated as luminance (intensity).
        """
        # transform all variants of `None` to that, simplifies conditions below
        if tex in ["none", "None", "color"]:
            tex = None
        # Create an intensity texture, ranging -1:1.0
        notSqr = False  # most of the options will be creating a sqr texture
        wasImage = False  # change this if image loading works
        interpolate = stim.interpolate
        if dataType is None:
            if pixFormat == GL.GL_RGB:
                dataType = GL.GL_FLOAT
            else:
                dataType = GL.GL_UNSIGNED_BYTE
        # Fill out unspecified portions of maskParams with default values
        if maskParams is None:
            maskParams = {}
        # fringeWidth affects the proportion of the stimulus diameter that is
        # devoted to the raised cosine.
        allMaskParams = {'fringeWidth': 0.2, 'sd': 3}
        allMaskParams.update(maskParams)
        if type(tex) == numpy.ndarray:
            # handle a numpy array
            # for now this needs to be an NxN intensity array
            intensity = tex.astype(numpy.float32)
            if intensity.max() > 1 or intensity.min() < -1:
                logging.error('numpy arrays used as textures should be in '
                              'the range -1(black):1(white)')
            if len(tex.shape) == 3:
                wasLum = False
            else:
                wasLum = True
            # is it 1D?
            if tex.shape[0] == 1:
                stim._tex1D = True
                res = tex.shape[1]
            elif len(tex.shape) == 1 or tex.shape[1] == 1:
                stim._tex1D = True
                res = tex.shape[0]
            else:
                stim._tex1D = False
                # check if it's a square power of two
                maxDim = max(tex.shape)
                powerOf2 = 2 ** numpy.ceil(numpy.log2(maxDim))
                if (forcePOW2 and
                        (tex.shape[0] != powerOf2 or
                         tex.shape[1] != powerOf2)):
                    logging.error("Requiring a square power of two (e.g. "
                                  "16 x 16, 256 x 256) texture but didn't "
                                  "receive one")
                res = tex.shape[0]
            dataType = GL.GL_FLOAT
        elif tex in ("sin", "sqr", "saw", "tri", "sinXsin", "sqrXsqr", "circle",
                     "gauss", "cross", "radRamp", "raisedCos", None):
            if tex is None:
                res = 1
                wrapping = True  # override any wrapping setting for None
            # compute array of intensity value for desired pattern
            intensity = createLumPattern(tex, res, None, allMaskParams)
            wasLum = True
        else:
            if isinstance(tex, (str, Path)):
                # maybe tex is the name of a file:
                filename = findImageFile(tex, checkResources=True)
                if not filename:
                    msg = "Couldn't find image %s; check path? (tried: %s)"
                    logging.error(msg % (tex, os.path.abspath(tex)))
                    logging.flush()
                    raise IOError(msg % (tex, os.path.abspath(tex)))
                try:
                    # flip to match GL's bottom-up row order
                    im = Image.open(filename)
                    im = im.transpose(Image.FLIP_TOP_BOTTOM)
                except IOError:
                    msg = "Found file '%s', failed to load as an image"
                    logging.error(msg % (filename))
                    logging.flush()
                    msg = "Found file '%s' [= %s], failed to load as an image"
                    raise IOError(msg % (tex, os.path.abspath(tex)))
            else:
                # can't be a file; maybe its an image already in memory?
                try:
                    im = tex.copy().transpose(Image.FLIP_TOP_BOTTOM)
                except AttributeError:  # nope, not an image in memory
                    msg = "Couldn't make sense of requested image."
                    logging.error(msg)
                    logging.flush()
                    raise AttributeError(msg)
            # at this point we have a valid im
            stim._origSize = im.size
            wasImage = True
            # is it 1D?
            if im.size[0] == 1 or im.size[1] == 1:
                logging.error("Only 2D textures are supported at the moment")
            else:
                maxDim = max(im.size)
                powerOf2 = int(2**numpy.ceil(numpy.log2(maxDim)))
                if im.size[0] != powerOf2 or im.size[1] != powerOf2:
                    if not forcePOW2:
                        notSqr = True
                    elif globalVars.nImageResizes < reportNImageResizes:
                        msg = ("Image '%s' was not a square power-of-two ' "
                               "'image. Linearly interpolating to be %ix%i")
                        logging.warning(msg % (tex, powerOf2, powerOf2))
                        globalVars.nImageResizes += 1
                        im = im.resize([powerOf2, powerOf2], Image.BILINEAR)
                    elif globalVars.nImageResizes == reportNImageResizes:
                        # warn once more, then stay silent for further resizes
                        logging.warning("Multiple images have needed resizing"
                                        " - I'll stop bothering you!")
                        im = im.resize([powerOf2, powerOf2], Image.BILINEAR)
            # is it Luminance or RGB?
            if pixFormat == GL.GL_ALPHA and im.mode != 'L':
                # we have RGB and need Lum
                wasLum = True
                im = im.convert("L")  # force to intensity (need if was rgb)
            elif im.mode == 'L':  # we have lum and no need to change
                wasLum = True
                dataType = GL.GL_FLOAT
            elif pixFormat == GL.GL_RGB:
                # we want RGB and might need to convert from CMYK or Lm
                # texture = im.tostring("raw", "RGB", 0, -1)
                im = im.convert("RGBA")
                wasLum = False
            else:
                raise ValueError('cannot determine if image is luminance or RGB')
            if dataType == GL.GL_FLOAT:
                # convert from ubyte to float
                # much faster to avoid division 2/255
                intensity = numpy.array(im).astype(
                    numpy.float32) * 0.0078431372549019607 - 1.0
            else:
                intensity = numpy.array(im)
        # choose internal format + pack `intensity` into the upload array
        if pixFormat == GL.GL_RGB and wasLum and dataType == GL.GL_FLOAT:
            # grating stim on good machine
            # keep as float32 -1:1
            if (sys.platform != 'darwin' and
                    stim.win.glVendor.startswith('nvidia')):
                # nvidia under win/linux might not support 32bit float
                # could use GL_LUMINANCE32F_ARB here but check shader code?
                internalFormat = GL.GL_RGB16F_ARB
            else:
                # we've got a mac or an ATI card and can handle
                # 32bit float textures
                # could use GL_LUMINANCE32F_ARB here but check shader code?
                internalFormat = GL.GL_RGB32F_ARB
            # initialise data array as a float
            data = numpy.ones((intensity.shape[0], intensity.shape[1], 3),
                              numpy.float32)
            data[:, :, 0] = intensity  # R
            data[:, :, 1] = intensity  # G
            data[:, :, 2] = intensity  # B
        elif (pixFormat == GL.GL_RGB and
                wasLum and
                dataType != GL.GL_FLOAT):
            # was a lum image: stick with ubyte for speed
            internalFormat = GL.GL_RGB
            # initialise data array as a float
            data = numpy.ones((intensity.shape[0], intensity.shape[1], 3),
                              numpy.ubyte)
            data[:, :, 0] = intensity  # R
            data[:, :, 1] = intensity  # G
            data[:, :, 2] = intensity  # B
        elif pixFormat == GL.GL_RGB and dataType == GL.GL_FLOAT:
            # probably a custom rgb array or rgb image
            internalFormat = GL.GL_RGB32F_ARB
            data = intensity
        elif pixFormat == GL.GL_RGB:
            # not wasLum, not useShaders - an RGB bitmap with no shader
            # options
            internalFormat = GL.GL_RGB
            data = intensity  # float_uint8(intensity)
        elif pixFormat == GL.GL_ALPHA:
            internalFormat = GL.GL_ALPHA
            dataType = GL.GL_UNSIGNED_BYTE
            if wasImage:
                data = intensity
            else:
                data = float_uint8(intensity)
        else:
            raise ValueError("invalid or unsupported `pixFormat`")
        # check for RGBA textures
        if len(data.shape) > 2 and data.shape[2] == 4:
            if pixFormat == GL.GL_RGB:
                pixFormat = GL.GL_RGBA
            if internalFormat == GL.GL_RGB:
                internalFormat = GL.GL_RGBA
            elif internalFormat == GL.GL_RGB32F_ARB:
                internalFormat = GL.GL_RGBA32F_ARB
        texture = data.ctypes  # serialise
        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glBindTexture(GL.GL_TEXTURE_2D, id)  # bind that name to the target
        # makes the texture map wrap (this is actually default anyway)
        if wrapping:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT)
        else:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        if interpolate:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
            # GL_GENERATE_MIPMAP was only available from OpenGL 1.4
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_GENERATE_MIPMAP,
                               GL.GL_TRUE)
            GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, internalFormat,
                            data.shape[1], data.shape[0], 0,
                            pixFormat, dataType, texture)
        else:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
            GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, internalFormat,
                            data.shape[1], data.shape[0], 0,
                            pixFormat, dataType, texture)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!
        # unbind our texture so that it doesn't affect other rendering
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        return wasLum

    def clearTextures(self):
        """Clear all textures associated with the stimulus.
        As of v1.61.00 this is called automatically during garbage collection
        of your stimulus, so doesn't need calling explicitly by the user.
        """
        GL.glDeleteTextures(1, self._texID)
        if hasattr(self, '_maskID'):
            GL.glDeleteTextures(1, self._maskID)

    @attributeSetter
    def mask(self, value):
        """The alpha mask (forming the shape of the image).
        This can be one of various options:
            * 'circle', 'gauss', 'raisedCos', 'cross'
            * **None** (resets to default)
            * the name of an image file (most formats supported)
            * a numpy array (1xN or NxN) ranging -1:1
        """
        self.__dict__['mask'] = value
        # ImageStim masks are uploaded as bytes; others let _createTexture
        # pick the data type
        if self.__class__.__name__ == 'ImageStim':
            dataType = GL.GL_UNSIGNED_BYTE
        else:
            dataType = None
        self._createTexture(
            value, id=self._maskID, pixFormat=GL.GL_ALPHA, dataType=dataType,
            stim=self, res=self.texRes, maskParams=self.maskParams,
            wrapping=False)

    def setMask(self, value, log=None):
        """Usually you can use 'stim.attribute = value' syntax instead,
        but use this method if you need to suppress the log message.
        """
        setAttribute(self, 'mask', value, log)

    @attributeSetter
    def texRes(self, value):
        """Power-of-two int. Sets the resolution of the mask and texture.
        texRes is overridden if an array or image is provided as mask.
        :ref:`Operations <attrib-operations>` supported.
        """
        self.__dict__['texRes'] = value
        # ... now rebuild textures (call attributeSetters without logging).
        if hasattr(self, 'tex'):
            setAttribute(self, 'tex', self.tex, log=False)
        if hasattr(self, 'mask'):
            setAttribute(self, 'mask', self.mask, log=False)

    @attributeSetter
    def maskParams(self, value):
        """Various types of input. Default to `None`.
        This is used to pass additional parameters to the mask if those are
        needed.
            - For 'gauss' mask, pass dict {'sd': 5} to control
                standard deviation.
            - For the 'raisedCos' mask, pass a dict: {'fringeWidth':0.2},
                where 'fringeWidth' is a parameter (float, 0-1), determining
                the proportion of the patch that will be blurred by the raised
                cosine edge."""
        self.__dict__['maskParams'] = value
        # call attributeSetter without log
        setAttribute(self, 'mask', self.mask, log=False)

    @attributeSetter
    def interpolate(self, value):
        """Whether to interpolate (linearly) the texture in the stimulus.
        If set to False then nearest neighbour will be used when needed,
        otherwise some form of interpolation will be used.
        """
        self.__dict__['interpolate'] = value
class WindowMixin:
    """Window-related attributes and methods.
    Used by BaseVisualStim, SimpleImageStim and ElementArrayStim.
    """
    @property
    def win(self):
        """The :class:`~psychopy.visual.Window` object in which the
        stimulus will be rendered by default. (required)
        Example, drawing same stimulus in two different windows and display
        simultaneously. Assuming that you have two windows and a stimulus
        (win1, win2 and stim)::
            stim.win = win1  # stimulus will be drawn in win1
            stim.draw()  # stimulus is now drawn to win1
            stim.win = win2  # stimulus will be drawn in win2
            stim.draw()  # it is now drawn in win2
            win1.flip(waitBlanking=False)  # do not wait for next
                        # monitor update
            win2.flip()  # wait for vertical blanking.
        Note that this just changes **default** window for stimulus.
        You could also specify window-to-draw-to when drawing::
            stim.draw(win1)
            stim.draw(win2)
        """
        return self.__dict__['win']

    @win.setter
    def win(self, value):
        self.__dict__['win'] = value
        # Update window ref in size and pos objects
        if hasattr(self, "_size") and isinstance(self._size, Vector):
            self._size.win = value
        if hasattr(self, "_pos") and isinstance(self._pos, Vector):
            self._pos.win = value

    @property
    def pos(self):
        """Position of the stimulus, expressed in the stimulus' current
        `units` (backed by a Position vector)."""
        if hasattr(self, "_pos"):
            return getattr(self._pos, self.units)

    @pos.setter
    def pos(self, value):
        self._pos = Position(value, units=self.units, win=self.win)

    @property
    def size(self):
        """Size of the stimulus, expressed in the stimulus' current
        `units` (backed by a Size vector)."""
        if hasattr(self, "_size"):
            return getattr(self._size, self.units)

    @size.setter
    def size(self, value):
        if value is None:
            value = (None, None)
        self._size = Size(value, units=self.units, win=self.win)

    @property
    def width(self):
        """First (x) component of `size`; a column if size is 2D."""
        if len(self.size.shape) == 1:
            # Return first value if a 1d array
            return self.size[0]
        elif len(self.size.shape) == 2:
            # Return first column if a 2d array
            return self.size[:, 0]

    @width.setter
    def width(self, value):
        # Convert to a numpy array
        value = numpy.array(value)
        # Set size
        if len(self.size.shape) == 1:
            # Set first value if a 1d array
            self.size[0] = value
        elif len(self.size.shape) == 2:
            # Set first column if a 2d array
            self.size[:, 0] = value

    @property
    def height(self):
        """Second (y) component of `size`; a column if size is 2D."""
        if len(self.size.shape) == 1:
            # Return first value if a 1d array
            return self.size[1]
        elif len(self.size.shape) == 2:
            # Return first column if a 2d array
            return self.size[:, 1]

    @height.setter
    def height(self, value):
        # Convert to a numpy array
        value = numpy.array(value)
        # Set size
        if len(self.size.shape) == 1:
            # Set first value if a 1d array
            self.size[1] = value
        elif len(self.size.shape) == 2:
            # Set first column if a 2d array
            self.size[:, 1] = value

    @property
    def vertices(self):
        """Base vertices of the stimulus (a unit square by default)."""
        # Get or make Vertices object
        if hasattr(self, "_vertices"):
            verts = self._vertices
        else:
            # If not defined, assume vertices are just a square
            verts = self._vertices = Vertices(numpy.array([
                [0.5, -0.5],
                [-0.5, -0.5],
                [-0.5, 0.5],
                [0.5, 0.5],
            ]), obj=self, flip=self.flip, anchor=self.anchor)
        return verts.base

    @vertices.setter
    def vertices(self, value):
        # If None, use default
        if value is None:
            value = [
                [0.5, -0.5],
                [-0.5, -0.5],
                [-0.5, 0.5],
                [0.5, 0.5],
            ]
        # Create Vertices object
        self._vertices = Vertices(value, obj=self, flip=self.flip, anchor=self.anchor)

    @property
    def flip(self):
        """
        1x2 array for flipping vertices along each axis; set as True to flip
        or False to not flip. If set as a single value, will duplicate across
        both axes. Accessing the protected attribute (`._flip`) will give an
        array of 1s and -1s with which to multiply vertices.
        """
        # Get base value
        if hasattr(self, "_flip"):
            flip = self._flip
        else:
            flip = numpy.array([[False, False]])
        # Convert from multiplier (-1/1) back to boolean
        return flip == -1

    @flip.setter
    def flip(self, value):
        if value is None:
            value = False
        # Convert to 1x2 numpy array
        value = numpy.array(value)
        value.resize((1, 2))
        # Ensure values were bool
        assert value.dtype == bool, "Flip values must be either a boolean (True/False) or an array of booleans"
        # Set as multipliers rather than bool
        self._flip = numpy.array([[
            -1 if value[0, 0] else 1,
            -1 if value[0, 1] else 1,
        ]])
        self._flipHoriz, self._flipVert = self._flip[0]
        # Apply to vertices
        if not hasattr(self, "_vertices"):
            self.vertices = None
        self._vertices.flip = self.flip
        # Mark as needing vertex update
        self._needVertexUpdate = True

    @property
    def flipHoriz(self):
        """Boolean: whether vertices are flipped along the x axis."""
        return self.flip[0][0]

    @flipHoriz.setter
    def flipHoriz(self, value):
        self.flip = [value, self.flip[0, 1]]

    @property
    def flipVert(self):
        """Boolean: whether vertices are flipped along the y axis."""
        return self.flip[0][1]

    @flipVert.setter
    def flipVert(self, value):
        self.flip = [self.flip[0, 0], value]

    @property
    def anchor(self):
        """Anchor point of the stimulus (delegated to its Vertices)."""
        if hasattr(self, "_vertices"):
            return self._vertices.anchor
        elif hasattr(self, "_anchor"):
            # Return a backup value if there's no vertices yet
            return self._anchor

    @anchor.setter
    def anchor(self, value):
        if hasattr(self, "_vertices"):
            self._vertices.anchor = value
        else:
            # Set a backup value if there's no vertices yet
            self._anchor = value

    def setAnchor(self, value, log=None):
        """Hard setter for `anchor`; allows suppression of the log message."""
        setAttribute(self, 'anchor', value, log)

    @property
    def units(self):
        """Spatial units for this stimulus; falls back to the window's
        units when none have been set on the stimulus itself."""
        if hasattr(self, "_units"):
            return self._units
        else:
            return self.win.units

    @units.setter
    def units(self, value):
        """
        Units to use when drawing.
        Possible options are: None, 'norm', 'cm', 'deg', 'degFlat',
        'degFlatPos', or 'pix'.
        If None then the current units of the
        :class:`~psychopy.visual.Window` will be used.
        See :ref:`units` for explanation of other options.
        Note that when you change units, you don't change the stimulus
        parameters and it is likely to change appearance.
        Example::
            # This stimulus is 20% wide and 50% tall with respect to window
            stim = visual.PatchStim(win, units='norm', size=(0.2, 0.5)
            # This stimulus is 0.2 degrees wide and 0.5 degrees tall.
            stim.units = 'deg'
        """
        if value in unitTypes:
            self._units = value or self.win.units
            self._needVertexUpdate = True
        else:
            raise ValueError(f"Invalid unit type '{value}', must be one of: {unitTypes}")

    def draw(self):
        """Draw the stimulus; concrete stimulus classes must override this."""
        raise NotImplementedError('Stimulus classes must override '
                                  'visual.BaseVisualStim.draw')

    def _selectWindow(self, win):
        """Switch drawing to the specified window. Calls the window's
        _setCurrent() method which handles the switch.
        """
        win._setCurrent()

    def _updateList(self):
        """The user shouldn't need this method since it gets called
        after every call to .set()
        Chooses between using and not using shaders each call.
        """
        self._updateListShaders()
class BaseVisualStim(MinimalStim, WindowMixin, LegacyVisualMixin):
    """A template for a visual stimulus class.
    Actual visual stim like GratingStim, TextStim etc... are based on this.
    Not finished...?
    Methods defined here will override Minimal & Legacy, but best to avoid
    that for simplicity & clarity.
    """
    def __init__(self, win, units=None, name='', autoLog=None):
        self.autoLog = False  # just to start off during init, set at end
        self.win = win
        self.units = units
        self._rotationMatrix = [[1., 0.], [0., 1.]]  # no rotation by default
        # self.autoLog is set at end of MinimalStim.__init__
        super(BaseVisualStim, self).__init__(name=name, autoLog=autoLog)
        if self.autoLog:
            msg = ("%s is calling BaseVisualStim.__init__() with autolog=True"
                   ". Set autoLog to True only at the end of __init__())")
            logging.warning(msg % (self.__class__.__name__))

    @property
    def opacity(self):
        """Determines how visible the stimulus is relative to background.
        The value should be a single float ranging 1.0 (opaque) to 0.0
        (transparent). :ref:`Operations <attrib-operations>` are supported.
        Precisely how this is used depends on the :ref:`blendMode`.
        """
        # Report the mean alpha across whichever color attributes this
        # stimulus defines; 1 (fully opaque) if it has none.
        alphas = []
        if hasattr(self, '_foreColor'):
            alphas.append(self._foreColor.alpha)
        if hasattr(self, '_fillColor'):
            alphas.append(self._fillColor.alpha)
        if hasattr(self, '_borderColor'):
            alphas.append(self._borderColor.alpha)
        if alphas:
            return mean(alphas)
        else:
            return 1

    @opacity.setter
    def opacity(self, value):
        # Setting opacity as a single value makes all colours the same opacity
        if value is None:
            # If opacity is set to be None, this indicates that each color should handle its own opacity
            return
        # NOTE(review): these use `!= None` rather than `is not None`; Color
        # may define rich comparisons, so confirm equivalence before changing.
        if hasattr(self, '_foreColor'):
            if self._foreColor != None:
                self._foreColor.alpha = value
        if hasattr(self, '_fillColor'):
            if self._fillColor != None:
                self._fillColor.alpha = value
        if hasattr(self, '_borderColor'):
            if self._borderColor != None:
                self._borderColor.alpha = value

    def updateOpacity(self):
        """Placeholder method to update colours when set externally, for example
        updating the `pallette` attribute of a textbox."""
        return

    @attributeSetter
    def ori(self, value):
        """The orientation of the stimulus (in degrees).
        Should be a single value (:ref:`scalar <attrib-scalar>`).
        :ref:`Operations <attrib-operations>` are supported.
        Orientation convention is like a clock: 0 is vertical, and positive
        values rotate clockwise. Beyond 360 and below zero values wrap
        appropriately.
        """
        self.__dict__['ori'] = float(value)
        radians = value * 0.017453292519943295  # i.e. pi/180 (deg -> rad)
        sin, cos = numpy.sin, numpy.cos
        self._rotationMatrix = numpy.array([[cos(radians), -sin(radians)],
                                            [sin(radians), cos(radians)]])
        self._needVertexUpdate = True  # need to update vertices
        self._needUpdate = True

    @property
    def size(self):
        """The size (width, height) of the stimulus in the stimulus
        :ref:`units <units>`
        Value should be :ref:`x,y-pair <attrib-xy>`,
        :ref:`scalar <attrib-scalar>` (applies to both dimensions)
        or None (resets to default). :ref:`Operations <attrib-operations>`
        are supported.
        Sizes can be negative (causing a mirror-image reversal) and can
        extend beyond the window.
        Example::
            stim.size = 0.8  # Set size to (xsize, ysize) = (0.8, 0.8)
            print(stim.size)  # Outputs array([0.8, 0.8])
            stim.size += (0.5, -0.5)  # make wider and flatter: (1.3, 0.3)
        Tip: if you can see the actual pixel range this corresponds to by
        looking at `stim._sizeRendered`
        """
        return WindowMixin.size.fget(self)

    @size.setter
    def size(self, value):
        # Supply default for None
        if value is None:
            value = Size((1, 1), units="height", win=self.win)
        # Duplicate single values
        if isinstance(value, (float, int)):
            value = (value, value)
        # Do setting
        WindowMixin.size.fset(self, value)
        # Mark any updates needed
        self._needVertexUpdate = True
        self._needUpdate = True
        if hasattr(self, '_calcCyclesPerStim'):
            self._calcCyclesPerStim()

    @property
    def pos(self):
        """
        The position of the center of the stimulus in the stimulus
        :ref:`units <units>`
        `value` should be an :ref:`x,y-pair <attrib-xy>`.
        :ref:`Operations <attrib-operations>` are also supported.
        Example::
            stim.pos = (0.5, 0)  # Set slightly to the right of center
            stim.pos += (0.5, -1)  # Increment pos rightwards and upwards.
                Is now (1.0, -1.0)
            stim.pos *= 0.2  # Move stim towards the center.
                Is now (0.2, -0.2)
        Tip: If you need the position of stim in pixels, you can obtain
        it like this::
            from psychopy.tools.monitorunittools import posToPix
            posPix = posToPix(stim)
        """
        return WindowMixin.pos.fget(self)

    @pos.setter
    def pos(self, value):
        # Supply default for None
        if value is None:
            value = Position((0, 0), units="height", win=self.win)
        # Do setting
        WindowMixin.pos.fset(self, value)
        # Mark any updates needed
        self._needVertexUpdate = True
        self._needUpdate = True

    def setPos(self, newPos, operation='', log=None):
        """Usually you can use 'stim.attribute = value' syntax instead,
        but use this method if you need to suppress the log message.
        """
        setAttribute(self, 'pos', val2array(newPos, False), log, operation)

    def setDepth(self, newDepth, operation='', log=None):
        """Usually you can use 'stim.attribute = value' syntax instead,
        but use this method if you need to suppress the log message
        """
        setAttribute(self, 'depth', newDepth, log, operation)

    def setSize(self, newSize, operation='', units=None, log=None):
        """Usually you can use 'stim.attribute = value' syntax instead,
        but use this method if you need to suppress the log message
        """
        if units is None:
            # need to change this to create several units from one
            units = self.units
        setAttribute(self, 'size', val2array(newSize, False), log, operation)

    def setOri(self, newOri, operation='', log=None):
        """Usually you can use 'stim.attribute = value' syntax instead,
        but use this method if you need to suppress the log message
        """
        setAttribute(self, 'ori', newOri, log, operation)

    def setOpacity(self, newOpacity, operation='', log=None):
        """Hard setter for opacity, allows the suppression of log messages and calls the update method
        """
        if operation in ['', '=']:
            self.opacity = newOpacity
        elif operation in ['+']:
            self.opacity += newOpacity
        elif operation in ['-']:
            self.opacity -= newOpacity
        else:
            logging.error(f"Operation '{operation}' not recognised.")
        # Trigger color update for components like Textbox which have different behaviours for a hard setter
        self.updateOpacity()

    def _set(self, attrib, val, op='', log=None):
        """DEPRECATED since 1.80.04 + 1.
        Use setAttribute() and val2array() instead.
        """
        # format the input value as float vectors
        if type(val) in [tuple, list, numpy.ndarray]:
            val = val2array(val)
        # Set attribute with operation and log
        setAttribute(self, attrib, val, log, op)
        # For DotStim: keep coherence consistent with a whole number of dots
        if attrib in ('nDots', 'coherence'):
            self.coherence = round(self.coherence * self.nDots) / self.nDots
| psychopy/psychopy | psychopy/visual/basevisual.py | Python | gpl-3.0 | 66,313 | [
"Gaussian"
] | 721de89cd44b9e4cec78378578e8f56887c69df28fa77ee85ed10e7e8c1e7d09 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*********************************************
**espresso.interaction.PotentialVSpherePair**
*********************************************
"""
from espresso import pmi
from espresso import toReal3DFromVector
from _espresso import interaction_PotentialVSpherePair
# Python base class for potentials
class PotentialVSpherePairLocal(object):
    """Python-side base class for the local (per-CPU) part of vsphere pair
    potentials.

    Every operation is guarded so that it only runs on ranks belonging to
    the active PMI communicator's CPU group (or on every rank when no
    communicator is active).
    """

    def _activeOnThisCPU(self):
        # Run everywhere when no PMI communicator is active; otherwise only
        # on ranks that are part of the communicator's CPU group.
        return (not (pmi._PMIComm and pmi._PMIComm.isActive())
                or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())

    def computeEnergy(self, *args):
        """Compute the potential energy, either at a scalar distance or at
        a separation vector (anything convertible to Real3D)."""
        if self._activeOnThisCPU():
            if len(args) == 1 and isinstance(args[0], (float, int)):
                # single numeric argument: treat it as a scalar distance
                return self.cxxclass.computeEnergy(self, args[0])
            return self.cxxclass.computeEnergy(self, toReal3DFromVector(*args))

    def computeForce(self, *args):
        """Compute the force at a separation vector; a single numeric
        argument is placed on the x axis and the x-component returned."""
        if self._activeOnThisCPU():
            if len(args) == 1 and isinstance(args[0], (float, int)):
                rij = toReal3DFromVector(args[0], 0, 0)
                return self.cxxclass.computeForce(self, rij)[0]
            return self.cxxclass.computeForce(self, toReal3DFromVector(*args))

    def _setShift(self, shift="auto"):
        # "auto" asks the C++ side to compute the shift from the cutoff;
        # any other value is written through directly.
        if self._activeOnThisCPU():
            if shift == "auto":
                self.cxxclass.setAutoShift(self)
            else:
                self.cxxclass.shift.fset(self, shift)

    def _getShift(self):
        if self._activeOnThisCPU():
            return self.cxxclass.shift.fget(self)

    shift = property(_getShift, _setShift)
# On the controller rank, expose a PMI proxy that forwards calls and
# property access to the worker-side local objects.
if pmi.isController:
    class PotentialVSpherePair(object):
        """Controller-side PMI proxy for vsphere pair potentials."""
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            # methods executed directly on the local object
            localcall = [ 'computeForce', 'computeEnergy' ],
            # properties mirrored across all workers
            pmiproperty = ['cutoff', 'shift']
            )
# class PythonPotentialLocal(potential_PythonPotential):
# def getCutoffSqr(self):
# pass
# def computeForce(self, *args):
# """Override this method to compute the force for a given distance.
# It should at least be able to handle a Real3D distance input.
# """
# pass
# def computeEnergy(self, *args):
# """Override this method to compute the energy at a given distance.
# It should at least be able to handle a Real3D distance input.
# """
# pass
| BackupTheBerlios/espressopp | src/interaction/PotentialVSpherePair.py | Python | gpl-3.0 | 3,610 | [
"ESPResSo"
] | a75b852ae292935ffc345fe8e0765f43d90e8506880e76cd4ab265ad67da3700 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import json
from monty.json import MontyDecoder
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.xas.spectrum import XAS
from pymatgen.vis.plotters import SpectrumPlotter
# Directory containing the serialized reference spectra used by the tests.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        "test_files/spectrum_test")

# Load the LiCoO2 K-edge XANES reference once at import time; each test
# rebuilds its XAS object from this shared dict.
with open(os.path.join(test_dir, 'LiCoO2_k_xanes.json')) as fp:
    spect_data_dict = json.load(fp, cls=MontyDecoder)
class SpectrumPlotterTest(PymatgenTest):
    """Smoke tests for SpectrumPlotter based on a LiCoO2 XANES spectrum."""

    def setUp(self):
        # Fresh XAS object per test, rebuilt from the shared reference dict.
        self.xanes = XAS.from_dict(spect_data_dict)

    def test_get_plot(self):
        self.plotter = SpectrumPlotter(yshift=0.2)
        self.plotter.add_spectrum("LiCoO2", self.xanes)
        # Add a slightly perturbed copy, once with default and once with
        # an explicit color, then render and save the figure.
        noisy = self.xanes.copy()
        noisy.y += np.random.randn(len(noisy.y)) * 0.005
        self.plotter.add_spectrum("LiCoO2 + noise", noisy)
        self.plotter.add_spectrum("LiCoO2 - replot", noisy, "k")
        plt = self.plotter.get_plot()
        self.plotter.save_plot("spectrum_plotter_test.eps")
        os.remove("spectrum_plotter_test.eps")
        plt.close('all')

    def test_get_stacked_plot(self):
        self.plotter = SpectrumPlotter(yshift=0.2, stack=True)
        self.plotter.add_spectrum("LiCoO2", self.xanes, "b")
        noisy = self.xanes.copy()
        noisy.y += np.random.randn(len(noisy.y)) * 0.005
        self.plotter.add_spectrum("LiCoO2 + noise", noisy, "r")
        plt = self.plotter.get_plot()
        plt.close('all')
# Allow running this test module directly (python test_plotters.py).
if __name__ == '__main__':
    unittest.main()
| gVallverdu/pymatgen | pymatgen/vis/tests/test_plotters.py | Python | mit | 1,657 | [
"pymatgen"
] | 4b9e6ffc8ab32492dd10383f7908dc17f7e8353ffe40c619040db977ceaf6e8c |
#!/usr/bin/env python
"""
PYTHONPATH="." luigi-deps-tree --module test_luigi ReactionPath --id- 1
PYTHONPATH="." luigi --module test_luigi ReactionPath --local-scheduler --id- 1
python test_luigi.py inp/input.xyz 53 9 -1 11 54 -1 9 11 1
"""
import argparse
import os
from pathlib import Path
import shutil
import sys
import tempfile
import luigi
import numpy as np
import psutil
from pysisyphus.calculators import XTB
from pysisyphus.constants import AU2KJPERMOL
from pysisyphus.helpers import geom_loader, do_final_hessian, highlight_text
from pysisyphus.intcoords.helpers import get_weighted_bond_mode_getter
from pysisyphus.cos.GrowingNT import GrowingNT
from pysisyphus.optimizers.RFOptimizer import RFOptimizer
from pysisyphus.optimizers.PreconLBFGS import PreconLBFGS
from pysisyphus.tsoptimizers.RSPRFOptimizer import RSPRFOptimizer
from pysisyphus.irc import EulerPC
class Params(luigi.Config):
    """Shared parameters and helpers for all tasks of one reaction-path run.

    id_:  index of the input geometry in the module-global GEOMS list.
    base: prefix of the per-geometry output directory.
    step: counter of the current reaction step (one GNT/TSOpt/IRC round).
    """

    id_ = luigi.IntParameter()
    base = luigi.Parameter(default="out")
    step = luigi.IntParameter(default=0)

    @property
    def key(self):
        """Unique '<id>_<step>' tag used in log messages."""
        return f"{self.id_:04d}_{self.step:02d}"

    @property
    def out_dir(self):
        """Per-geometry output directory, e.g. 'out_0003'."""
        return Path(f"{self.base}_{self.id_:04d}")

    @property
    def step_str(self):
        """Zero-padded step counter used as a filename prefix."""
        return f"{self.step:02d}"

    def get_path(self, fn):
        """Return 'out_dir/<step>_<fn>', creating out_dir if necessary."""
        out_dir = self.out_dir
        # mkdir(exist_ok=True) instead of exists() + os.mkdir: avoids the
        # TOCTOU race when several luigi workers create the directory
        # concurrently.
        out_dir.mkdir(parents=True, exist_ok=True)
        return out_dir / f"{self.step_str}_{fn}"

    def get_prefix(self, prefix):
        """Prefix an arbitrary string with the current step counter."""
        return f"{self.step_str}_{prefix}"

    def get_calc(self):
        """Fresh xTB calculator (implicit methanol, all physical cores)."""
        return XTB(
            pal=psutil.cpu_count(logical=False),
            quiet=True,
            retry_etemp=1000,
            gbsa="methanol",
            out_dir=self.get_path("qm_calcs/"),
        )

    def backup_from_dir(self, dir_, fn, dest_fn=None):
        """Copy dir_/fn to dest_fn (default: step-prefixed path in out_dir)."""
        if dest_fn is None:
            dest_fn = self.get_path(fn)
        shutil.copy(Path(dir_) / fn, dest_fn)
class Minimum(Params, luigi.Task):
    """Write the initial geometry for this id_ as the step's input file."""

    def output(self):
        return luigi.LocalTarget(self.get_path("input.xyz"))

    def run(self):
        # GEOMS is the module-global list populated in run() from the
        # input trajectory.
        xyz = GEOMS[self.id_].as_xyz()
        with self.output().open("w") as target:
            target.write(xyz)
class GNT(Params, luigi.Task):
    """Grow a Newton trajectory from the minimum and extract a TS guess."""

    def output(self):
        return luigi.LocalTarget(self.get_path("gnt_ts.xyz"))

    def requires(self):
        return Minimum(self.id_, self.base, self.step)

    def run(self):
        print(highlight_text(f"GNT {self.key}"))
        geom = geom_loader(self.input().path)
        geom.set_calculator(self.get_calc())
        # get_weighted_bond_mode is a module global set up in run() from
        # the user-supplied bond/weight triples.
        bonds = get_weighted_bond_mode(geom.atoms, geom.coords3d)
        print(f"@@@ {self.key}: Using bonds: {bonds}")
        gnt_kwargs = {
            "step_len": 0.1,
            "bonds": bonds,
            # Growth stops once a TS candidate has been passed.
            "stop_after_ts": True,
            "rms_thresh": 0.003,
        }
        gnt = GrowingNT(geom, **gnt_kwargs)
        # Optimize in a scratch directory; only the trajectory is kept.
        with tempfile.TemporaryDirectory() as tmp_dir:
            opt_kwargs = {
                "max_cycles": 1_000,
                "dump": True,
                "out_dir": tmp_dir,
                "prefix": "gnt",
            }
            opt = PreconLBFGS(gnt, **opt_kwargs)
            opt.run()
            self.backup_from_dir(tmp_dir, "gnt_optimization.trj")
        # The first detected TS image becomes the TS guess for TSOpt.
        with self.output().open("w") as handle:
            handle.write(gnt.ts_images[0].as_xyz())
class TSOpt(Params, luigi.Task):
    """Refine the GNT TS guess to a first-order saddle point.

    Outputs the optimized TS geometry, its final Hessian and the animated
    imaginary mode.
    """

    def output(self):
        return (
            luigi.LocalTarget(self.get_path("ts_opt.xyz")),
            luigi.LocalTarget(self.get_path("ts_hessian.h5")),
            luigi.LocalTarget(self.get_path("imaginary_mode_000.trj")),
        )

    def requires(self):
        return GNT(self.id_, self.base, self.step)

    def run(self):
        print(highlight_text(f"TSOPT {self.key}"))
        geom = geom_loader(
            self.input().path,
            coord_type="redund",
            coord_kwargs={
                "rebuild": False,
            },
        )
        geom.set_calculator(self.get_calc())

        with tempfile.TemporaryDirectory() as tmp_dir:
            tsopt = RSPRFOptimizer(
                geom,
                max_cycles=75,
                out_dir=tmp_dir,
                prefix="ts",
                dump=True,
                hessian_recalc=5,
                trust_max=0.3,
                overachieve_factor=3.0,
                # Abort when the energy rises too much; the guess is then
                # probably not a proper TS candidate.
                max_energy_incr=0.25,
            )
            tsopt.run()
            self.backup_from_dir(tmp_dir, "ts_optimization.trj")
            assert tsopt.is_converged

            xyz_out, hess_out, imag_out = self.output()
            with xyz_out.open("w") as handle:
                handle.write(geom.as_xyz())
            # Bugfix: do_final_hessian() writes into tmp_dir and the
            # backups read from it, so all of this has to happen *before*
            # the TemporaryDirectory context exits and removes the
            # directory (previously these lines ran after cleanup).
            do_final_hessian(geom, out_dir=tmp_dir, write_imag_modes=True)
            self.backup_from_dir(tmp_dir, "final_hessian.h5", hess_out.path)
            self.backup_from_dir(tmp_dir, "imaginary_mode_000.trj", imag_out.path)
class IRC(Params, luigi.Task):
    """Follow the IRC from the optimized TS and store both endpoints."""

    def output(self):
        return (
            luigi.LocalTarget(self.get_path("irc_first.xyz")),
            luigi.LocalTarget(self.get_path("irc_last.xyz")),
        )

    def requires(self):
        return TSOpt(self.id_, self.base, self.step)

    def run(self):
        print(highlight_text(f"IRC {self.key}"))
        # TSOpt provides (geometry, hessian, imaginary mode) targets.
        ts_xyz, ts_hess, ts_imag = self.input()
        geom = geom_loader(ts_xyz.path)
        geom.set_calculator(self.get_calc())
        with tempfile.TemporaryDirectory() as tmp_dir:
            irc = EulerPC(
                geom,
                out_dir=tmp_dir,
                # Reuse the TS Hessian instead of recomputing it.
                hessian_init=ts_hess.path,
                hessian_recalc=10,
                rms_grad_thresh=0.0005,
                imag_below=-35,
            )
            irc.run()
            self.backup_from_dir(tmp_dir, "finished_irc.trj")
        # First/last frames of the integrated IRC become the endpoints.
        first_geom = geom.copy()
        first_geom.coords = irc.all_coords[0]
        last_geom = geom.copy()
        last_geom.coords = irc.all_coords[-1]
        for geom_, target in zip((first_geom, last_geom), self.output()):
            with target.open("w") as handle:
                handle.write(geom_.as_xyz())
class EndOpt(Params, luigi.Task):
    """Relax one IRC endpoint ('first' or 'last') to a minimum."""

    # Maps the prefix onto the index of the matching IRC.output() target.
    pref_map = {
        "first": 0,
        "last": -1,
    }
    prefix = luigi.Parameter()

    @property
    def pref_(self):
        """Filename stem, e.g. 'first_endopt'."""
        return f"{self.prefix}_endopt"

    def output(self):
        return (
            luigi.LocalTarget(self.get_path(f"{self.pref_}.xyz")),
            luigi.LocalTarget(self.get_path(f"{self.pref_}_hessian.h5")),
        )

    def requires(self):
        return IRC(self.id_, self.base, self.step)

    def run(self):
        print(highlight_text(f"EndOpt {self.key}"))
        ind = self.pref_map[self.prefix]
        geom = geom_loader(self.input()[ind].path, coord_type="tric")
        geom.set_calculator(self.get_calc())

        with tempfile.TemporaryDirectory() as tmp_dir:
            opt = RFOptimizer(
                geom,
                dump=True,
                overachieve_factor=3.0,
                out_dir=tmp_dir,
                thresh="gau",
                hessian_recalc=10,
            )
            opt.run()
            assert opt.is_converged
            do_final_hessian(geom, out_dir=tmp_dir)

            out_xyz, out_hess = self.output()
            with out_xyz.open("w") as handle:
                handle.write(geom.as_xyz())
            # Bugfix: copy the Hessian while tmp_dir still exists;
            # previously this ran after the TemporaryDirectory had
            # already been cleaned up.
            self.backup_from_dir(tmp_dir, "final_hessian.h5", out_hess.path)
class FirstEndOpt(EndOpt):
    """EndOpt fixed to the 'first' IRC endpoint."""

    # Override with a real luigi Parameter (not a bare string) so that
    # 'prefix' stays registered as a parameter of this task.
    prefix = luigi.Parameter(default="first")
class LastEndOpt(EndOpt):
    """EndOpt fixed to the 'last' IRC endpoint."""

    # Override with a real luigi Parameter (not a bare string) so that
    # 'prefix' stays registered as a parameter of this task.
    prefix = luigi.Parameter(default="last")
class ReactionPath(Params, luigi.Task):
    """Assemble one first/TS/last triple and possibly spawn the next step.

    Collects the relaxed endpoints and the TS, reports barriers, writes a
    summary trajectory, and — if the driven bonds are still unsatisfied at
    the endpoint that moved furthest from the reference — dynamically
    yields a ReactionPath for step + 1.
    """

    def output(self):
        return (
            luigi.LocalTarget(self.get_path("first_endopt_hessian.h5")),
            luigi.LocalTarget(self.get_path("ts_hessian.h5")),
            luigi.LocalTarget(self.get_path("last_endopt_hessian.h5")),
        )

    def requires(self):
        return (
            Minimum(self.id_, self.base, self.step),
            FirstEndOpt(self.id_, self.base, self.step),
            TSOpt(self.id_, self.base, self.step),
            LastEndOpt(self.id_, self.base, self.step),
        )

    def run(self):
        # Later steps mark themselves complete up front; only step 0
        # drives the recursion bookkeeping below.
        if self.step >= 1:
            self.complete = lambda: True
        minimum, first_endopt, tsopt, last_endopt = self.input()
        ref_geom, first_geom, last_geom = [
            geom_loader(target.path)
            for target in (minimum, first_endopt[0], last_endopt[0])
        ]
        # RMSDs w.r.t. the original minimum decide which endpoint is
        # "new" (the one that moved away from the reference).
        first_rmsd = ref_geom.rmsd(first_geom)
        last_rmsd = ref_geom.rmsd(last_geom)
        print("first", first_rmsd, "last", last_rmsd)
        calc = self.get_calc()
        atoms = ref_geom.atoms

        # Single-point energy helper; all geometries share 'atoms'.
        def get_energy(geom):
            return calc.get_energy(atoms, geom.cart_coords)["energy"]

        ts_geom = geom_loader(tsopt[0].path)
        ts_energy = get_energy(ts_geom)
        first_energy = get_energy(first_geom)
        last_energy = get_energy(last_geom)
        # Energies relative to the lowest of the three, in kJ/mol.
        energies = np.array((first_energy, ts_energy, last_energy))
        energies -= energies.min()
        energies *= AU2KJPERMOL
        comments = [f"{en:.2f} kJ mol⁻¹" for en in energies]
        first_kj, ts_kj, last_kj = energies
        first_ts = ts_kj - first_kj
        last_ts = ts_kj - last_kj
        print(f"@@@ {self.key}: TS - first: {first_ts:.2f} kJ mol⁻¹")
        print(f"@@@ {self.key}: TS - last: {last_ts:.2f} kJ mol⁻¹")
        # Summary trajectory: first endpoint, TS, last endpoint, each
        # annotated with its relative energy.
        with open(self.get_path("first_ts_last.trj"), "w") as handle:
            handle.write(
                "\n".join(
                    [
                        geom.as_xyz(comment=comment)
                        for geom, comment in zip(
                            (first_geom, ts_geom, last_geom), comments
                        )
                    ]
                )
            )
        # pick geom with higher rmsd
        next_ind = 1 if last_rmsd > first_rmsd else 0
        next_key = ("first", "last")[next_ind]
        next_geom = (first_geom, last_geom)[next_ind]
        # check if bond constraints are satisfied at next_geom
        bonds = get_weighted_bond_mode(next_geom.atoms, next_geom.coords3d)
        print(f"@@@ {self.key}: Trying to continue with {next_key}_geom")
        print(f"@@@ {self.key}: bonds for next step {bonds}\n@@@")
        if bonds:
            # Unsatisfied bonds remain: recurse into the next step via
            # luigi's dynamic dependencies.
            new_step = self.step + 1
            if new_step == 1:
                with open(self.out_dir / f"{new_step:02d}_input.xyz", "w") as handle:
                    handle.write(next_geom.as_xyz())
            yield ReactionPath(self.id_, self.base, new_step)
        else:
            # All driven bonds formed/broken: the path is finished.
            self.complete = lambda: True
class ReactionPathSummary(Params, luigi.Task):
    # Stub for a later per-path summary step; output()/run() are not
    # implemented yet and its use in ReactionPathSummaries is commented
    # out.
    def output(self):
        pass

    def requires(self):
        return ReactionPath(self.id_, self.base, self.step)

    def run(self):
        pass
class ReactionPathSummaries(luigi.WrapperTask):
    # Top-level wrapper scheduling one ReactionPath per input geometry.
    def requires(self):
        for id_, _ in enumerate(GEOMS):
            # yield ReactionPathSummary(id_=id_)
            yield ReactionPath(id_=id_)
def parse_args(args):
    """Parse the command line (usually sys.argv[1:]).

    Positionals: the geometry file and a flat list of integers that is
    later reshaped into (from, to, weight) bond triples. --first
    optionally limits how many geometries are processed.
    """
    prs = argparse.ArgumentParser()
    prs.add_argument("fn")
    prs.add_argument("bonds", nargs="+", type=int)
    prs.add_argument("--first", type=int, default=0)
    return prs.parse_args(args)
def run():
    """Entry point: set up globals from the CLI and launch the luigi build."""
    args = parse_args(sys.argv[1:])
    fn = args.fn
    bonds = args.bonds
    first = args.first
    # Reshape the flat integer list into (from, to, weight) triples.
    # Using dtype = int here may be problematic, as this excludes
    # fractional weights in bonds.
    bonds = np.array(bonds, dtype=int).reshape(-1, 3).tolist()
    # The tasks read these two module globals instead of taking them as
    # parameters.
    global GEOMS
    GEOMS = list(geom_loader(fn, iterable=True))
    if first > 0:
        GEOMS = GEOMS[:first]
    global get_weighted_bond_mode
    get_weighted_bond_mode = get_weighted_bond_mode_getter(bonds, fractional=False)
    # luigi.build((ReactionPath(id_=0),), local_scheduler=True)
    luigi.build((ReactionPathSummaries(),), local_scheduler=True)


if __name__ == "__main__":
    run()
| eljost/pysisyphus | scripts/batchnt_multi.py | Python | gpl-3.0 | 11,910 | [
"xTB"
] | 11e33f1006168bff5e461fe1988175fb106b05802dcb9162920a83ecddb5d917 |
# -*- coding: utf-8 -*-
##########################################################################
# #
# Eddy: a graphical editor for the specification of Graphol ontologies #
# Copyright (C) 2015 Daniele Pantaleone <danielepantaleone@me.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##################### ##################### #
# #
# Graphol is developed by members of the DASI-lab group of the #
# Dipartimento di Ingegneria Informatica, Automatica e Gestionale #
# A.Ruberti at Sapienza University of Rome: http://www.dis.uniroma1.it #
# #
# - Domenico Lembo <lembo@dis.uniroma1.it> #
# - Valerio Santarelli <santarelli@dis.uniroma1.it> #
# - Domenico Fabio Savo <savo@dis.uniroma1.it> #
# - Daniele Pantaleone <pantaleone@dis.uniroma1.it> #
# - Marco Console <console@dis.uniroma1.it> #
# #
##########################################################################
import os
import sys
import textwrap
from collections import OrderedDict
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from eddy import APPNAME, DIAG_HOME, GRAPHOL_HOME
from eddy import ORGANIZATION, VERSION, WORKSPACE
from eddy.core.clipboard import Clipboard
from eddy.core.commands.common import CommandComposeAxiom
from eddy.core.commands.common import CommandItemsRemove
from eddy.core.commands.common import CommandItemsTranslate
from eddy.core.commands.common import CommandSnapItemsToGrid
from eddy.core.commands.diagram import CommandDiagramAdd
from eddy.core.commands.diagram import CommandDiagramRemove
from eddy.core.commands.diagram import CommandDiagramRename
from eddy.core.commands.edges import CommandEdgeBreakpointRemove
from eddy.core.commands.edges import CommandEdgeSwap
from eddy.core.commands.labels import CommandLabelMove
from eddy.core.commands.labels import CommandLabelChange
from eddy.core.commands.nodes_2 import CommandNodeSetRemainingCharacters
from eddy.core.commands.nodes import CommandNodeSwitchTo
from eddy.core.commands.nodes import CommandNodeSetBrush
from eddy.core.commands.nodes import CommandNodeSetDepth
from eddy.core.commands.project import CommandProjectSetProfile, CommandProjectDisconnectSpecificSignals, CommandProjectConnectSpecificSignals
from eddy.core.common import HasActionSystem
from eddy.core.common import HasDiagramExportSystem
from eddy.core.common import HasDiagramLoadSystem
from eddy.core.common import HasMenuSystem
from eddy.core.common import HasNotificationSystem
from eddy.core.common import HasOntologyExportSystem
from eddy.core.common import HasOntologyLoadSystem
from eddy.core.common import HasPluginSystem
from eddy.core.common import HasReasoningSystem
from eddy.core.common import HasProfileSystem
from eddy.core.common import HasProjectExportSystem
from eddy.core.common import HasProjectLoadSystem
from eddy.core.common import HasThreadingSystem
from eddy.core.common import HasWidgetSystem
from eddy.core.datatypes.graphol import Identity, Item
from eddy.core.datatypes.graphol import Restriction, Special
from eddy.core.datatypes.misc import Color, DiagramMode
from eddy.core.datatypes.owl import Datatype, Facet
from eddy.core.datatypes.qt import BrushIcon, Font
from eddy.core.datatypes.system import Channel, File
from eddy.core.diagram import Diagram
from eddy.core.exporters.graphml import GraphMLDiagramExporter
from eddy.core.exporters.graphol import GrapholProjectExporter
from eddy.core.exporters.owl2 import OWLOntologyExporter
from eddy.core.exporters.pdf import PdfDiagramExporter
from eddy.core.exporters.graphreferences import GraphReferences
from eddy.core.exporters.printer import PrinterDiagramExporter
from eddy.core.factory import MenuFactory, PropertyFactory, DescriptionFactory
from eddy.core.functions.fsystem import fexists
from eddy.core.functions.misc import first, format_exception
from eddy.core.functions.misc import snap, snapF
from eddy.core.functions.path import expandPath
from eddy.core.functions.path import shortPath
from eddy.core.functions.signals import connect, disconnect
from eddy.core.loaders.graphml import GraphMLOntologyLoader
from eddy.core.loaders.graphol import GrapholOntologyLoader_v2
from eddy.core.loaders.graphol import GrapholProjectLoader_v2
from eddy.core.output import getLogger
from eddy.core.plugin import PluginManager
from eddy.core.reasoner import ReasonerManager
from eddy.core.profiles.owl2 import OWL2Profile
from eddy.core.profiles.owl2ql import OWL2QLProfile
from eddy.core.profiles.owl2rl import OWL2RLProfile
from eddy.core.commands.nodes_2 import CommandProjetSetIRIPrefixesNodesDict, CommandProjetSetIRIofCutNodes
from eddy.core.update import UpdateCheckWorker
from eddy.ui.about import AboutDialog
from eddy.ui.fields import ComboBox
from eddy.ui.forms import CardinalityRestrictionForm
from eddy.ui.forms import NewDiagramForm
from eddy.ui.forms import RefactorNameForm
from eddy.ui.forms import RenameDiagramForm
from eddy.ui.forms import ValueForm
from eddy.ui.log import LogDialog
from eddy.ui.mdi import MdiArea
from eddy.ui.mdi import MdiSubWindow
from eddy.ui.plugin import PluginInstallDialog
from eddy.ui.preferences import PreferencesDialog
from eddy.ui.progress import BusyProgressDialog
from eddy.ui.syntax import SyntaxValidationDialog
from eddy.ui.prefix_explorer import OntologyExplorerDialog
from eddy.ui.ontology_consistency_check import OntologyConsistencyCheckDialog
from eddy.ui.view import DiagramView
from eddy.core.items.common import AbstractItem
# Platform flags; _MACOS is used below to strip menu action icons on
# macOS (menu entries conventionally carry no icons there).
_LINUX = sys.platform.startswith('linux')
_MACOS = sys.platform.startswith('darwin')
_WIN32 = sys.platform.startswith('win32')

# Module-level logger shared by the session implementation.
LOGGER = getLogger()
class Session(HasReasoningSystem, HasActionSystem, HasMenuSystem, HasPluginSystem, HasWidgetSystem,
HasDiagramExportSystem, HasOntologyExportSystem, HasProjectExportSystem,
HasDiagramLoadSystem, HasOntologyLoadSystem, HasProjectLoadSystem,
HasProfileSystem, HasThreadingSystem, HasNotificationSystem, QtWidgets.QMainWindow):
"""
Extends QtWidgets.QMainWindow and implements Eddy main working session.
Additionally to built-in signals, this class emits:
* sgnClosed: whenever the current session is closed.
* sgnFocusDiagram: whenever a diagram is to be focused.
* sgnFocusItem: whenever an item is to be focused.
* sgnPluginDisposed: to notify that a plugin has been destroyed.
* sgnPluginStarted: to notify that a plugin startup sequence has been completed.
* sgnProjectSaved: to notify that the current project has been saved.
* sgnQuit: whenever the application is to be terminated.
* sgnReady: after the session startup sequence completes.
* sgnSaveProject: whenever the current project is to be saved.
* sgnUpdateState: to notify that something in the session state changed.
"""
sgnClosed = QtCore.pyqtSignal()
sgnCheckForUpdate = QtCore.pyqtSignal()
sgnDiagramFocused = QtCore.pyqtSignal('QGraphicsScene')
sgnFocusDiagram = QtCore.pyqtSignal('QGraphicsScene')
sgnFocusItem = QtCore.pyqtSignal('QGraphicsItem')
sgnPluginDisposed = QtCore.pyqtSignal(str)
sgnPluginStarted = QtCore.pyqtSignal(str)
sgnProjectSaved = QtCore.pyqtSignal()
sgnQuit = QtCore.pyqtSignal()
sgnReady = QtCore.pyqtSignal()
sgnSaveProject = QtCore.pyqtSignal()
sgnUpdateState = QtCore.pyqtSignal()
sgnReasonerDisposed = QtCore.pyqtSignal(str)
sgnReasonerStarted = QtCore.pyqtSignal(str)
    def __init__(self, application, path, **kwargs):
        """
        Initialize the application main working session.
        :type application: QApplication
        :type path: str
        :type kwargs: dict
        """
        super().__init__(**kwargs)
        #############################################
        # INITIALIZE MAIN STUFF
        #################################
        self.app = application
        self.clipboard = Clipboard(self)
        self.undostack = QtWidgets.QUndoStack(self)
        self.mdi = MdiArea(self)
        self.mf = MenuFactory(self)
        self.pf = PropertyFactory(self)
        self.df = DescriptionFactory(self)
        self.pmanager = PluginManager(self)
        self.rmanager = ReasonerManager(self)  # written by ASHWIN RAVISHANKAR
        # Set by the project loader below.
        self.project = None
        #############################################
        # CONFIGURE SESSION
        #################################
        # NOTE(review): the order of these init* calls looks deliberate
        # (actions before menus, profiles before widgets) — confirm before
        # reordering.
        self.initReasoners()  # written by ASHWIN RAVISHANKAR
        self.initPre()
        self.initActions()
        self.initMenus()
        self.initProfiles()
        self.initWidgets()
        self.initExporters()
        self.initLoaders()
        self.initSignals()
        self.initStatusBar()
        self.initToolBars()
        self.initPlugins()
        self.initState()
        #############################################
        # LOAD THE GIVEN PROJECT
        #################################
        # The loader runs synchronously (direct run(), no worker thread),
        # so the session only proceeds once the project is fully loaded.
        worker = self.createProjectLoader(File.Graphol, path, self)
        worker.run()
        #############################################
        # COMPLETE SESSION SETUP
        #################################
        self.setAcceptDrops(False)
        self.setCentralWidget(self.mdi)
        self.setDockOptions(Session.AnimatedDocks | Session.AllowTabbedDocks)
        self.setWindowIcon(QtGui.QIcon(':/icons/128/ic_eddy'))
        # NOTE(review): setWindowTitle() expects a string; confirm that
        # the project object is str-compatible here.
        self.setWindowTitle(self.project)
        self.sgnReady.emit()
        LOGGER.info('Session startup completed: %s v%s [%s]', APPNAME, VERSION, self.project.name)
#############################################
# SESSION CONFIGURATION
#################################
def initActions(self):
"""
Configure application built-in actions.
"""
#############################################
# APPLICATION GENERIC
#################################
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_settings_black'), 'Preferences', self,
objectName='open_preferences', shortcut=QtGui.QKeySequence.Preferences,
statusTip='Open application preferences', triggered=self.doOpenDialog)
action.setData(PreferencesDialog)
self.addAction(action)
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_power_settings_new_black'), 'Quit', self,
objectName='quit', shortcut=QtGui.QKeySequence.Quit,
statusTip='Quit {0}'.format(APPNAME), triggered=self.doQuit))
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_help_outline_black'), 'About {0}'.format(APPNAME),
self, objectName='about', shortcut=QtGui.QKeySequence.HelpContents,
statusTip='About {0}'.format(APPNAME), triggered=self.doOpenDialog)
action.setData(AboutDialog)
self.addAction(action)
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_link_black'), 'Visit DIAG website', self,
objectName='diag_web', statusTip='Visit DIAG website',
triggered=self.doOpenURL)
action.setData(DIAG_HOME)
self.addAction(action)
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_link_black'), 'Visit Graphol website', self,
objectName='graphol_web', statusTip='Visit Graphol website',
triggered=self.doOpenURL)
action.setData(GRAPHOL_HOME)
self.addAction(action)
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_message_black'), 'System log...',
self, objectName='system_log', statusTip='Show application system log',
triggered=self.doOpenDialog)
action.setData(LogDialog)
self.addAction(action)
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_extension_black'), 'Install Plugin...',
self, objectName='install_plugin', statusTip='Install a plugin',
triggered=self.doOpenDialog)
action.setData(PluginInstallDialog)
self.addAction(action)
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_system_update'), 'Check for Updates...',
self, objectName='check_for_updates', statusTip='Checks for available updates.',
triggered=self.doCheckForUpdate)
self.addAction(action)
settings = QtCore.QSettings(ORGANIZATION, APPNAME)
collection = settings.value('project/recent', None, str) or []
collection = collection[:5]
group = QtWidgets.QActionGroup(self, objectName='recent_projects')
for i, path in enumerate(collection, start=1):
action = QtWidgets.QAction('{0}. {1}'.format(i, os.path.basename(path)), group, triggered=self.doOpenRecent)
action.setData(path)
group.addAction(action)
self.addAction(group)
if _MACOS:
self.action('about').setIcon(QtGui.QIcon())
self.action('open_preferences').setIcon(QtGui.QIcon())
self.action('quit').setIcon(QtGui.QIcon())
#############################################
# PROJECT / DIAGRAM MANAGEMENT
#################################
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_add_document_black'), 'New diagram...',
self, objectName='new_diagram', shortcut=QtGui.QKeySequence.New,
statusTip='Create a new diagram', triggered=self.doNewDiagram))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_label_outline_black'), 'Rename...',
self, objectName='rename_diagram', statusTip='Rename a diagram',
triggered=self.doRenameDiagram))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_delete_black'), 'Delete...',
self, objectName='remove_diagram', statusTip='Delete a diagram',
triggered=self.doRemoveDiagram))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_folder_open_black'), 'Open...',
self, objectName='open', shortcut=QtGui.QKeySequence.Open,
statusTip='Open a diagram and add it to the current project',
triggered=self.doOpen))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_close_black'), 'Close', self,
objectName='close_project', shortcut=QtGui.QKeySequence.Close,
statusTip='Close the current project', triggered=self.doClose))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_save_black'), 'Save', self,
objectName='save', shortcut=QtGui.QKeySequence.Save,
statusTip='Save the current project', enabled=False,
triggered=self.doSave))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_save_black'), 'Save As...', self,
objectName='save_as', shortcut=QtGui.QKeySequence.SaveAs,
statusTip='Create a copy of the active diagram',
enabled=False, triggered=self.doSaveAs))
self.addAction(QtWidgets.QAction(
'Import...', self, objectName='import', triggered=self.doImport,
statusTip='Import a document in the current project'))
self.addAction(QtWidgets.QAction(
'Export...', self, objectName='export', triggered=self.doExport,
statusTip='Export the current project in a different format',
enabled=False))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_print_black'), 'Print...', self,
objectName='print', shortcut=QtGui.QKeySequence.Print,
statusTip='Print the active diagram', enabled=False,
triggered=self.doPrint))
#############################################
# PROJECT SPECIFIC
#################################
action = self.undostack.createUndoAction(self)
action.setIcon(QtGui.QIcon(':/icons/24/ic_undo_black'))
action.setObjectName('undo')
action.setShortcut(QtGui.QKeySequence.Undo)
self.addAction(action)
action = self.undostack.createRedoAction(self)
action.setIcon(QtGui.QIcon(':/icons/24/ic_redo_black'))
action.setObjectName('redo')
action.setShortcut(QtGui.QKeySequence.Redo)
self.addAction(action)
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_spellcheck_black'), 'Run syntax check',
self, objectName='syntax_check', triggered=self.doSyntaxCheck,
statusTip='Run syntax validation according to the selected profile', enabled=False))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/18/ic_treeview_branch_closed'), 'Run consistency check on active ontology',
self, objectName='ontology_consistency_check', triggered=self.doOntologyConsistencyCheck,
statusTip='Run Reasoner', enabled=False))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_refresh_black'), 'Reset consistency check',
self, objectName='decolour_nodes', triggered=self.BackgrounddeColourNodesAndEdges,
statusTip='(decolour the nodes)', enabled=False))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_settings_ethernet_black'),
'Open Ontology Manager',
self, objectName='open_prefix_manager', enabled=True,
statusTip='Open Ontology Manager', triggered=self.doOpenOntologyExplorer))
#############################################
# DIAGRAM SPECIFIC
#################################
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_center_focus_strong_black'), 'Center diagram', self,
objectName='center_diagram', statusTip='Center the active diagram',
enabled=False, triggered=self.doCenterDiagram))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_settings_black'), 'Properties...',
self, objectName='diagram_properties',
statusTip='Open current diagram properties',
triggered=self.doOpenDiagramProperties))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_healing_black'), 'Snap to grid',
self, objectName='snap_to_grid', enabled=False,
statusTip='Align the elements in the active diagram to the grid',
triggered=self.doSnapTopGrid))
icon = QtGui.QIcon()
icon.addFile(':/icons/24/ic_grid_on_black', QtCore.QSize(), QtGui.QIcon.Normal, QtGui.QIcon.On)
icon.addFile(':/icons/24/ic_grid_off_black', QtCore.QSize(), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.addAction(QtWidgets.QAction(
icon, 'Toggle the grid', self, objectName='toggle_grid', enabled=False,
checkable=True, statusTip='Activate or deactivate the diagram grid',
triggered=self.doToggleGrid))
#############################################
# ITEM GENERICS
#################################
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_content_cut_black'), 'Cut', self,
objectName='cut', enabled=False, shortcut=QtGui.QKeySequence.Cut,
statusTip='Cut selected items', triggered=self.doCut))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_content_copy_black'), 'Copy', self,
objectName='copy', enabled=False, shortcut=QtGui.QKeySequence.Copy,
statusTip='Copy selected items', triggered=self.doCopy))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_content_paste_black'), 'Paste', self,
objectName='paste', enabled=False, shortcut=QtGui.QKeySequence.Paste,
statusTip='Paste previously copied items', triggered=self.doPaste))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_delete_black'), 'Delete', self,
objectName='delete', enabled=False, shortcut=QtGui.QKeySequence.Delete,
statusTip='Delete selected items', triggered=self.doDelete))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_delete_forever_black'), 'Purge', self,
objectName='purge', enabled=False, triggered=self.doPurge,
statusTip='Delete selected items by also removing no more necessary elements'))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_flip_to_front_black'), 'Bring to front',
self, objectName='bring_to_front', enabled=False,
statusTip='Bring selected items to front',
triggered=self.doBringToFront))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_flip_to_back_black'), 'Send to back',
self, objectName='send_to_back', enabled=False,
statusTip='Send selected items to back',
triggered=self.doSendToBack))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_select_all_black'), 'Select all',
self, objectName='select_all', enabled=False,
statusTip='Select all items in the active diagram',
shortcut=QtGui.QKeySequence.SelectAll, triggered=self.doSelectAll))
#############################################
# EDGE RELATED
#################################
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_delete_black'), 'Remove breakpoint', self,
objectName='remove_breakpoint', statusTip='Remove the selected edge breakpoint',
triggered=self.doRemoveBreakpoint))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_swap_horiz_black'), 'Swap edge', self,
objectName='swap_edge', shortcut='ALT+S', enabled=False,
statusTip='Swap the direction of all the selected edges',
triggered=self.doSwapEdge))
#############################################
# NODE RELATED
#################################
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_settings_black'), 'Properties...',
self, objectName='node_properties',
triggered=self.doOpenNodeProperties))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_node_description'), 'Description',
self, objectName='node_description',
triggered=self.doOpenNodeDescription))
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_label_outline_black'), 'Rename...',
self, objectName='refactor_name',
triggered=self.doRefactorName))
"""
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_label_outline_black'), 'Change prefix...',
self, objectName='refactor_change_prefix',
triggered=self.doRefactorChangeprefix))
"""
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_refresh_black'), 'Relocate label',
self, objectName='relocate_label',
triggered=self.doRelocateLabel))
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_top_black'), Special.Top.value,
self, objectName='special_top',
triggered=self.doSetNodeSpecial)
action.setData(Special.Top)
self.addAction(action)
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_bottom_black'), Special.Bottom.value,
self, objectName='special_bottom',
triggered=self.doSetNodeSpecial)
action.setData(Special.Bottom)
self.addAction(action)
style = self.style()
isize = style.pixelMetric(QtWidgets.QStyle.PM_ToolBarIconSize)
for name, trigger in (('brush', self.doSetNodeBrush), ('refactor_brush', self.doRefactorBrush)):
group = QtWidgets.QActionGroup(self, objectName=name)
for color in Color:
action = QtWidgets.QAction(
BrushIcon(isize, isize, color.value), color.name,
self, checkable=False, triggered=trigger)
action.setData(color)
group.addAction(action)
self.addAction(group)
#############################################
# ROLE SPECIFIC
#################################
self.addAction(QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_square_pair_black'), 'Invert Role', self,
objectName='invert_role', triggered=self.doInvertRole,
statusTip='Invert the selected role in all its occurrences'))
#############################################
# ROLE / ATTRIBUTE SPECIFIC
#################################
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_square_outline_black'), 'Domain',
self, objectName='property_domain', shortcut='CTRL+D',
triggered=self.doComposePropertyExpression)
action.setData((Item.DomainRestrictionNode,))
self.addAction(action)
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_square_black'), 'Range',
self, objectName='property_range', shortcut='CTRL+R',
triggered=self.doComposePropertyExpression)
action.setData((Item.RangeRestrictionNode,))
self.addAction(action)
action = QtWidgets.QAction(
QtGui.QIcon(':/icons/24/ic_square_half_black'), 'Domain/Range',
self, objectName='property_domain_range',
triggered=self.doComposePropertyExpression)
action.setData((Item.DomainRestrictionNode, Item.RangeRestrictionNode))
self.addAction(action)
#############################################
# PROPERTY DOMAIN / RANGE SPECIFIC
#################################
group = QtWidgets.QActionGroup(self, objectName='restriction')
for restriction in Restriction:
action = QtWidgets.QAction(restriction.value, group,
objectName=restriction.name, checkable=True,
triggered=self.doSetPropertyRestriction)
action.setData(restriction)
group.addAction(action)
self.addAction(group)
data = OrderedDict()
data[Item.DomainRestrictionNode] = 'Domain'
data[Item.RangeRestrictionNode] = 'Range'
group = QtWidgets.QActionGroup(self, objectName='switch_restriction')
for k, v in data.items():
action = QtWidgets.QAction(v, group,
objectName=k.name, checkable=True,
triggered=self.doSwitchRestrictionNode)
action.setData(k)
group.addAction(action)
self.addAction(group)
#############################################
# VALUE-DOMAIN SPECIFIC
#################################
group = QtWidgets.QActionGroup(self, objectName='datatype')
for datatype in Datatype:
action = QtWidgets.QAction(datatype.value, group,
objectName=datatype.name, checkable=True,
triggered=self.doSetDatatype)
action.setData(datatype)
group.addAction(action)
self.addAction(group)
#############################################
# INDIVIDUAL SPECIFIC
#################################
group = QtWidgets.QActionGroup(self, objectName='switch_individual')
for identity in (Identity.Individual, Identity.Value):
action = QtWidgets.QAction(identity.value, group,
objectName=identity.name, checkable=True,
triggered=self.doSetIndividualAs)
action.setData(identity)
group.addAction(action)
self.addAction(group)
#############################################
# FACET SPECIFIC
#################################
group = QtWidgets.QActionGroup(self, objectName='facet')
for facet in Facet:
action = QtWidgets.QAction(facet.value, group,
objectName=facet.name, checkable=True,
triggered=self.doSetFacet)
action.setData(facet)
group.addAction(action)
self.addAction(group)
#############################################
# OPERATORS SPECIFIC
#################################
data = OrderedDict()
data[Item.ComplementNode] = 'Complement'
data[Item.DisjointUnionNode] = 'Disjoint union'
data[Item.DatatypeRestrictionNode] = 'Datatype restriction'
data[Item.EnumerationNode] = 'Enumeration'
data[Item.IntersectionNode] = 'Intersection'
data[Item.RoleChainNode] = 'Role chain'
data[Item.RoleInverseNode] = 'Role inverse'
data[Item.UnionNode] = 'Union'
group = QtWidgets.QActionGroup(self, objectName='switch_operator')
for k, v in data.items():
action = QtWidgets.QAction(v, group,
objectName=k.name, checkable=True,
triggered=self.doSwitchOperatorNode)
action.setData(k)
group.addAction(action)
self.addAction(group)
def initExporters(self):
"""
Initialize diagram and project exporters.
"""
self.addDiagramExporter(GraphMLDiagramExporter)
self.addDiagramExporter(PdfDiagramExporter)
self.addDiagramExporter(GraphReferences)
self.addOntologyExporter(OWLOntologyExporter)
self.addProjectExporter(GrapholProjectExporter)
def initLoaders(self):
"""
Initialize diagram and project loaders.
"""
self.addOntologyLoader(GraphMLOntologyLoader)
self.addOntologyLoader(GrapholOntologyLoader_v2)
self.addProjectLoader(GrapholProjectLoader_v2)
    def initMenus(self):
        """
        Configure application built-in menus.

        Builds the menu-bar menus (File, Edit, Compose, View, Ontology,
        Tools, Help) and the context menus used by nodes/edges (brush,
        special type, refactor, datatype, facet, restriction, switch-to),
        then installs the top-level menus on the menu bar.
        """
        #############################################
        # MENU BAR RELATED
        #################################
        menu = QtWidgets.QMenu('File', objectName='file')
        menu.addAction(self.action('new_diagram'))
        menu.addAction(self.action('open'))
        menu.addSeparator()
        menu.addAction(self.action('save'))
        menu.addAction(self.action('save_as'))
        menu.addAction(self.action('close_project'))
        menu.addSeparator()
        menu.addAction(self.action('import'))
        menu.addAction(self.action('export'))
        menu.addSeparator()
        # Recently opened projects are a dynamic action group.
        for action in self.action('recent_projects').actions():
            menu.addAction(action)
        menu.addSeparator()
        menu.addAction(self.action('print'))
        menu.addSeparator()
        menu.addAction(self.action('quit'))
        self.addMenu(menu)
        # NOTE(review): the leading U+200C (zero-width non-joiner) in the
        # title presumably avoids platform-specific special handling of menus
        # named 'Edit'/'View' — confirm before changing.
        menu = QtWidgets.QMenu('\u200CEdit', objectName='edit')
        menu.addAction(self.action('undo'))
        menu.addAction(self.action('redo'))
        menu.addSeparator()
        menu.addAction(self.action('cut'))
        menu.addAction(self.action('copy'))
        menu.addAction(self.action('paste'))
        menu.addAction(self.action('delete'))
        menu.addSeparator()
        menu.addAction(self.action('bring_to_front'))
        menu.addAction(self.action('send_to_back'))
        menu.addSeparator()
        menu.addAction(self.action('swap_edge'))
        menu.addSeparator()
        menu.addAction(self.action('select_all'))
        menu.addAction(self.action('snap_to_grid'))
        menu.addAction(self.action('center_diagram'))
        menu.addSeparator()
        menu.addAction(self.action('open_preferences'))
        self.addMenu(menu)
        menu = QtWidgets.QMenu('Compose', objectName='compose')
        menu.addAction(self.action('property_domain'))
        menu.addAction(self.action('property_range'))
        menu.addAction(self.action('property_domain_range'))
        self.addMenu(menu)
        menu = QtWidgets.QMenu('Toolbars', objectName='toolbars')
        menu.addAction(self.widget('document_toolbar').toggleViewAction())
        menu.addAction(self.widget('editor_toolbar').toggleViewAction())
        menu.addAction(self.widget('graphol_toolbar').toggleViewAction())
        menu.addAction(self.widget('view_toolbar').toggleViewAction())
        menu.addAction(self.widget('reasoner_toolbar').toggleViewAction()) # ASHWIN RAVISHANKAR
        self.addMenu(menu)
        menu = QtWidgets.QMenu('\u200CView', objectName='view')
        menu.addAction(self.action('toggle_grid'))
        menu.addSeparator()
        menu.addMenu(self.menu('toolbars'))
        menu.addSeparator()
        self.addMenu(menu)
        menu = QtWidgets.QMenu('Ontology', objectName='ontology')
        menu.addAction(self.action('syntax_check'))
        menu.addAction(self.action('ontology_consistency_check'))
        menu.addSeparator()
        menu.addAction(self.action('open_prefix_manager'))
        self.addMenu(menu)
        menu = QtWidgets.QMenu('Tools', objectName='tools')
        menu.addAction(self.action('install_plugin'))
        menu.addSeparator()
        menu.addAction(self.action('system_log'))
        self.addMenu(menu)
        menu = QtWidgets.QMenu('Help', objectName='help')
        menu.addAction(self.action('about'))
        # On macOS the 'About' entry is relocated by the system, so the
        # separator/update entry are only added on other platforms.
        if not _MACOS:
            menu.addSeparator()
            menu.addAction(self.action('check_for_updates'))
        menu.addSeparator()
        menu.addAction(self.action('diag_web'))
        menu.addAction(self.action('graphol_web'))
        self.addMenu(menu)
        #############################################
        # NODE GENERIC
        #################################
        menu = QtWidgets.QMenu('Select color', objectName='brush')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_format_color_fill_black'))
        menu.addActions(self.action('brush').actions())
        self.addMenu(menu)
        menu = QtWidgets.QMenu('Special type', objectName='special')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_star_black'))
        menu.addAction(self.action('special_top'))
        menu.addAction(self.action('special_bottom'))
        self.addMenu(menu)
        # NOTE(review): this menu duplicates the 'special' menu actions and,
        # together with the commented-out refactor_change_prefix code below,
        # looks like work-in-progress — confirm it is intentional.
        menu = QtWidgets.QMenu('Change Prefix', objectName='Change Prefix')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_star_black'))
        menu.addAction(self.action('special_top'))
        menu.addAction(self.action('special_bottom'))
        self.addMenu(menu)
        menu = QtWidgets.QMenu('Select color', objectName='refactor_brush')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_format_color_fill_black'))
        menu.addActions(self.action('refactor_brush').actions())
        self.addMenu(menu)
        #menu = QtWidgets.QMenu('Change prefix', objectName='refactor_change_prefix')
        #menu.setIcon(QtGui.QIcon(':/icons/24/ic_format_color_fill_black'))
        #menu.addActions(self.action('refactor_change_prefix').actions())
        #self.addMenu(menu)
        menu = QtWidgets.QMenu('Refactor', objectName='refactor')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_format_shapes_black'))
        menu.addAction(self.action('refactor_name'))
        #menu.addMenu(self.menu('refactor_change_prefix'))
        menu.addMenu(self.menu('refactor_brush'))
        self.addMenu(menu)
        #############################################
        # ROLE / ATTRIBUTE SPECIFIC
        #################################
        menu = QtWidgets.QMenu('Compose', objectName='compose_domain_range')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_create_black'))
        menu.addAction(self.action('property_domain'))
        menu.addAction(self.action('property_range'))
        menu.addSeparator()
        menu.addAction(self.action('property_domain_range'))
        self.addMenu(menu)
        #############################################
        # VALUE-DOMAIN SPECIFIC
        #################################
        menu = QtWidgets.QMenu('Select type', objectName='datatype')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_transform_black'))
        menu.addActions(self.action('datatype').actions())
        self.addMenu(menu)
        #############################################
        # FACET SPECIFIC
        #################################
        menu = QtWidgets.QMenu('Select facet', objectName='facet')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_transform_black'))
        menu.addActions(self.action('facet').actions())
        self.addMenu(menu)
        #############################################
        # PROPERTY DOMAIN / RANGE SPECIFIC
        #################################
        menu = QtWidgets.QMenu('Select restriction', objectName='property_restriction')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_settings_ethernet'))
        menu.addActions(self.action('restriction').actions())
        self.addMenu(menu)
        menu = QtWidgets.QMenu('Switch to', objectName='switch_restriction')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_transform_black'))
        menu.addActions(self.action('switch_restriction').actions())
        self.addMenu(menu)
        #############################################
        # INDIVIDUAL SPECIFIC
        #################################
        menu = QtWidgets.QMenu('Switch to', objectName='switch_individual')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_transform_black'))
        menu.addActions(self.action('switch_individual').actions())
        self.addMenu(menu)
        #############################################
        # OPERATORS SPECIFIC
        #################################
        menu = QtWidgets.QMenu('Switch to', objectName='switch_operator')
        menu.setIcon(QtGui.QIcon(':/icons/24/ic_transform_black'))
        menu.addActions(self.action('switch_operator').actions())
        self.addMenu(menu)
        #############################################
        # CONFIGURE MENUBAR
        #################################
        menuBar = self.menuBar()
        menuBar.addMenu(self.menu('file'))
        menuBar.addMenu(self.menu('edit'))
        menuBar.addMenu(self.menu('compose'))
        menuBar.addMenu(self.menu('view'))
        menuBar.addMenu(self.menu('ontology'))
        menuBar.addMenu(self.menu('tools'))
        menuBar.addMenu(self.menu('help'))
def initPre(self):
"""
Initialize stuff that are shared by actions, menus, widgets etc.
"""
self.addWidget(QtWidgets.QToolBar('Document', objectName='document_toolbar'))
self.addWidget(QtWidgets.QToolBar('Editor', objectName='editor_toolbar'))
self.addWidget(QtWidgets.QToolBar('View', objectName='view_toolbar'))
self.addWidget(QtWidgets.QToolBar('Graphol', objectName='graphol_toolbar'))
self.addWidget(QtWidgets.QToolBar('Reasoner', objectName='reasoner_toolbar'))
def initPlugins(self):
"""
Load and initialize application plugins.
"""
skip_list = ['Explanation_explorer','Unsatisfiable_Entity_Explorer','developers_iri','prefix_explorer']
#skip_list = ['Explanation_explorer', 'Unsatisfiable_Entity_Explorer', 'prefix_explorer']
self.addPlugins(self.pmanager.init(skip_list=skip_list))
def initProfiles(self):
"""
Initialize the ontology profiles.
"""
self.addProfile(OWL2Profile)
self.addProfile(OWL2QLProfile)
self.addProfile(OWL2RLProfile)
def initSignals(self):
"""
Connect session specific signals to their slots.
"""
connect(self.undostack.cleanChanged, self.doUpdateState)
connect(self.sgnCheckForUpdate, self.doCheckForUpdate)
connect(self.sgnFocusDiagram, self.doFocusDiagram)
connect(self.sgnFocusItem, self.doFocusItem)
connect(self.sgnReady, self.doUpdateState)
connect(self.sgnReady, self.onSessionReady)
connect(self.sgnSaveProject, self.doSave)
connect(self.sgnUpdateState, self.doUpdateState)
def initState(self):
"""
Configure application state by reading the preferences file.
"""
settings = QtCore.QSettings(ORGANIZATION, APPNAME)
self.restoreGeometry(settings.value('session/geometry', QtCore.QByteArray(), QtCore.QByteArray))
self.restoreState(settings.value('session/state', QtCore.QByteArray(), QtCore.QByteArray))
self.action('toggle_grid').setChecked(settings.value('diagram/grid', False, bool))
def initStatusBar(self):
"""
Configure the status bar.
"""
statusbar = QtWidgets.QStatusBar(self)
statusbar.addPermanentWidget(self.widget('progress_bar'))
statusbar.addPermanentWidget(QtWidgets.QWidget())
statusbar.setSizeGripEnabled(False)
self.setStatusBar(statusbar)
    def initToolBars(self):
        """
        Configure application built-in toolbars.

        Populates the five toolbars created in initPre() and docks them
        in the top toolbar area.
        """
        # DOCUMENT TOOLBAR: new / open / save / print.
        toolbar = self.widget('document_toolbar')
        toolbar.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
        toolbar.addAction(self.action('new_diagram'))
        toolbar.addAction(self.action('open'))
        toolbar.addAction(self.action('save'))
        toolbar.addAction(self.action('print'))
        # EDITOR TOOLBAR: undo/redo, clipboard, depth, edge and brush tools.
        toolbar = self.widget('editor_toolbar')
        toolbar.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
        toolbar.addAction(self.action('undo'))
        toolbar.addAction(self.action('redo'))
        toolbar.addSeparator()
        toolbar.addAction(self.action('cut'))
        toolbar.addAction(self.action('copy'))
        toolbar.addAction(self.action('paste'))
        toolbar.addAction(self.action('delete'))
        toolbar.addAction(self.action('purge'))
        toolbar.addSeparator()
        toolbar.addAction(self.action('bring_to_front'))
        toolbar.addAction(self.action('send_to_back'))
        toolbar.addSeparator()
        toolbar.addAction(self.action('swap_edge'))
        toolbar.addWidget(self.widget('button_set_brush'))
        # VIEW TOOLBAR: grid toggling/snapping and diagram centering.
        toolbar = self.widget('view_toolbar')
        toolbar.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
        toolbar.addAction(self.action('toggle_grid'))
        toolbar.addAction(self.action('snap_to_grid'))
        toolbar.addAction(self.action('center_diagram'))
        # GRAPHOL TOOLBAR: profile selector and syntax validation.
        toolbar = self.widget('graphol_toolbar')
        toolbar.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
        toolbar.addWidget(self.widget('profile_switch'))
        toolbar.addAction(self.action('syntax_check'))
        # REASONER TOOLBAR: reasoner selector and consistency checking.
        toolbar = self.widget('reasoner_toolbar')
        toolbar.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
        toolbar.addWidget(self.widget('select_reasoner'))
        toolbar.addAction(self.action('ontology_consistency_check'))
        toolbar.addAction(self.action('decolour_nodes'))
        # Dock every toolbar in the top area.
        self.addToolBar(QtCore.Qt.TopToolBarArea, self.widget('document_toolbar'))
        self.addToolBar(QtCore.Qt.TopToolBarArea, self.widget('editor_toolbar'))
        self.addToolBar(QtCore.Qt.TopToolBarArea, self.widget('view_toolbar'))
        self.addToolBar(QtCore.Qt.TopToolBarArea, self.widget('graphol_toolbar'))
        self.addToolBar(QtCore.Qt.TopToolBarArea, self.widget('reasoner_toolbar'))
    def initWidgets(self):
        """
        Configure application built-in widgets.

        Creates the brush tool button, the profile and reasoner combo boxes
        and the status-bar progress bar, registering each with addWidget().
        """
        # Brush button: popup menu used to recolor selected predicate nodes.
        button = QtWidgets.QToolButton(objectName='button_set_brush')
        button.setIcon(QtGui.QIcon(':/icons/24/ic_format_color_fill_black'))
        button.setMenu(self.menu('brush'))
        button.setPopupMode(QtWidgets.QToolButton.InstantPopup)
        button.setStatusTip('Change the background color of the selected predicate nodes')
        button.setEnabled(False)
        self.addWidget(button)
        # Profile switch: combo box listing the registered ontology profiles.
        combobox = ComboBox(objectName='profile_switch')
        combobox.setEditable(False)
        combobox.setFont(Font('Roboto', 12))
        combobox.setFocusPolicy(QtCore.Qt.StrongFocus)
        combobox.setScrollEnabled(False)
        combobox.setStatusTip('Change the profile of the active project')
        combobox.addItems(self.profileNames())
        connect(combobox.activated, self.doSetProfile)
        self.addWidget(combobox)
        # Collect the display names of the known reasoners.
        reasoner_names = []
        for entry in ReasonerManager.info:
            # NOTE(review): presumably a configparser-style (section, option)
            # lookup reading the 'name' option of a 'reasoner' section — confirm.
            reasoner_name = entry[0].get('reasoner', 'name')
            reasoner_names.append(reasoner_name)
        # Reasoner selector: disabled until reasoners become usable.
        combobox_2 = ComboBox(objectName='select_reasoner')
        combobox_2.setEditable(False)
        combobox_2.setFont(Font('Roboto', 12))
        combobox_2.setFocusPolicy(QtCore.Qt.StrongFocus)
        combobox_2.setScrollEnabled(False)
        combobox_2.setStatusTip('Select one of any available reasoners')
        combobox_2.addItems(reasoner_names)
        combobox_2.setEnabled(False)
        connect(combobox_2.activated, self.doSelectReasoner)
        self.addWidget(combobox_2)
        # Indeterminate progress bar (range 0..0) shown during long tasks.
        progressBar = QtWidgets.QProgressBar(objectName='progress_bar')
        progressBar.setContentsMargins(0, 0, 0, 0)
        progressBar.setFixedSize(222, 14)
        progressBar.setRange(0, 0)
        progressBar.setVisible(False)
        self.addWidget(progressBar)
def initReasoners(self):
self.addReasoners(self.rmanager.init())
#############################################
# SLOTS
#################################
@QtCore.pyqtSlot()
def doBringToFront(self):
"""
Bring the selected item to the top of the diagram.
"""
diagram = self.mdi.activeDiagram()
if diagram:
commands = []
diagram.setMode(DiagramMode.Idle)
for node in diagram.selectedNodes():
zValue = 0
for item in [x for x in node.collidingItems() if x.type() is not Item.Label]:
if item.zValue() >= zValue:
zValue = item.zValue() + 0.2
if zValue != node.zValue():
commands.append(CommandNodeSetDepth(diagram, node, zValue))
if commands:
if len(commands) > 1:
self.undostack.beginMacro('change the depth of {0} nodes'.format(len(commands)))
for command in commands:
self.undostack.push(command)
self.undostack.endMacro()
else:
self.undostack.push(first(commands))
    @QtCore.pyqtSlot()
    def doCenterDiagram(self):
        """
        Center the active diagram.

        Translates every node/edge so the populated area sits in the middle
        of the scene rectangle, then re-centers the view on the origin.
        """
        diagram = self.mdi.activeDiagram()
        if diagram:
            diagram.setMode(DiagramMode.Idle)
            items = diagram.items()
            if items:
                R1 = diagram.sceneRect()
                R2 = diagram.visibleRect(margin=0)
                # Offset needed to balance the empty space on both sides of
                # the populated area, snapped to the diagram grid.
                moveX = snapF(((R1.right() - R2.right()) - (R2.left() - R1.left())) / 2, Diagram.GridSize)
                moveY = snapF(((R1.bottom() - R2.bottom()) - (R2.top() - R1.top())) / 2, Diagram.GridSize)
                if moveX or moveY:
                    items = [x for x in items if x.isNode() or x.isEdge()]
                    # Undoable translation of all nodes and edges.
                    command = CommandItemsTranslate(diagram, items, moveX, moveY, 'center diagram')
                    self.undostack.push(command)
                    self.mdi.activeView().centerOn(0, 0)
    @QtCore.pyqtSlot()
    def doCheckForUpdate(self):
        """
        Execute the update check routine.

        Reads the preferred update channel from the settings (falling back
        to Beta) and runs an UpdateCheckWorker in a background thread,
        wiring its outcome signals to the corresponding slots.
        """
        channel = Channel.Beta
        # SHOW PROGRESS BAR
        progressBar = self.widget('progress_bar')
        progressBar.setToolTip('Checking for updates...')
        progressBar.setVisible(True)
        # RUN THE UPDATE CHECK WORKER IN A THREAD
        try:
            settings = QtCore.QSettings(ORGANIZATION, APPNAME)
            # An invalid stored value leaves the default Beta channel in place.
            channel = Channel.valueOf(settings.value('update/channel', channel, str))
        except TypeError:
            pass
        finally:
            # NOTE: using 'finally' (not plain fall-through) means the worker
            # is started even if an unexpected exception is raised above.
            worker = UpdateCheckWorker(channel, VERSION)
            connect(worker.sgnNoUpdateAvailable, self.onNoUpdateAvailable)
            connect(worker.sgnNoUpdateDataAvailable, self.onNoUpdateDataAvailable)
            connect(worker.sgnUpdateAvailable, self.onUpdateAvailable)
            self.startThread('updateCheck', worker)
    @QtCore.pyqtSlot()
    def doClose(self):
        """
        Close the currently active subwindow.
        """
        # Close the session window first, then notify listeners.
        self.close()
        self.sgnClosed.emit()
    @QtCore.pyqtSlot()
    def doComposePropertyExpression(self):
        """
        Compose a property domain using the selected role/attribute node.

        For each selected role/attribute node, creates the restriction
        node(s) carried by the triggering action's data, connects them with
        input edges and pushes the composition as undoable command(s).
        """
        # Positions already taken by freshly composed restrictions, so that
        # multiple compositions in one run do not overlap each other.
        positions = []
        def compose(scene, source, items):
            """
            Returns a collection of items to be added to the given source node to compose a property expression.
            :type scene: Diagram
            :type source: AbstractNode
            :type items: tuple
            :rtype: set
            """
            collection = set()
            for item in items:
                restriction = scene.factory.create(item)
                edge = scene.factory.create(Item.InputEdge, source=source, target=restriction)
                size = Diagram.GridSize
                # Eight grid-snapped candidate placements around the source
                # node: right, left, above, below and the four diagonals.
                offsets = (
                    QtCore.QPointF(snapF(+source.width() / 2 + 70, size), 0),
                    QtCore.QPointF(snapF(-source.width() / 2 - 70, size), 0),
                    QtCore.QPointF(0, snapF(-source.height() / 2 - 70, size)),
                    QtCore.QPointF(0, snapF(+source.height() / 2 + 70, size)),
                    QtCore.QPointF(snapF(+source.width() / 2 + 70, size), snapF(-source.height() / 2 - 70, size)),
                    QtCore.QPointF(snapF(-source.width() / 2 - 70, size), snapF(-source.height() / 2 - 70, size)),
                    QtCore.QPointF(snapF(+source.width() / 2 + 70, size), snapF(+source.height() / 2 + 70, size)),
                    QtCore.QPointF(snapF(-source.width() / 2 - 70, size), snapF(+source.height() / 2 + 70, size)),
                )
                # Pick the not-yet-used candidate whose bounding area overlaps
                # the fewest existing scene items.
                pos = source.pos() + offsets[0]
                num = sys.maxsize
                rad = QtCore.QPointF(restriction.width() / 2, restriction.height() / 2)
                for o in offsets:
                    if source.pos() + o not in positions:
                        count = len(scene.items(QtCore.QRectF(source.pos() + o - rad, source.pos() + o + rad)))
                        if count < num:
                            num = count
                            pos = source.pos() + o
                restriction.setPos(pos)
                collection.update({restriction, edge})
                positions.append(pos)
            return collection
        diagram = self.mdi.activeDiagram()
        if diagram:
            commands = []
            action = self.sender()
            # The triggering action carries the restriction item types to create.
            elements = action.data()
            diagram.setMode(DiagramMode.Idle)
            supported = {Item.RoleNode, Item.AttributeNode}
            for node in diagram.selectedNodes(lambda x: x.type() in supported):
                name = 'compose {0} restriction(s)'.format(node.shortName)
                addons = compose(diagram, node, elements)
                nodes = {x for x in addons if x.isNode()}
                edges = {x for x in addons if x.isEdge()}
                commands.append(CommandComposeAxiom(name, diagram, node, nodes, edges))
            if commands:
                # Group multiple compositions into a single undo macro.
                if len(commands) > 1:
                    self.undostack.beginMacro('compose attribute/role restriction(s)')
                    for command in commands:
                        self.undostack.push(command)
                    self.undostack.endMacro()
                else:
                    self.undostack.push(first(commands))
def common_commands_for_cut_delete_purge(self,diagram,items):
Duplicate_dict_1 = self.project.copy_IRI_prefixes_nodes_dictionaries(self.project.IRI_prefixes_nodes_dict,
dict())
Duplicate_dict_2 = self.project.copy_IRI_prefixes_nodes_dictionaries(self.project.IRI_prefixes_nodes_dict,
dict())
Dup_1B = self.project.copy_list(self.project.iri_of_cut_nodes, [])
Dup_2B = self.project.copy_list(self.project.iri_of_cut_nodes, [])
iris_to_update = []
nodes_to_update = []
for item in items:
if (('AttributeNode' in str(type(item))) or ('ConceptNode' in str(type(item))) or (
'IndividualNode' in str(type(item))) or ('RoleNode' in str(type(item)))):
iri_of_node = self.project.get_iri_of_node(item)
iris_to_update.append(iri_of_node)
nodes_to_update.append(item)
Dup_1B.append(item)
Dup_1B.append(iri_of_node)
Duplicate_dict_1[iri_of_node][1].remove(item)
commands = []
commands.append(CommandItemsRemove(diagram, items))
commands.append(
CommandProjetSetIRIPrefixesNodesDict(self.project, Duplicate_dict_2, Duplicate_dict_1, iris_to_update,
nodes_to_update))
commands.append(CommandProjetSetIRIofCutNodes(Dup_2B, Dup_1B, self.project))
self.undostack.beginMacro('>>')
for command in commands:
if command:
self.undostack.push(command)
self.undostack.endMacro()
@QtCore.pyqtSlot()
def doCopy(self):
"""
Make a copy of selected items.
"""
self.project.iri_of_cut_nodes[:] = []
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
diagram.pasteX = Clipboard.PasteOffsetX
diagram.pasteY = Clipboard.PasteOffsetY
self.clipboard.update(diagram)
self.sgnUpdateState.emit()
@QtCore.pyqtSlot()
def doCut(self):
"""
Cut selected items from the active diagram.
"""
self.project.iri_of_cut_nodes[:] = []
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
diagram.pasteX = 0
diagram.pasteY = 0
self.clipboard.update(diagram)
self.sgnUpdateState.emit()
items = diagram.selectedItems()
if items:
items.extend([x for item in items if item.isNode() for x in item.edges if x not in items])
self.common_commands_for_cut_delete_purge(diagram, items)
@QtCore.pyqtSlot()
def doDelete(self):
"""
Delete the currently selected items from the active diagram.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
items = diagram.selectedItems()
if items:
items.extend([x for item in items if item.isNode() for x in item.edges if x not in items])
self.common_commands_for_cut_delete_purge(diagram, items)
@QtCore.pyqtSlot()
def doExport(self):
"""
Export the current project.
"""
if not self.project.isEmpty():
dialog = QtWidgets.QFileDialog(self)
dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
dialog.setDirectory(expandPath('~/'))
dialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
dialog.setNameFilters(
# self.ontologyExporterNameFilters() -> .owl
# self.projectExporterNameFilters(except{File.Graphol}) -> .csv
sorted(self.ontologyExporterNameFilters() + self.projectExporterNameFilters({File.Graphol})\
+ self.diagramExporterNameFilters({File.Pdf, File.GraphML})
))
dialog.setViewMode(QtWidgets.QFileDialog.Detail)
dialog.selectFile(self.project.name)
dialog.selectNameFilter(File.Owl.value)
if dialog.exec_():
filetype = File.valueOf(dialog.selectedNameFilter())
try:
worker = self.createOntologyExporter(filetype, self.project, self)
except ValueError:
try:
worker = self.createProjectExporter(filetype, self.project, self)
except ValueError:
arbitrary_diagram = list(self.project.diagrams())[0]
if arbitrary_diagram:
worker = self.createDiagramExporter(filetype, arbitrary_diagram, self)
else:
LOGGER.critical('no diagram present in the project')
worker.run(expandPath(first(dialog.selectedFiles())))
@QtCore.pyqtSlot('QGraphicsScene')
def doFocusDiagram(self, diagram):
"""
Focus the given diagram in the MDI area.
:type diagram: Diagram
"""
subwindow = self.mdi.subWindowForDiagram(diagram)
if not subwindow:
view = self.createDiagramView(diagram)
subwindow = self.createMdiSubWindow(view)
subwindow.showMaximized()
self.mdi.setActiveSubWindow(subwindow)
self.mdi.update()
self.sgnDiagramFocused.emit(diagram)
    @QtCore.pyqtSlot('QGraphicsItem')
    def doFocusItem(self, item):
        """
        Focus an item in its diagram.
        :type item: AbstractItem
        """
        # Bring the item's diagram to the foreground first, so the calls
        # below operate on the right active diagram/view.
        self.sgnFocusDiagram.emit(item.diagram)
        self.mdi.activeDiagram().clearSelection()
        self.mdi.activeView().centerOn(item)
        item.setSelected(True)
    @QtCore.pyqtSlot()
    def doImport(self):
        """
        Import an ontology into the currently active Project.

        Lets the user pick one or more existing files matching a registered
        ontology loader, then runs the appropriate loader on each; a message
        box with the traceback is shown if any import fails.
        """
        dialog = QtWidgets.QFileDialog(self)
        dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptOpen)
        dialog.setDirectory(expandPath('~'))
        dialog.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
        dialog.setViewMode(QtWidgets.QFileDialog.Detail)
        dialog.setNameFilters(self.ontologyLoaderNameFilters())
        if dialog.exec_():
            filetype = File.valueOf(dialog.selectedNameFilter())
            # Keep only existing files matching the chosen name filter.
            selected = [x for x in dialog.selectedFiles() if File.forPath(x) is filetype and fexists(x)]
            if selected:
                try:
                    with BusyProgressDialog(parent=self) as progress:
                        for path in selected:
                            progress.setWindowTitle('Importing {0}...'.format(os.path.basename(path)))
                            worker = self.createOntologyLoader(filetype, path, self.project, self)
                            worker.run()
                except Exception as e:
                    # Report the failure to the user with the full traceback attached.
                    msgbox = QtWidgets.QMessageBox(self)
                    msgbox.setDetailedText(format_exception(e))
                    msgbox.setIconPixmap(QtGui.QIcon(':/icons/48/ic_error_outline_black').pixmap(48))
                    msgbox.setStandardButtons(QtWidgets.QMessageBox.Close)
                    msgbox.setText('Eddy could not import all the selected files!')
                    msgbox.setWindowIcon(QtGui.QIcon(':/icons/128/ic_eddy'))
                    msgbox.setWindowTitle('Import failed!')
                    msgbox.exec_()
    @QtCore.pyqtSlot()
    def doInvertRole(self):
        """
        Swap the direction of all the occurrences of the selected role.

        Every domain restriction attached (directly, or through a role
        inverse node) to any occurrence of the selected role predicate is
        switched to a range restriction and vice versa, as one undo macro.
        """
        def invert(item):
            """
            Invert the type of a node.
            :type item: Item
            :rtype: Item
            """
            if item is Item.DomainRestrictionNode:
                return Item.RangeRestrictionNode
            return Item.DomainRestrictionNode
        # Filters: role nodes, input edges, restriction nodes, role inverse nodes.
        f0 = lambda x: x.type() is Item.RoleNode
        f1 = lambda x: x.type() is Item.InputEdge
        f2 = lambda x: x.type() in {Item.DomainRestrictionNode, Item.RangeRestrictionNode}
        f3 = lambda x: x.type() is Item.RoleInverseNode
        diagram = self.mdi.activeDiagram()
        if diagram:
            diagram.setMode(DiagramMode.Idle)
            node = first(x for x in diagram.selectedNodes(filter_on_nodes=f0))
            if node:
                swappable = set()
                collection = dict()
                # Consider every occurrence of the role predicate project-wide.
                predicates = self.project.predicates(node.type(), node.text())
                for predicate in predicates:
                    # Restrictions fed directly by the role...
                    swappable = set.union(swappable, predicate.outgoingNodes(filter_on_edges=f1, filter_on_nodes=f2))
                    # ...plus restrictions fed through a role inverse node.
                    for inv in predicate.outgoingNodes(filter_on_edges=f1, filter_on_nodes=f3):
                        swappable = set.union(swappable, inv.outgoingNodes(filter_on_edges=f1, filter_on_nodes=f2))
                for xnode in swappable:
                    # Replacement node with inverted type, same position and label.
                    ynode = xnode.diagram.factory.create(invert(xnode.type()))
                    ynode.setPos(xnode.pos())
                    ynode.setText(xnode.text())
                    ynode.setTextPos(xnode.textPos())
                    collection[xnode] = ynode
                if collection:
                    self.undostack.beginMacro("swap '{0}' domain and range".format(node.text()))
                    for xnode, ynode in collection.items():
                        self.undostack.push(CommandNodeSwitchTo(xnode.diagram, xnode, ynode))
                    self.undostack.endMacro()
    @QtCore.pyqtSlot()
    def doLookupOccurrence(self):
        """
        Focus the item which is being held by the supplying QAction.
        """
        # The triggering QAction stores the target item in its data payload.
        self.sgnFocusItem.emit(self.sender().data())
    @QtCore.pyqtSlot()
    def refactorsetprefix(self):
        """
        Move the node carried by the triggering action (and every node with
        the same IRI and remaining characters) to the IRI associated with
        the prefix given by the action's text, as a single undo macro.
        """
        node = self.sender().data()
        to_prefix = self.sender().text()
        # The bare ':' prefix maps to whichever IRI is flagged for display
        # in the prefix widget.
        if to_prefix == ':':
            to_iri = None
            for iri_itr in self.project.IRI_prefixes_nodes_dict.keys():
                if 'display_in_widget' in self.project.IRI_prefixes_nodes_dict[iri_itr][2]:
                    to_iri = iri_itr
                    break
            if to_iri is None:
                # NOTE(review): execution continues with to_iri = None here,
                # which would break the dictionary lookups below — confirm
                # this state is actually unreachable.
                self.statusBar().showMessage(
                    ': prefix does not correspond to any IRI in the widget. please contact developer.')
        else:
            to_iri = self.project.get_iri_for_prefix(to_prefix)
        from_prefix = self.project.get_prefix_of_node(node)
        from_iri = self.project.get_iri_of_node(node)
        #print('from_prefix', from_prefix)
        #print('to_prefix',to_prefix)
        #print('from_iri', from_iri)
        #print('to_iri',to_iri)
        #print('node',node)
        # case 1
        if from_prefix == to_prefix:
            # print('from_prefix == to_prefix')
            return
        # Two snapshots: dict_1 is mutated (redo state), dict_2 stays pristine (undo state).
        Duplicate_dict_1 = self.project.copy_IRI_prefixes_nodes_dictionaries(self.project.IRI_prefixes_nodes_dict, dict())
        Duplicate_dict_2 = self.project.copy_IRI_prefixes_nodes_dictionaries(self.project.IRI_prefixes_nodes_dict, dict())
        commands = []
        # case 2
        if from_iri == to_iri:
            if to_prefix != ':':
                # Move to_prefix to the end of the prefix list — presumably
                # the last entry is the one used for display; confirm.
                Duplicate_dict_1[from_iri][0].remove(to_prefix)
                Duplicate_dict_1[from_iri][0].append(to_prefix)
            commands.append(CommandProjectDisconnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
            commands.append(CommandProjetSetIRIPrefixesNodesDict(self.project, Duplicate_dict_2, Duplicate_dict_1, [from_iri], None))
            commands.append(CommandProjectConnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
        # case 3
        else:
            # Flags set synchronously by the project signals fired while the
            # node entries are moved between IRIs, used for sanity checking.
            metaDataChanged_ADD_OK_var = set()
            metaDataChanged_REMOVE_OK_var = set()
            metaDataChanged_IGNORE_var = set()
            @QtCore.pyqtSlot(str, str, str)
            def metaDataChanged_REMOVE_OK(iri, node, message):
                # print('metaDataChanged_REMOVE_OK -', iri, ',', node, ',', message)
                metaDataChanged_REMOVE_OK_var.add(True)
            @QtCore.pyqtSlot(str, str, str)
            def metaDataChanged_ADD_OK(iri, node, message):
                # print('metaDataChanged_ADD_OK -', iri, ',', node, ',', message)
                metaDataChanged_ADD_OK_var.add(True)
            @QtCore.pyqtSlot(str, str, str)
            def metaDataChanged_IGNORE(iri, node, message):
                # if node.id is None:
                # print('metaDataChanged_IGNORE >', iri, '-', 'None', '-', message)
                # else:
                # print('metaDataChanged_IGNORE >', iri, '-', node, '-', message)
                metaDataChanged_IGNORE_var.add(True)
            connect(self.project.sgnIRINodeEntryAdded, metaDataChanged_ADD_OK)
            connect(self.project.sgnIRINodeEntryRemoved, metaDataChanged_REMOVE_OK)
            connect(self.project.sgnIRINodeEntryIgnored, metaDataChanged_IGNORE)
            # All nodes sharing the IRI and the remaining characters move together.
            list_of_nodes_to_process = []
            for n in self.project.nodes():
                if (self.project.get_iri_of_node(n) == from_iri) and (n.remaining_characters == node.remaining_characters):
                    list_of_nodes_to_process.append(n)
            for n in list_of_nodes_to_process:
                self.project.removeIRINodeEntry(Duplicate_dict_1, from_iri, n)
                self.project.addIRINodeEntry(Duplicate_dict_1, to_iri, n)
            # NOTE(review): the sets only ever receive True, so the two
            # 'False not in' terms are always satisfied; the effective check
            # is that no entry was ignored — confirm this is the intent.
            if ((False not in metaDataChanged_REMOVE_OK_var) and (False not in metaDataChanged_ADD_OK_var)) \
                    and (True not in metaDataChanged_IGNORE_var):
                pass
            else:
                LOGGER.warning('redo != undo but transaction was not executed correctly')
                self.statusBar().showMessage('transaction was not executed correctly for node' + str(n), 15000)
                return
            # part 2
            if to_prefix != ':':
                # Move to_prefix to the end of the destination IRI's prefix list.
                Duplicate_dict_1[to_iri][0].remove(to_prefix)
                Duplicate_dict_1[to_iri][0].append(to_prefix)
            commands.append(CommandProjectDisconnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
            commands.append(CommandProjetSetIRIPrefixesNodesDict(self.project, Duplicate_dict_2, Duplicate_dict_1,
                                                                 [from_iri, to_iri], list_of_nodes_to_process))
            commands.append(CommandProjectConnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
        if any(commands):
            self.undostack.beginMacro('edit {0} refactorsetprefix'.format(node.name))
            for command in commands:
                if command:
                    self.undostack.push(command)
            self.undostack.endMacro()
    @QtCore.pyqtSlot()
    def setprefix(self):
        """
        Move the node stored in the triggering action's data under the IRI
        associated with the chosen prefix (the action's text).

        Unlike refactorsetprefix (which migrates every occurrence of the
        predicate), this slot only affects the single triggering node.
        The change is recorded on the undo stack as one macro.
        """
        node = self.sender().data()
        to_prefix = self.sender().text()
        if to_prefix == ':':
            # The empty prefix maps to whichever IRI is flagged for display
            # in the prefix widget (third slot of the dictionary entry).
            to_iri = None
            for iri_itr in self.project.IRI_prefixes_nodes_dict.keys():
                if 'display_in_widget' in self.project.IRI_prefixes_nodes_dict[iri_itr][2]:
                    to_iri = iri_itr
                    break
            if to_iri is None:
                self.statusBar().showMessage(': prefix does not correspond to any IRI in the widget. please contact developer.')
        else:
            to_iri = self.project.get_iri_for_prefix(to_prefix)
        from_prefix = self.project.get_prefix_of_node(node)
        from_iri = self.project.get_iri_of_node(node)
        # Case 1: the node already carries the requested prefix — nothing to do.
        if from_prefix == to_prefix:
            return
        # Work on two copies of the prefix/nodes dictionary: one to mutate
        # (redo state) and one kept pristine (undo state).
        Duplicate_dict_1 = self.project.copy_IRI_prefixes_nodes_dictionaries(self.project.IRI_prefixes_nodes_dict, dict())
        Duplicate_dict_2 = self.project.copy_IRI_prefixes_nodes_dictionaries(self.project.IRI_prefixes_nodes_dict, dict())
        commands = []
        # Case 2: same IRI, different prefix — promote the chosen prefix by
        # moving it to the end of the prefix list (remove + append).
        if from_iri == to_iri:
            if to_prefix != ':':
                Duplicate_dict_1[from_iri][0].remove(to_prefix)
                Duplicate_dict_1[from_iri][0].append(to_prefix)
            commands.append(
                CommandProjectDisconnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
            commands.append(CommandProjetSetIRIPrefixesNodesDict(self.project, Duplicate_dict_2, Duplicate_dict_1,
                                                                 [from_iri], None))
            commands.append(CommandProjectConnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
        # Case 3: different IRI — move the node across IRI entries and verify
        # the transaction through the project's add/remove/ignore signals.
        else:
            metaDataChanged_ADD_OK_var = set()
            metaDataChanged_REMOVE_OK_var = set()
            metaDataChanged_IGNORE_var = set()
            @QtCore.pyqtSlot(str, str, str)
            def metaDataChanged_REMOVE_OK(iri, node, message):
                metaDataChanged_REMOVE_OK_var.add(True)
            @QtCore.pyqtSlot(str, str, str)
            def metaDataChanged_ADD_OK(iri, node, message):
                metaDataChanged_ADD_OK_var.add(True)
            @QtCore.pyqtSlot(str, str, str)
            def metaDataChanged_IGNORE(iri, node, message):
                metaDataChanged_IGNORE_var.add(True)
            connect(self.project.sgnIRINodeEntryAdded, metaDataChanged_ADD_OK)
            connect(self.project.sgnIRINodeEntryRemoved, metaDataChanged_REMOVE_OK)
            connect(self.project.sgnIRINodeEntryIgnored, metaDataChanged_IGNORE)
            self.project.removeIRINodeEntry(Duplicate_dict_1, from_iri, node)
            self.project.addIRINodeEntry(Duplicate_dict_1, to_iri, node)
            # Abort unless both the removal and the addition were acknowledged
            # and neither was ignored.
            if ((False not in metaDataChanged_REMOVE_OK_var) and (False not in metaDataChanged_ADD_OK_var)) \
                    and(True not in metaDataChanged_IGNORE_var):
                pass
            else:
                LOGGER.warning('redo != undo but transaction was not executed correctly')
                self.statusBar().showMessage('transaction was not executed correctly for node' + str(node),15000)
                return
            # part 2: also promote the target prefix for the destination IRI.
            if to_prefix != ':':
                Duplicate_dict_1[to_iri][0].remove(to_prefix)
                Duplicate_dict_1[to_iri][0].append(to_prefix)
            commands.append(
                CommandProjectDisconnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
            commands.append(CommandProjetSetIRIPrefixesNodesDict(self.project, Duplicate_dict_2, Duplicate_dict_1,
                                                                 [from_iri, to_iri], [node]))
            commands.append(CommandProjectConnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
        if any(commands):
            self.undostack.beginMacro('edit {0} setprefix'.format(node.name))
            for command in commands:
                if command:
                    self.undostack.push(command)
            self.undostack.endMacro()
@QtCore.pyqtSlot()
def doNewDiagram(self):
"""
Create a new diagram.
"""
form = NewDiagramForm(self.project, self)
if form.exec_() == NewDiagramForm.Accepted:
settings = QtCore.QSettings(ORGANIZATION, APPNAME)
size = settings.value('diagram/size', 5000, int)
name = form.nameField.value()
diagram = Diagram.create(name, size, self.project)
connect(diagram.sgnItemAdded, self.project.doAddItem)
connect(diagram.sgnItemRemoved, self.project.doRemoveItem)
connect(diagram.selectionChanged, self.doUpdateState)
self.undostack.push(CommandDiagramAdd(diagram, self.project))
self.sgnFocusDiagram.emit(diagram)
@QtCore.pyqtSlot()
def doOpen(self):
"""
Open a project in a new session.
"""
settings = QtCore.QSettings(ORGANIZATION, APPNAME)
workspace = settings.value('workspace/home', WORKSPACE, str)
dialog = QtWidgets.QFileDialog(self)
dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptOpen)
dialog.setDirectory(expandPath(workspace))
dialog.setFileMode(QtWidgets.QFileDialog.Directory)
dialog.setOption(QtWidgets.QFileDialog.ShowDirsOnly, True)
dialog.setViewMode(QtWidgets.QFileDialog.Detail)
if dialog.exec_() == QtWidgets.QFileDialog.Accepted:
self.app.sgnCreateSession.emit(expandPath(first(dialog.selectedFiles())))
@QtCore.pyqtSlot()
def doOpenRecent(self):
"""
Open a recent project in a new session.
"""
action = self.sender()
path = expandPath(action.data())
if path != expandPath(self.project.path):
self.app.sgnCreateSession.emit(expandPath(action.data()))
@QtCore.pyqtSlot()
def doOpenDialog(self):
"""
Open a dialog window by initializing it using the class stored in action data.
"""
action = self.sender()
dialog = action.data()
window = dialog(self)
window.hide()
window.setWindowModality(QtCore.Qt.NonModal)
window.show()
window.exec_()
@QtCore.pyqtSlot()
def doOpenURL(self):
"""
Open a URL using the operating system default browser.
"""
action = self.sender()
weburl = action.data()
if weburl:
# noinspection PyTypeChecker,PyCallByClass,PyCallByClass
QtGui.QDesktopServices.openUrl(QtCore.QUrl(weburl))
@QtCore.pyqtSlot()
def doOpenDiagramProperties(self):
"""
Executed when scene properties needs to be displayed.
"""
diagram = self.sender().data() or self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
properties = self.pf.create(diagram)
properties.exec_()
@QtCore.pyqtSlot()
def doOpenNodeProperties(self):
"""
Executed when node properties needs to be displayed.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
node = first(diagram.selectedNodes())
if node:
properties = self.pf.create(diagram, node)
properties.exec_()
@QtCore.pyqtSlot()
def doOpenNodeDescription(self):
"""
Executed when node description needs to be displayed.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
node = first(diagram.selectedNodes())
if node:
description = self.df.create(diagram, node)
description.exec()
@QtCore.pyqtSlot()
def doPaste(self):
"""
Paste previously copied items.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
if not self.clipboard.empty():
self.clipboard.paste(diagram, diagram.mp_Pos)
@QtCore.pyqtSlot()
def doPrint(self):
"""
Print the active diagram.
"""
diagram = self.mdi.activeDiagram()
if diagram:
worker = PrinterDiagramExporter(diagram, self)
worker.run()
@QtCore.pyqtSlot()
def doPurge(self):
"""
Delete the currently selected items by also removing no more necessary elements.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
items = set(diagram.selectedItems())
purge = set()
for item in items:
if item.isNode():
for node in item.definition():
if item.isConstructor():
if node not in items:
# Here we examine a node which is included in the definition of a node
# in the original selection, but it's not included in the selection itself.
# If the node contribute only to the definition on this node and has no
# relation with any other node in the diagram, which is not in the original
# item selection, we will remove it.
if node.adjacentNodes(filter_on_nodes=lambda x: x not in items):
continue
purge.add(node)
collection = list(items | purge)
if collection:
collection.extend(
[x for item in collection if item.isNode() for x in item.edges if x not in collection])
self.common_commands_for_cut_delete_purge(diagram, collection)
#self.undostack.push(CommandItemsRemove(diagram, collection))
    @QtCore.pyqtSlot()
    def doQuit(self):
        """
        Quit Eddy.
        """
        # Close this window first, then notify listeners that the app should quit.
        self.close()
        self.sgnQuit.emit()
@QtCore.pyqtSlot()
def doRefactorBrush(self):
"""
Change the node brush for all the predicate nodes matching the selected predicate.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fn = lambda x: x.type() in {Item.ConceptNode, Item.RoleNode, Item.AttributeNode, Item.IndividualNode}
node = first(diagram.selectedNodes(filter_on_nodes=fn))
if node:
action = self.sender()
color = action.data()
nodes = self.project.predicates(node.type(), node.text())
self.undostack.push(CommandNodeSetBrush(diagram, nodes, QtGui.QBrush(QtGui.QColor(color.value))))
@QtCore.pyqtSlot()
def doRefactorName(self):
"""
Rename all the instance of the selected predicate node.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fn = lambda x: x.type() in {Item.ConceptNode, Item.RoleNode, Item.AttributeNode, Item.IndividualNode}
node = first(diagram.selectedNodes(filter_on_nodes=fn))
if node:
dialog = RefactorNameForm(node, self)
dialog.exec_()
@QtCore.pyqtSlot()
def doRelocateLabel(self):
"""
Reset the selected node label to its default position.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fn = lambda x: x.label is not None
node = first(diagram.selectedNodes(filter_on_nodes=fn))
if node and node.label.isMovable():
undo = node.label.pos()
redo = node.label.defaultPos()
self.undostack.push(CommandLabelMove(diagram, node, undo, redo))
@QtCore.pyqtSlot()
def doRemoveBreakpoint(self):
"""
Remove the edge breakpoint specified in the action triggering this slot.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
action = self.sender()
edge, breakpoint = action.data()
if 0 <= breakpoint < len(edge.breakpoints):
self.undostack.push(CommandEdgeBreakpointRemove(diagram, edge, breakpoint))
@QtCore.pyqtSlot()
def doRemoveDiagram(self):
"""
Removes a diagram from the current project.
"""
action = self.sender()
diagram = action.data()
if diagram:
self.undostack.push(CommandDiagramRemove(diagram, self.project))
@QtCore.pyqtSlot()
def doRenameDiagram(self):
"""
Renames a diagram.
"""
action = self.sender()
diagram = action.data()
if diagram:
form = RenameDiagramForm(self.project, diagram, self)
if form.exec_() == RenameDiagramForm.Accepted:
name = form.nameField.value()
self.undostack.push(CommandDiagramRename(diagram.name, name, diagram, self.project))
@QtCore.pyqtSlot()
def doSave(self):
"""
Save the current project.
"""
try:
worker = self.createProjectExporter(File.Graphol, self.project, self)
worker.run()
except Exception as e:
msgbox = QtWidgets.QMessageBox(self)
msgbox.setDetailedText(format_exception(e))
msgbox.setIconPixmap(QtGui.QIcon(':/icons/48/ic_error_outline_black').pixmap(48))
msgbox.setStandardButtons(QtWidgets.QMessageBox.Close)
msgbox.setText('Eddy could not save the current project!')
msgbox.setWindowIcon(QtGui.QIcon(':/icons/128/ic_eddy'))
msgbox.setWindowTitle('Save failed!')
msgbox.exec_()
else:
self.undostack.setClean()
self.sgnProjectSaved.emit()
    @QtCore.pyqtSlot()
    def doSaveAs(self):
        """
        Creates a copy of the currently open diagram, exported in a format
        chosen through a save-file dialog.
        """
        diagram = self.mdi.activeDiagram()
        if diagram:
            dialog = QtWidgets.QFileDialog(self)
            dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
            dialog.setDirectory(expandPath('~/'))
            dialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
            dialog.setNameFilters(self.diagramExporterNameFilters({File.Xml}))
            dialog.setViewMode(QtWidgets.QFileDialog.Detail)
            dialog.selectFile(diagram.name)
            # NOTE(review): the Pdf name filter is pre-selected here while the
            # filter list above is built from {File.Xml} — confirm that
            # File.Pdf.value is actually among the filters returned by
            # diagramExporterNameFilters (selecting a missing filter is a no-op).
            dialog.selectNameFilter(File.Pdf.value)
            if dialog.exec_():
                filetype = File.valueOf(dialog.selectedNameFilter())
                worker = self.createDiagramExporter(filetype, diagram, self)
                worker.run(expandPath(first(dialog.selectedFiles())))
@QtCore.pyqtSlot()
def doSelectAll(self):
"""
Select all the items in the active diagrsm.
"""
diagram = self.mdi.activeDiagram()
if diagram:
path = QtGui.QPainterPath()
path.addRect(diagram.sceneRect())
diagram.setSelectionArea(path)
diagram.setMode(DiagramMode.Idle)
@QtCore.pyqtSlot()
def doSendToBack(self):
"""
Send the selected item to the back of the diagram.
"""
diagram = self.mdi.activeDiagram()
if diagram:
commands = []
diagram.setMode(DiagramMode.Idle)
for node in diagram.selectedNodes():
zValue = 0
for item in [x for x in node.collidingItems() if x.type() is not Item.Label]:
if item.zValue() <= zValue:
zValue = item.zValue() - 0.2
if zValue != node.zValue():
commands.append(CommandNodeSetDepth(diagram, node, zValue))
if commands:
if len(commands) > 1:
self.undostack.beginMacro('change the depth of {0} nodes'.format(len(commands)))
for command in commands:
self.undostack.push(command)
self.undostack.endMacro()
else:
self.undostack.push(first(commands))
@QtCore.pyqtSlot()
def doSetNodeBrush(self):
"""
Set the brush of selected nodes.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
action = self.sender()
color = action.data()
brush = QtGui.QBrush(QtGui.QColor(color.value))
supported = {Item.ConceptNode, Item.RoleNode, Item.AttributeNode, Item.IndividualNode}
fn = lambda x: x.type() in supported and x.brush() != brush
selected = diagram.selectedNodes(filter_on_nodes=fn)
if selected:
self.undostack.push(CommandNodeSetBrush(diagram, selected, brush))
@QtCore.pyqtSlot()
def doSetPropertyRestriction(self):
"""
Set a property domain / range restriction.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fn = lambda x: x.type() in {Item.DomainRestrictionNode, Item.RangeRestrictionNode}
node = first(diagram.selectedNodes(filter_on_nodes=fn))
if node:
data = None
action = self.sender()
restriction = action.data()
if restriction is not Restriction.Cardinality:
data = restriction.toString()
else:
form = CardinalityRestrictionForm(self)
if form.exec_() == CardinalityRestrictionForm.Accepted:
data = restriction.toString(form.min(), form.max())
if data and node.text() != data:
name = 'change {0} to {1}'.format(node.shortName, data)
self.undostack.push(CommandLabelChange(diagram, node, node.text(), data, name=name))
    @QtCore.pyqtSlot()
    def doSetIndividualAs(self):
        """
        Set an individual node either to Individual or Value.
        Will bring up the Value Form if needed.

        Switching Value -> Individual re-labels the node with the default
        template and moves it back under the project IRI; the whole change
        is recorded on the undo stack as one macro.
        """
        diagram = self.mdi.activeDiagram()
        if diagram:
            diagram.setMode(DiagramMode.Idle)
            fn = lambda x: x.type() is Item.IndividualNode
            node = first(diagram.selectedNodes(filter_on_nodes=fn))
            if node:
                action = self.sender()
                if action.data() is Identity.Individual:
                    if node.identity() is Identity.Value:
                        data = node.label.template
                        name = 'change {0} to {1}'.format(node.text(), data)
                        # Two dictionary copies: one mutated (redo), one pristine (undo).
                        Duplicate_dict_1 = self.project.copy_IRI_prefixes_nodes_dictionaries\
                            (self.project.IRI_prefixes_nodes_dict,dict())
                        Duplicate_dict_2 = self.project.copy_IRI_prefixes_nodes_dictionaries\
                            (self.project.IRI_prefixes_nodes_dict,dict())
                        old_iri = self.project.get_iri_of_node(node)
                        new_iri = self.project.iri
                        # Compose the new label from the project prefix when one
                        # is set, otherwise from the full IRI.
                        if self.project.prefix is None:
                            new_label = self.project.get_full_IRI(new_iri,None,data)
                        else:
                            new_label = str(self.project.prefix+':'+data)
                        # Move the node from the old IRI entry to the project IRI entry.
                        Duplicate_dict_1[old_iri][1].remove(node)
                        Duplicate_dict_1[new_iri][1].add(node)
                        commands = []
                        commands.append(CommandProjectDisconnectSpecificSignals(self.project))
                        commands.append(CommandLabelChange(diagram, node, node.text(), new_label))
                        commands.append(CommandProjetSetIRIPrefixesNodesDict(self.project,Duplicate_dict_2,Duplicate_dict_1, [old_iri, new_iri], [node]))
                        commands.append(CommandNodeSetRemainingCharacters(node.remaining_characters, data, node, self.project))
                        # NOTE(review): the label change command is pushed twice
                        # (before and after the dict/remaining-characters updates) —
                        # presumably intentional to force relabelling; confirm.
                        commands.append(CommandLabelChange(diagram, node, node.text(), new_label))
                        commands.append(CommandProjectConnectSpecificSignals(self.project))
                        if any(commands):
                            self.undostack.beginMacro('edit Forms >> accept() {0}'.format(node))
                            for command in commands:
                                if command:
                                    self.undostack.push(command)
                            self.undostack.endMacro()
                elif action.data() is Identity.Value:
                    # The value form collects datatype and value for the node.
                    form = ValueForm(node, self)
                    form.exec_()
    @QtCore.pyqtSlot()
    def doSetNodeSpecial(self):
        """
        Set the special type (TOP / BOTTOM) of the selected node, moving it
        under the OWL 2 namespace IRI with the matching reserved name.
        """
        diagram = self.mdi.activeDiagram()
        if diagram:
            diagram.setMode(DiagramMode.Idle)
            action = self.sender()
            fn = lambda x: x.type() in {Item.ConceptNode, Item.RoleNode, Item.AttributeNode}
            node = first(diagram.selectedNodes(filter_on_nodes=fn))
            if node:
                special = action.data()
                data = special.value
                if node.text() != data:
                    name = 'change {0} to {1}'.format(node.shortName, data)
                    old_iri = self.project.get_iri_of_node(node)
                    new_iri = 'http://www.w3.org/2002/07/owl'
                    # Map the node type onto its [BOTTOM, TOP] reserved OWL names.
                    if node.type() is Item.ConceptNode:
                        new_rc_lst = ['Nothing','Thing']
                    if node.type() is Item.RoleNode:
                        new_rc_lst = ['BottomObjectProperty','TopObjectProperty']
                    if node.type() is Item.AttributeNode:
                        new_rc_lst = ['BottomDataProperty','TopDataProperty']
                    # NOTE(review): new_rc is only bound when data is 'TOP' or
                    # 'BOTTOM' — presumably special.value is always one of the
                    # two, otherwise the code below raises NameError; confirm.
                    if data == 'TOP':
                        new_rc = new_rc_lst[1]
                    elif data == 'BOTTOM':
                        new_rc = new_rc_lst[0]
                    # Two dictionary copies: one mutated (redo), one pristine (undo).
                    Duplicate_dict_1 = self.project.copy_IRI_prefixes_nodes_dictionaries \
                        (self.project.IRI_prefixes_nodes_dict, dict())
                    Duplicate_dict_2 = self.project.copy_IRI_prefixes_nodes_dictionaries \
                        (self.project.IRI_prefixes_nodes_dict, dict())
                    Duplicate_dict_1[old_iri][1].remove(node)
                    Duplicate_dict_1[new_iri][1].add(node)
                    commands = []
                    commands.append(CommandProjectDisconnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
                    commands.append(CommandProjetSetIRIPrefixesNodesDict(self.project, Duplicate_dict_2, Duplicate_dict_1, [old_iri, new_iri], [node]))
                    commands.append(CommandNodeSetRemainingCharacters(node.remaining_characters, new_rc, node, diagram.project))
                    commands.append(CommandProjectConnectSpecificSignals(self.project, regenerate_label_of_nodes_for_iri=False))
                    if any(commands):
                        self.undostack.beginMacro('edit {0} doSetNodeSpecial'.format(node))
                        for command in commands:
                            if command:
                                self.undostack.push(command)
                        self.undostack.endMacro()
@QtCore.pyqtSlot()
def doSetDatatype(self):
"""
Set the datatype of the selected value-domain node.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fn = lambda x: x.type() is Item.ValueDomainNode
node = first(diagram.selectedNodes(filter_on_nodes=fn))
if node:
action = self.sender()
datatype = action.data()
data = datatype.value
if node.text() != data:
name = 'change {0} to {1}'.format(node.shortName, data)
self.undostack.push(CommandLabelChange(diagram, node, node.text(), data, name=name))
@QtCore.pyqtSlot()
def doSetFacet(self):
"""
Set the facet of a Facet node.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fn = lambda x: x.type() is Item.FacetNode
node = first(diagram.selectedNodes(filter_on_nodes=fn))
if node:
action = self.sender()
facet = action.data()
if facet != node.facet:
data = node.compose(facet, node.value)
name = 'change {0} to {1}'.format(node.facet.value, facet.value)
self.undostack.push(CommandLabelChange(diagram, node, node.text(), data, name=name))
@QtCore.pyqtSlot()
def doSetProfile(self):
"""
Set the currently used project profile.
"""
widget = self.widget('profile_switch')
profile = widget.currentText()
if self.project.profile.name() != profile:
self.undostack.push(CommandProjectSetProfile(self.project, self.project.profile.name(), profile))
widget.clearFocus()
@QtCore.pyqtSlot()
def doSnapTopGrid(self):
"""
Snap all the element in the active diagram to the grid.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
data = {'redo': {'nodes': {}, 'edges': {}}, 'undo': {'nodes': {}, 'edges': {}}}
for item in diagram.items():
if item.isNode():
undoPos = item.pos()
redoPos = snap(undoPos, Diagram.GridSize)
if undoPos != redoPos:
data['undo']['nodes'][item] = {
'pos': undoPos,
'anchors': {k: v for k, v in item.anchors.items()}
}
data['redo']['nodes'][item] = {
'pos': redoPos,
'anchors': {k: v + redoPos - undoPos for k, v in item.anchors.items()}
}
elif item.isEdge():
undoPts = item.breakpoints
redoPts = [snap(x, Diagram.GridSize) for x in undoPts]
if undoPts != redoPts:
data['undo']['edges'][item] = {'breakpoints': undoPts}
data['redo']['edges'][item] = {'breakpoints': redoPts}
if data['undo']['nodes'] or data['undo']['edges']:
self.undostack.push(CommandSnapItemsToGrid(diagram, data))
@QtCore.pyqtSlot()
def doSwapEdge(self):
"""
Swap the selected edges by inverting source/target points.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fe = lambda x: x.isSwapAllowed()
selected = diagram.selectedEdges(filter_on_edges=fe)
if selected:
self.undostack.push(CommandEdgeSwap(diagram, selected))
@QtCore.pyqtSlot()
def doSwitchOperatorNode(self):
"""
Switch the selected operator node to a different type.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fn = lambda x: Item.UnionNode <= x.type() <= Item.DisjointUnionNode
node = first([x for x in diagram.selectedNodes(filter_on_nodes=fn)])
if node:
action = self.sender()
if node.type() is not action.data():
xnode = diagram.factory.create(action.data())
xnode.setPos(node.pos())
self.undostack.push(CommandNodeSwitchTo(diagram, node, xnode))
@QtCore.pyqtSlot()
def doSwitchRestrictionNode(self):
"""
Switch the selected restriction node to a different type.
"""
diagram = self.mdi.activeDiagram()
if diagram:
diagram.setMode(DiagramMode.Idle)
fn = lambda x: x.type() in {Item.DomainRestrictionNode, Item.RangeRestrictionNode}
node = first([x for x in diagram.selectedNodes(filter_on_nodes=fn)])
if node:
action = self.sender()
if node.type() is not action.data():
xnode = diagram.factory.create(action.data())
xnode.setPos(node.pos())
xnode.setText(node.text())
xnode.setTextPos(node.textPos())
self.undostack.push(CommandNodeSwitchTo(diagram, node, xnode))
@QtCore.pyqtSlot()
def doSyntaxCheck(self):
"""
Perform syntax checking on the active diagram.
"""
dialog = SyntaxValidationDialog(self.project, self)
dialog.exec_()
@QtCore.pyqtSlot()
def doSelectReasoner(self):
"""
Set the currently used project profile.
"""
widget = self.widget('select_reasoner')
reasoner = widget.currentText()
widget.clearFocus()
@QtCore.pyqtSlot()
def doOntologyConsistencyCheck(self):
"""
Perform Ontology Consistency checking on the active ontology/diagram.
"""
dialog = OntologyConsistencyCheckDialog(self.project, self)
dialog.exec_()
@QtCore.pyqtSlot()
def doOpenOntologyExplorer(self):
"""
Perform Ontology Consistency checking on the active ontology/diagram.
"""
dialog = OntologyExplorerDialog(self.project, self)
dialog.exec_()
@QtCore.pyqtSlot()
def BackgrounddeColourNodesAndEdges(self,**kwargs):
call_update_node = kwargs.get('call_updateNode',True)
call_ClearInconsistentEntitiesAndDiagItemsData = kwargs.get('call_ClearInconsistentEntitiesAndDiagItemsData',True)
brush = QtGui.QBrush(QtCore.Qt.NoBrush)
all_nodes = list(self.project.nodes())
all_edges = list(self.project.edges())
for node in all_nodes:
node.selection.setBrush(brush)
node.setCacheMode(AbstractItem.NoCache)
node.setCacheMode(AbstractItem.DeviceCoordinateCache)
node.update(node.boundingRect())
if call_update_node is True:
node.updateNode()
for edge in all_edges:
edge.selection.setBrush(brush)
edge.setCacheMode(AbstractItem.NoCache)
edge.setCacheMode(AbstractItem.DeviceCoordinateCache)
edge.update(edge.boundingRect())
if call_ClearInconsistentEntitiesAndDiagItemsData:
self.ClearInconsistentEntitiesAndDiagItemsData()
diags = self.project.diagrams()
for d in diags:
d.sgnUpdated.emit()
@QtCore.pyqtSlot()
def ClearInconsistentEntitiesAndDiagItemsData(self):
self.project.ontology_OWL = None
self.project.axioms_to_nodes_edges_mapping = None
self.project.unsatisfiable_classes = []
self.project.explanations_for_unsatisfiable_classes = []
self.project.unsatisfiable_attributes = []
self.project.explanations_for_unsatisfiable_attributes = []
self.project.unsatisfiable_roles = []
self.project.explanations_for_unsatisfiable_roles = []
self.project.inconsistent_ontology = None
self.project.explanations_for_inconsistent_ontology = []
self.project.uc_as_input_for_explanation_explorer = None
self.project.nodes_of_unsatisfiable_entities = []
self.project.nodes_or_edges_of_axioms_to_display_in_widget = []
self.project.nodes_or_edges_of_explanations_to_display_in_widget = []
self.project.converted_nodes = dict()
@QtCore.pyqtSlot()
def doToggleGrid(self):
"""
Toggle snap to grid setting and viewport display.
"""
settings = QtCore.QSettings(ORGANIZATION, APPNAME)
settings.setValue('diagram/grid', self.action('toggle_grid').isChecked())
settings.sync()
for subwindow in self.mdi.subWindowList():
subwindow.view.setGridSize(Diagram.GridSize)
viewport = subwindow.view.viewport()
viewport.update()
    @QtCore.pyqtSlot()
    def doUpdateState(self):
        """
        Update built-in actions according to the application state.

        Computes a set of flags from the current project, clipboard, undo stack
        and diagram selection, then enables/disables each built-in action.
        """
        isDomainRangeUsable = False
        isDiagramActive = False
        isClipboardEmpty = True
        isEdgeSelected = False
        isEdgeSwapEnabled = False
        isNodeSelected = False
        isPredicateSelected = False
        isProjectEmpty = self.project.isEmpty()
        isUndoStackClean = self.undostack.isClean()
        if self.mdi.subWindowList():
            diagram = self.mdi.activeDiagram()
            # Node types that can take a domain/range restriction.
            restrictables = {Item.AttributeNode, Item.RoleNode}
            predicates = {Item.ConceptNode, Item.AttributeNode, Item.RoleNode, Item.IndividualNode}
            if diagram:
                nodes = diagram.selectedNodes()
                edges = diagram.selectedEdges()
                isDiagramActive = True
                isClipboardEmpty = self.clipboard.empty()
                isEdgeSelected = first(edges) is not None
                isNodeSelected = first(nodes) is not None
                isDomainRangeUsable = any([x.type() in restrictables for x in nodes])
                isPredicateSelected = any([x.type() in predicates for x in nodes])
                if isEdgeSelected:
                    # Swap is enabled as soon as one selected edge allows it.
                    for edge in edges:
                        isEdgeSwapEnabled = edge.isSwapAllowed()
                        if isEdgeSwapEnabled:
                            break
        self.action('bring_to_front').setEnabled(isNodeSelected)
        self.action('center_diagram').setEnabled(isDiagramActive)
        self.action('cut').setEnabled(isNodeSelected)
        self.action('copy').setEnabled(isNodeSelected)
        self.action('delete').setEnabled(isNodeSelected or isEdgeSelected)
        self.action('purge').setEnabled(isNodeSelected)
        self.action('export').setEnabled(not isProjectEmpty)
        self.action('paste').setEnabled(not isClipboardEmpty)
        self.action('property_domain').setEnabled(isDomainRangeUsable)
        self.action('property_domain_range').setEnabled(isDomainRangeUsable)
        self.action('property_range').setEnabled(isDomainRangeUsable)
        self.action('save').setEnabled(not isUndoStackClean)
        self.action('save_as').setEnabled(isDiagramActive)
        self.action('select_all').setEnabled(isDiagramActive)
        self.action('send_to_back').setEnabled(isNodeSelected)
        self.action('snap_to_grid').setEnabled(isDiagramActive)
        self.action('syntax_check').setEnabled(not isProjectEmpty)
        self.action('swap_edge').setEnabled(isEdgeSelected and isEdgeSwapEnabled)
        self.action('toggle_grid').setEnabled(isDiagramActive)
        self.widget('button_set_brush').setEnabled(isPredicateSelected)
        self.widget('profile_switch').setCurrentText(self.project.profile.name())
        # Reasoner-related controls only make sense with an active diagram.
        if self.mdi.activeDiagram():
            self.widget('select_reasoner').setEnabled(not isProjectEmpty)
            self.action('decolour_nodes').setEnabled(not isProjectEmpty)
            self.action('ontology_consistency_check').setEnabled(not isProjectEmpty)
@QtCore.pyqtSlot()
def onNoUpdateAvailable(self):
"""
Executed when the update worker thread terminates and no software update is available.
"""
progressBar = self.widget('progress_bar')
progressBar.setToolTip('')
progressBar.setVisible(False)
self.addNotification('No update available.')
    @QtCore.pyqtSlot()
    def onNoUpdateDataAvailable(self):
        """
        Executed when the update worker thread terminates abnormally.
        """
        # Hide the progress indicator and notify the user of the failure.
        progressBar = self.widget('progress_bar')
        progressBar.setToolTip('')
        progressBar.setVisible(False)
        self.addNotification(textwrap.dedent("""
        <b><font color="#7E0B17">ERROR</font></b>: Could not connect to update site:
        unable to get update information.
        """))
@QtCore.pyqtSlot()
def onSessionReady(self):
"""
Executed when the session is initialized.
"""
## CONNECT PROJECT SPECIFIC SIGNALS
connect(self.project.sgnDiagramRemoved, self.mdi.onDiagramRemoved)
## CHECK FOR UPDATES ON STARTUP
settings = QtCore.QSettings(ORGANIZATION, APPNAME)
if settings.value('update/check_on_startup', True, bool):
action = self.action('check_for_updates')
action.trigger()
    @QtCore.pyqtSlot(str, str)
    def onUpdateAvailable(self, name, url):
        """
        Executed when the update worker thread terminates and a new software update is available.
        :type name: str
        :type url: str
        """
        # Hide the progress indicator and show a download link notification.
        progressBar = self.widget('progress_bar')
        progressBar.setToolTip('')
        progressBar.setVisible(False)
        self.addNotification(textwrap.dedent("""
        A new version of {} is available for download: <a href="{}"><b>{}</b></a>""".format(APPNAME, url, name)))
#############################################
# EVENTS
#################################
def closeEvent(self, closeEvent):
"""
Executed when the main window is closed.
:type closeEvent: QCloseEvent
"""
close = True
save = False
if not self.undostack.isClean():
msgbox = QtWidgets.QMessageBox(self)
msgbox.setIconPixmap(QtGui.QIcon(':/icons/48/ic_question_outline_black').pixmap(48))
msgbox.setWindowIcon(QtGui.QIcon(':/icons/128/ic_eddy'))
msgbox.setWindowTitle('Save changes?')
msgbox.setStandardButtons(
QtWidgets.QMessageBox.Cancel | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Yes)
msgbox.setText('Your project contains unsaved changes. Do you want to save?')
msgbox.exec_()
if msgbox.result() == QtWidgets.QMessageBox.Cancel:
close = False
elif msgbox.result() == QtWidgets.QMessageBox.No:
save = False
elif msgbox.result() == QtWidgets.QMessageBox.Yes:
save = True
if not close:
closeEvent.ignore()
else:
## SAVE THE CURRENT PROJECT IF NEEDED
if save:
self.sgnSaveProject.emit()
## DISPOSE ALL THE PLUGINS
for plugin in self.plugins():
self.pmanager.dispose(plugin)
self.pmanager.clear()
## DISPOSE ALL THE RUNNING THREADS
self.stopRunningThreads()
## HIDE ALL THE NOTIFICATION POPUPS
self.hideNotifications()
## SHUTDOWN THE ACTIVE SESSION
self.sgnClosed.emit()
closeEvent.accept()
LOGGER.info('Session shutdown completed: %s v%s [%s]', APPNAME, VERSION, self.project.name)
def keyPressEvent(self, keyEvent):
"""
Executed when a keyboard button is pressed
:type keyEvent: QKeyEvent
"""
if _MACOS:
if keyEvent.key() == QtCore.Qt.Key_Backspace:
action = self.action('delete')
action.trigger()
super().keyPressEvent(keyEvent)
def keyReleaseEvent(self, keyEvent):
"""
Executed when a keyboard button is released.
:type keyEvent: QKeyEvent
"""
if keyEvent.key() == QtCore.Qt.Key_Control:
diagram = self.mdi.activeDiagram()
if diagram and not diagram.isEdgeAdd():
diagram.setMode(DiagramMode.Idle)
super().keyReleaseEvent(keyEvent)
def showEvent(self, showEvent):
"""
Executed when the window is shown.
:type showEvent: QShowEvent
"""
self.setWindowState((self.windowState() & ~QtCore.Qt.WindowMinimized) | QtCore.Qt.WindowActive)
self.activateWindow()
self.raise_()
#############################################
# INTERFACE
#################################
def createDiagramView(self, diagram):
"""
Create a new diagram view displaying the given diagram.
:type diagram: Diagram
:rtype: DigramView
"""
view = DiagramView(diagram, self)
view.centerOn(0, 0)
return view
def createMdiSubWindow(self, widget):
"""
Create a subwindow in the MDI area that displays the given widget.
:type widget: QWidget
:rtype: MdiSubWindow
"""
subwindow = MdiSubWindow(widget)
subwindow = self.mdi.addSubWindow(subwindow)
subwindow.showMaximized()
return subwindow
def save(self):
"""
Save the current session state.
"""
settings = QtCore.QSettings(ORGANIZATION, APPNAME)
settings.setValue('session/geometry', self.saveGeometry())
settings.setValue('session/state', self.saveState())
settings.sync()
def setWindowTitle(self, project, diagram=None):
"""
Set the main window title.
:type project: Project
:type diagram: Diagram
"""
title = '{0} - [{1}]'.format(project.name, shortPath(project.path))
if diagram:
title = '{0} - {1}'.format(diagram.name, title)
super().setWindowTitle(title) | ashwingoldfish/eddy | eddy/ui/session.py | Python | gpl-3.0 | 111,490 | [
"VisIt"
] | fae2fc900d47050624198f5c6179668f0f1ad7a9645e460aa8fdc14b8cd2b666 |
"""
KeepNote
Editor widget in main window
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@alum.mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import gettext
import sys, os
# pygtk imports
import pygtk
pygtk.require('2.0')
from gtk import gdk
import gtk.glade
import gobject
try:
raise ImportError()
from gtksourceview2 import View as SourceView
from gtksourceview2 import Buffer as SourceBuffer
from gtksourceview2 import LanguageManager as SourceLanguageManager
except ImportError:
SourceView = None
# keepnote imports
import keepnote
from keepnote import \
KeepNoteError, is_url, unicode_gtk
from keepnote.notebook import \
NoteBookError, \
get_node_url, \
parse_node_url, \
is_node_url
from keepnote import notebook as notebooklib
from keepnote import safefile
from keepnote.gui import richtext
from keepnote.gui.richtext import \
RichTextView, RichTextBuffer, \
RichTextIO, RichTextError, RichTextImage
from keepnote.gui.richtext.richtext_tags import \
RichTextTagTable, RichTextLinkTag
from keepnote.gui import \
CONTEXT_MENU_ACCEL_PATH, \
FileChooserDialog, \
get_pixbuf, \
get_resource, \
get_resource_image, \
get_resource_pixbuf, \
Action, \
ToggleAction, \
add_actions, \
update_file_preview, \
dialog_find, \
dialog_image_resize
from keepnote.gui.icons import \
get_node_icon, lookup_icon_filename
from keepnote.gui.font_selector import FontSelector
from keepnote.gui.colortool import FgColorTool, BgColorTool
from keepnote.gui.richtext.richtext_tags import color_tuple_to_string
from keepnote.gui.popupwindow import PopupWindow
from keepnote.gui.linkcomplete import LinkPickerPopup
from keepnote.gui.link_editor import LinkEditor
from keepnote.gui.editor import KeepNoteEditor
from keepnote.gui.editor_richtext import ComboToolItem
_ = keepnote.translate
class TextEditor (KeepNoteEditor):
def __init__(self, app):
KeepNoteEditor.__init__(self, app)
self._app = app
self._notebook = None
self._link_picker = None
self._maxlinks = 10 # maximum number of links to show in link picker
# state
self._page = None # current NoteBookPage
self._page_scrolls = {} # remember scroll in each page
self._page_cursors = {}
self._textview_io = RichTextIO()
# textview and its callbacks
if SourceView:
self._textview = SourceView(SourceBuffer())
self._textview.get_buffer().set_highlight_syntax(True)
#self._textview.set_show_margin(True)
#self._textview.disable()
else:
self._textview = RichTextView(RichTextBuffer(
self._app.get_richtext_tag_table())) # textview
self._textview.disable()
self._textview.connect("modified", self._on_modified_callback)
self._textview.connect("visit-url", self._on_visit_url)
# scrollbars
self._sw = gtk.ScrolledWindow()
self._sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self._sw.set_shadow_type(gtk.SHADOW_IN)
self._sw.add(self._textview)
self.pack_start(self._sw)
#self._socket = gtk.Socket()
#self.pack_start(self._socket)
# menus
#self.editor_menus = EditorMenus(self._app, self)
# find dialog
#self.find_dialog = dialog_find.KeepNoteFindDialog(self)
self.show_all()
def set_notebook(self, notebook):
"""Set notebook for editor"""
# set new notebook
self._notebook = notebook
if self._notebook:
# read default font
pass
else:
# no new notebook, clear the view
self.clear_view()
def load_preferences(self, app_pref, first_open=False):
"""Load application preferences"""
#self.editor_menus.enable_spell_check(
# self._app.pref.get("editors", "general", "spell_check",
# default=True))
if not SourceView:
self._textview.set_default_font("Monospace 10")
def save_preferences(self, app_pref):
"""Save application preferences"""
# record state in preferences
#app_pref.set("editors", "general", "spell_check",
# self._textview.is_spell_check_enabled())
def get_textview(self):
"""Return the textview"""
return self._textview
def is_focus(self):
"""Return True if text editor has focus"""
return self._textview.is_focus()
def grab_focus(self):
"""Pass focus to textview"""
self._textview.grab_focus()
def clear_view(self):
"""Clear editor view"""
self._page = None
if not SourceView:
self._textview.disable()
def undo(self):
"""Undo the last action in the viewer"""
self._textview.undo()
def redo(self):
"""Redo the last action in the viewer"""
self._textview.redo()
def view_nodes(self, nodes):
"""View a page in the editor"""
# editor cannot view multiple nodes at once
# if asked to, it will view none
if len(nodes) > 1:
nodes = []
# save current page before changing nodes
self.save()
self._save_cursor()
if len(nodes) == 0:
self.clear_view()
else:
page = nodes[0]
self._page = page
if not SourceView:
self._textview.enable()
try:
if page.has_attr("payload_filename"):
#text = safefile.open(
# os.path.join(page.get_path(),
# page.get_attr("payload_filename")),
# codec="utf-8").read()
infile = page.open_file(
page.get_attr("payload_filename"), "r", "utf-8")
text = infile.read()
infile.close()
self._textview.get_buffer().set_text(text)
self._load_cursor()
if SourceView:
manager = SourceLanguageManager()
#print manager.get_language_ids()
#lang = manager.get_language_from_mime_type(
# page.get_attr("content_type"))
lang = manager.get_language("python")
self._textview.get_buffer().set_language(lang)
else:
self.clear_view()
except RichTextError, e:
self.clear_view()
self.emit("error", e.msg, e)
except Exception, e:
self.clear_view()
self.emit("error", "Unknown error", e)
if len(nodes) > 0:
self.emit("view-node", nodes[0])
def _save_cursor(self):
if self._page is not None:
it = self._textview.get_buffer().get_iter_at_mark(
self._textview.get_buffer().get_insert())
self._page_cursors[self._page] = it.get_offset()
x, y = self._textview.window_to_buffer_coords(
gtk.TEXT_WINDOW_TEXT, 0, 0)
it = self._textview.get_iter_at_location(x, y)
self._page_scrolls[self._page] = it.get_offset()
def _load_cursor(self):
# place cursor in last location
if self._page in self._page_cursors:
offset = self._page_cursors[self._page]
it = self._textview.get_buffer().get_iter_at_offset(offset)
self._textview.get_buffer().place_cursor(it)
# place scroll in last position
if self._page in self._page_scrolls:
offset = self._page_scrolls[self._page]
buf = self._textview.get_buffer()
it = buf.get_iter_at_offset(offset)
mark = buf.create_mark(None, it, True)
self._textview.scroll_to_mark(mark,
0.49, use_align=True, xalign=0.0)
buf.delete_mark(mark)
def save(self):
"""Save the loaded page"""
if self._page is not None and \
self._page.is_valid() and \
(SourceView or
self._textview.is_modified()):
try:
# save text data
buf = self._textview.get_buffer()
text = unicode_gtk(buf.get_text(buf.get_start_iter(),
buf.get_end_iter()))
#out = safefile.open(
# os.path.join(self._page.get_path(),
# self._page.get_attr("payload_filename")), "w",
# codec="utf-8")
out = self._page.open_file(
self._page.get_attr("payload_filename"), "w", "utf-8")
out.write(text)
out.close()
# save meta data
self._page.set_attr_timestamp("modified_time")
self._page.save()
except RichTextError, e:
self.emit("error", e.msg, e)
except NoteBookError, e:
self.emit("error", e.msg, e)
except Exception, e:
self.emit("error", str(e), e)
def save_needed(self):
"""Returns True if textview is modified"""
if not SourceView:
return self._textview.is_modified()
return False
def add_ui(self, window):
if not SourceView:
self._textview.set_accel_group(window.get_accel_group())
self._textview.set_accel_path(CONTEXT_MENU_ACCEL_PATH)
#if hasattr(self, "_socket"):
# print "id", self._socket.get_id()
# self._socket.add_id(0x480001f)
#self.editor_menus.add_ui(window,
# use_minitoolbar=
# self._app.pref.get("look_and_feel",
# "use_minitoolbar",
# default=False))
def remove_ui(self, window):
pass
#self.editor_menus.remove_ui(window)
#===========================================
# callbacks for textview
def _on_modified_callback(self, textview, modified):
"""Callback for textview modification"""
self.emit("modified", self._page, modified)
# make notebook node modified
if modified:
self._page.mark_modified()
self._page.notify_change(False)
def _on_visit_url(self, textview, url):
"""Callback for textview visiting a URL"""
if is_node_url(url):
host, nodeid = parse_node_url(url)
node = self._notebook.get_node_by_id(nodeid)
if node:
self.emit("visit-node", node)
else:
try:
self._app.open_webpage(url)
except KeepNoteError, e:
self.emit("error", e.msg, e)
class EditorMenus (gobject.GObject):
def __init__(self, app, editor):
gobject.GObject.__init__(self)
self._app = app
self._editor = editor
self._action_group = None
self._uis = []
self.spell_check_toggle = None
self._removed_widgets = []
#=======================================================
# spellcheck
def enable_spell_check(self, enabled):
"""Spell check"""
self._editor.get_textview().enable_spell_check(enabled)
# see if spell check became enabled
enabled = self._editor.get_textview().is_spell_check_enabled()
# update UI to match
if self.spell_check_toggle:
self.spell_check_toggle.set_active(enabled)
return enabled
def on_spell_check_toggle(self, widget):
"""Toggle spell checker"""
self.enable_spell_check(widget.get_active())
#=====================================================
# toolbar and menus
def add_ui(self, window):
self._action_group = gtk.ActionGroup("Editor")
self._uis = []
add_actions(self._action_group, self.get_actions())
window.get_uimanager().insert_action_group(
self._action_group, 0)
for s in self.get_ui():
self._uis.append(window.get_uimanager().add_ui_from_string(s))
window.get_uimanager().ensure_update()
self.setup_menu(window, window.get_uimanager())
def remove_ui(self, window):
# remove ui
for ui in reversed(self._uis):
window.get_uimanager().remove_ui(ui)
self._uis = []
window.get_uimanager().ensure_update()
# remove action group
window.get_uimanager().remove_action_group(self._action_group)
self._action_group = None
def get_actions(self):
def BothAction(name1, *args):
return [Action(name1, *args), ToggleAction(name1 + " Tool", *args)]
return (map(lambda x: Action(*x), [
# finding
("Find In Page", gtk.STOCK_FIND, _("_Find In Page..."),
"<control>F", None,
lambda w: self._editor.find_dialog.on_find(False)),
("Find Next In Page", gtk.STOCK_FIND, _("Find _Next In Page..."),
"<control>G", None,
lambda w: self._editor.find_dialog.on_find(False, forward=True)),
("Find Previous In Page", gtk.STOCK_FIND,
_("Find Pre_vious In Page..."),
"<control><shift>G", None,
lambda w: self._editor.find_dialog.on_find(False, forward=False)),
("Replace In Page", gtk.STOCK_FIND_AND_REPLACE,
_("_Replace In Page..."),
"<control>R", None,
lambda w: self._editor.find_dialog.on_find(True)),
]) +
[ToggleAction("Spell Check", None, _("_Spell Check"),
"", None,
self.on_spell_check_toggle)]
)
def get_ui(self):
ui = ["""
<ui>
<menubar name="main_menu_bar">
<menu action="Edit">
<placeholder name="Viewer">
<placeholder name="Editor">
<placeholder name="Extension"/>
</placeholder>
</placeholder>
</menu>
<menu action="Search">
<placeholder name="Viewer">
<placeholder name="Editor">
<menuitem action="Find In Page"/>
<menuitem action="Find Next In Page"/>
<menuitem action="Find Previous In Page"/>
<menuitem action="Replace In Page"/>
</placeholder>
</placeholder>
</menu>
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
<menu action="Go">
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
</menu>
<menu action="Tools">
<placeholder name="Viewer">
<menuitem action="Spell Check"/>
</placeholder>
</menu>
</menubar>
</ui>
"""]
ui.append("""
<ui>
<toolbar name="main_tool_bar">
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
</toolbar>
</ui>
""")
return ui
def setup_menu(self, window, uimanager):
u = uimanager
# get spell check toggle
self.spell_check_toggle = \
uimanager.get_widget("/main_menu_bar/Tools/Viewer/Spell Check")
self.spell_check_toggle.set_sensitive(
self._editor.get_textview().can_spell_check())
self.spell_check_toggle.set_active(window.get_app().pref.get(
"editors", "general", "spell_check", default=True))
| yngfng/keepnote-0.7.8 | keepnote/gui/editor_sourceview.py | Python | gpl-2.0 | 17,284 | [
"VisIt"
] | 4c8eaae85ef24f6fe3b7748069cd3dbfa739e007e964693dd91801d35c1a6caf |
# -*- coding: utf8 -*-
"""
"""
__author__ = "Jérôme Samson"
__copyright__ = "Copyright 2014, Mikros Image"
import os
import sys
import csv
import time
import datetime
from optparse import OptionParser
import numpy as np
import pygal
from pygal.style import *
try:
import simplejson as json
except ImportError:
import json
from octopus.dispatcher import settings
from octopus.core import singletonconfig
from pulitools.common import lowerQuartile, higherQuartile
from pulitools.stats.common import createCommonParser, getRangeDates, prepareGraph, prepareScale, renderGraph
# import matplotlib.pyplot as plt
###########################################################################################################################
# {
# "date": timestamp
# "licenses": "{\"shave\" : \"0 / 70\",\"nuke\" : \"0 / 70\",\"clarisse\" : \"0 / 5\",\"mtoa\" : \"137 / 195\",\"katana\" : \"24 / 200\",\"ocula\" : \"0 / 3\"}",
# "rendernodes":
# {
# "renderNodesByStatus":
# {
# "Paused": 89,
# "Working": 152,
# "Unknown": 51,
# "Assigned": 0,
# "Idle": 15,
# "Booting": 0,
# "Finishing": 0
# },
# "totalCores": 5192,
# "missingRenderNodes": 51,
# "idleCores": 1844
# },
# "commands":
# {
# "ASSIGNED": 0,
# "CANCELED": 38926,
# "RUNNING": 151,
# "DONE": 67467,
# "TIMEOUT": 0,
# "ERROR": 115,
# "READY": 5455,
# "FINISHING": 0,
# "TOTAL": 117238,
# "BLOCKED": 5124
# },
# "jobs":
# {
# "total": 2519
# }
# }
def parseFarmArgs( commonParser ):
'''
Manages arguments parsing definition and help information
'''
commonParser.add_option( "--hide-offline", action="store_false", dest="offline", help="", default=True )
commonParser.add_option( "--hide-paused", action="store_false", dest="paused", help="", default=True )
commonParser.add_option( "--hide-working", action="store_false", dest="working", help="", default=True )
commonParser.add_option( "--hide-idle", action="store_false", dest="idle", help="", default=True )
options, args = commonParser.parse_args()
return options, args
if __name__ == "__main__":
# DBG
# startTime = time.time()
# prevTime = time.time()
# print ("%s - init timer" % (datetime.datetime.now()))
options, args = parseFarmArgs( createCommonParser() )
VERBOSE = options.verbose
if VERBOSE:
print "Command options: %s" % options
print "Command arguments: %s" % args
startDate, endDate = getRangeDates( options )
if VERBOSE:
print "Loading stats: %r " % options.sourceFile
print " - from: %r " % datetime.date.fromtimestamp(startDate)
print " - to: %r " % datetime.date.fromtimestamp(endDate)
print "Start."
nbjobs=[]
nb_working=[]
nb_paused=[]
nb_idle=[]
nb_assigned=[]
nb_unknown=[]
nb_booting=[]
nb_finishing=[]
strScale=[]
scale=[]
log = []
with open(options.sourceFile, "r" ) as f:
for line in f:
item = json.loads(line)
if item['date'] < startDate or endDate <= item['date'] :
continue
log.append( item )
# print "%s - %6.2f ms - load source complete" % (datetime.datetime.now(), (time.time() - prevTime) * 1000)
# prevTime = time.time()
for data in log:
eventDate = datetime.datetime.fromtimestamp( data['date'] )
if options.working:
nb_working.append(data["rendernodes"]["renderNodesByStatus"]['Working'] + data["rendernodes"]["renderNodesByStatus"]['Assigned'])
if options.paused:
nb_paused.append(data["rendernodes"]["renderNodesByStatus"]['Paused'])
if options.offline:
nb_unknown.append(data["rendernodes"]["renderNodesByStatus"]['Unknown'])
if options.idle:
nb_idle.append(data["rendernodes"]["renderNodesByStatus"]['Idle'])
scale.append( eventDate )
# print "%s - %6.2f ms - create temp array" % (datetime.datetime.now(), (time.time() - prevTime) * 1000)
# prevTime = time.time()
if VERBOSE:
print "Num events: %d" % len(scale)
if len(scale) < options.resolution:
if VERBOSE:
print "Too few events for resolution or scale: limit to %d" % len(scale)
options.resolution = len(scale)
stepSize = len(scale) / options.resolution
newshape = (options.resolution, stepSize)
useableSize = len(scale) - ( len(scale) % options.resolution )
# print "%s - %6.2f ms - create newshape" % (datetime.datetime.now(), (time.time() - prevTime) * 1000)
# prevTime = time.time()
if options.working:
working = np.array(nb_working[-useableSize:])
avg_working= np.around( np.mean( np.reshape(working, newshape), axis=1), decimals=0)
if options.offline:
unknown = np.array(nb_unknown[-useableSize:])
avg_unknown= np.around( np.mean( np.reshape(unknown, newshape), axis=1), decimals=0)
if options.paused:
paused = np.array(nb_paused[-useableSize:])
avg_paused= np.around( np.mean( np.reshape(paused, newshape), axis=1), decimals=0)
if options.idle:
idle = np.array(nb_idle[-useableSize:])
avg_idle= np.around( np.mean( np.reshape(idle, newshape), axis=1), decimals=0)
# print "%s - %6.2f ms - create and average numpy arrays" % (datetime.datetime.now(), (time.time() - prevTime) * 1000)
# prevTime = time.time()
# med= np.median(data, axis=1)
# amin= np.min(data, axis=1)
# amax= np.max(data, axis=1)
# q1= lowerQuartile(data)
# q2= higherQuartile(data)
# std= np.std(data, axis=1)
#
# Prepare scale
#
tmpscale = np.reshape(scale[-useableSize:], newshape)
strScale = prepareScale( tmpscale, options )
# print ("scale %d = %r" % (len(strScale), strScale) )
if VERBOSE:
print ("newshape %d = %r" % (len(newshape), newshape) )
print ("avg %d = %r" % (len(avg_working), avg_working) )
print ("scale %d = %r" % (len(strScale), strScale) )
graph = prepareGraph( options )
graph.title = options.title
graph.x_labels = strScale
if options.offline:
graph.add('Offline', avg_unknown )
if options.paused:
graph.add('Paused', avg_paused )
if options.working:
graph.add('Working', avg_working )
if options.idle:
graph.add('Idle', avg_idle )
# print "%s - %6.2f ms - prepare graph" % (datetime.datetime.now(), (time.time()-prevTime) * 1000)
# prevTime = time.time()
renderGraph( graph, options )
# print "%s - %6.2f ms - render graph" % (datetime.datetime.now(), (time.time() - prevTime) * 1000)
# print "%s - %6.2f ms - Total time" % (datetime.datetime.now(), (time.time() - startTime) * 1000)
if options.verbose:
print "Done." | mikrosimage/OpenRenderManagement | src/pulitools/stats/trace_farm.py | Python | bsd-3-clause | 7,375 | [
"Octopus"
] | b45536c786ce521507a0badd99d2fd042453231ebbc82627bc59c9068630d87d |
"""
The B{0install list-feeds} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _
from zeroinstall.cmd import UsageError
from zeroinstall.injector import model, reader
syntax = "URI"
def add_options(parser):
pass
def handle(config, options, args):
"""@type args: [str]"""
if len(args) != 1: raise UsageError()
uri = model.canonical_iface_uri(args[0])
iface = config.iface_cache.get_interface(uri)
if iface.extra_feeds:
for f in iface.extra_feeds:
print(f.uri)
else:
print(_("(no feeds)"))
# Lists only interfaces with feeds.
# Note: this is also used by remove-feed.
def complete(completion, args, cword):
"""@type completion: L{zeroinstall.cmd._Completion}
@type args: [str]
@type cword: int"""
if len(args) != 1: return
iface_cache = completion.config.iface_cache
for uri in iface_cache.list_all_interfaces():
dummy = model.Interface(uri)
reader.update_user_overrides(dummy)
if dummy.extra_feeds:
completion.add_filtered(uri)
| AlexanderRyzhko/0install-TUF | zeroinstall/cmd/list_feeds.py | Python | lgpl-2.1 | 1,108 | [
"VisIt"
] | d5d6a77d731a6384c0e1c6b82e3121f213b2ad9461c46898e25417c9beca7afa |
#!/usr/bin/env python
# pmx Copyright Notice
# ============================
#
# The pmx source code is copyrighted, but you can freely use and
# copy it as long as you don't change or remove any of the copyright
# notices.
#
# ----------------------------------------------------------------------
# pmx is Copyright (C) 2006-2013 by Daniel Seeliger
#
# All Rights Reserved
#
# Permission to use, copy, modify, distribute, and distribute modified
# versions of this software and its documentation for any purpose and
# without fee is hereby granted, provided that the above copyright
# notice appear in all copies and that both the copyright notice and
# this permission notice appear in supporting documentation, and that
# the name of Daniel Seeliger not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# DANIEL SEELIGER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL DANIEL SEELIGER BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ----------------------------------------------------------------------
__doc__="""
Program to insert mutated residues in structure files for
free energy simulations (so far unfinished new version).
"""
import sys,os
from pmx import *
from pmx.parser import *
from pmx import library
from pmx.mutdb import *
from pmx.geometry import *
class UnknownResidueError(Exception):
def __init__(self, s):
self.s = s
def __str__(self):
return repr(self.s)
class RangeCheckError(Exception):
def __init__(self, s):
self.s = s
def __str__(self):
return repr(self.s)
class mtpError(Exception):
def __init__(self,s):
self.s = s
def __str__(self):
return repr(self.s)
ext_one_letter = {
'ALA':'A',
'ARG':'R',
'ASN':'N',
'ASP':'D',
'ASPH':'B',
'ASPP':'B',
'ASH':'B',
'CYS':'C',
'CYS2':'C',
'CYN':'C',
'CYX':'C',
'CYM':'C',
'CYSH':'C',
'GLU':'E',
'GLUH':'J',
'GLUP':'J',
'GLH':'J',
'GLN':'Q',
'GLY':'G',
'HIS':'H',
'HIE':'X',
'HISE':'X',
'HSE':'X',
'HIP':'Z',
'HSP':'Z',
'HISH':'Z',
'HID':'H',
'HSD':'H',
'ILE':'I',
'LEU':'L',
'LYS':'K',
'LYSH':'K',
'LYP':'K',
'LYN':'O',
'LSN':'O',
'MET':'M',
'PHE':'F',
'PRO':'P',
'SER':'S',
'SP1':'SP1', # phosphoserine in charmm36
'SP2':'SP2', # phosphoserine in charmm36
'THR':'T',
'TRP':'W',
'TYR':'Y',
'VAL':'V',
}
noncanonical_aa = {
'S2SP1':'SSP1', # serine to pSer1
'S2SP2':'SSP2', # serine to pSer2
'SP12S':'SP1S', # pSer1 to serine
'SP22S':'SP2S', # pSer2 to setine
}
dna_names = {
'DA5_DT5':'D5K',
'DA5_DC5':'D5L',
'DA5_DG5':'D5M',
'DT5_DA5':'D5N',
'DT5_DC5':'D5O',
'DT5_DG5':'D5P',
'DC5_DA5':'D5R',
'DC5_DT5':'D5S',
'DC5_DG5':'D5T',
'DG5_DA5':'D5X',
'DG5_DT5':'D5Y',
'DG5_DC5':'D5Z',
'DA3_DT3':'D3K',
'DA3_DC3':'D3L',
'DA3_DG3':'D3M',
'DT3_DA3':'D3N',
'DT3_DC3':'D3O',
'DT3_DG3':'D3P',
'DC3_DA3':'D3R',
'DC3_DT3':'D3S',
'DC3_DG3':'D3T',
'DG3_DA3':'D3X',
'DG3_DT3':'D3Y',
'DG3_DC3':'D3Z',
# False names to avoid an error
'DG3_DG3':'FOO',
'DC3_DC3':'FOO',
'DA3_DA3':'FOO',
'DT3_DT3':'FOO',
'DG5_DG5':'FOO',
'DC5_DC5':'FOO',
'DA5_DA5':'FOO',
'DT5_DT5':'FOO',
}
rna_names = {
'RA5_RU5':'R5K',
'RA5_RC5':'R5L',
'RA5_RG5':'R5M',
'RU5_RA5':'R5N',
'RU5_RC5':'R5O',
'RU5_RG5':'R5P',
'RC5_RA5':'R5R',
'RC5_RU5':'R5S',
'RC5_RG5':'R5T',
'RG5_RA5':'R5X',
'RG5_RU5':'R5Y',
'RG5_RC5':'R5Z',
'RA3_RU3':'R3K',
'RA3_RC3':'R3L',
'RA3_RG3':'R3M',
'RU3_RA3':'R3N',
'RU3_RC3':'R3O',
'RU3_RG3':'R3P',
'RC3_RA3':'R3R',
'RC3_RU3':'R3S',
'RC3_RG3':'R3T',
'RG3_RA3':'R3X',
'RG3_RU3':'R3Y',
'RG3_RC3':'R3Z',
# False names to avoid an error
'RG3_RG3':'FOO',
'RC3_RC3':'FOO',
'RA3_RA3':'FOO',
'RU3_RU3':'FOO',
'RG5_RG5':'FOO',
'RC5_RC5':'FOO',
'RA5_RA5':'FOO',
'RU5_RU5':'FOO',
}
def check_residue_name( res ):
if res.resname == 'LYS':
if res.has_atom( 'HZ3'):
res.set_resname('LYP')
elif res.resname == 'HIS':
if res.has_atom('HD1') and \
res.has_atom('HE2'):
res.set_resname('HIP')
elif res.has_atom('HD1') and not \
res.has_atom('HE2'):
res.set_resname( 'HID' )
elif not res.has_atom('HD1') and \
res.has_atom('HE2'):
res.set_resname( 'HIE' )
elif res.resname == 'ASP':
if res.has_atom('HD2'):
res.set_resname('ASH')
elif res.resname == 'GLU':
if res.has_atom('HE2'):
res.set_resname('GLH')
elif res.resname == 'CYS':
if not res.has_atom('HG'):
print >>sys.stderr,' Cannot mutate SS-bonded Cys %d' % res.id
def check_OPLS_LYS( res ):
if res.has_atom( 'HZ3'):
return('K')
else:
return('O')
#def get_restype(r):
# if r.resname in ['DA','DT','DC','DG']:
# return 'DNA'
# elif r.resname in ['RA','RU','RC','RG']:
# return 'RNA'
# else: return 'PEPTIDE'
def read_script(fn):
return read_and_format(fn,"is")
def int_input():
inp = raw_input()
try:
inp = int(inp)
return inp
except:
print 'You entered "%s" -> Try again' % inp
return None
def check_residue_range(m, idx):
valid_ids = range(1, len(m.residues)+1)
if idx not in valid_ids: return False
return True
def select_residue(m):
valid_ids = range(1, len(m.residues)+1)
print '\nSelect residue to mutate:'
for i,r in enumerate(m.residues):
if r.resname not in library._ions+library._water:
sys.stdout.write('%6d-%s-%s' % (r.id,r.resname,r.chain_id))
if r.id % 6 == 0: print
print
selected_residue_id = None
while not selected_residue_id:
sys.stdout.write('Enter residue number: ')
selected_residue_id = int_input()
if selected_residue_id is not None and selected_residue_id not in valid_ids:
print 'Residue id %d not in range %d-%d -> Try again' % (selected_residue_id,1,len(residues))
selected_residue_id = None
return selected_residue_id
def select_mutation(m, selected_residue_id, ffpath):
residue = m.residues[selected_residue_id - 1]
if get_restype(residue) == 'PEPTIDE':
return select_aa_mutation(residue,ffpath)
elif get_restype(residue) in ['DNA','RNA']:
return select_nuc_mutation(residue)
def select_nuc_mutation(residue):
aa = None
print '\nSelect new base for %s-%s: ' % (residue.id,residue.resname)
sys.stdout.write('One-letter code: ')
while aa is None:
aa = raw_input().upper()
if get_restype(residue) == 'DNA' and aa not in ['A','C','G','T']:
sys.stdout.write('Unknown DNA residue "%s"!\nOne-letter code: ' % aa)
aa = None
elif get_restype(residue) == 'RNA' and aa not in ['A','C','G','U']:
sys.stdout.write('Unknown RNA residue "%s"!\nOne-letter code: ' % aa)
aa = None
if aa:
print 'Will apply mutation %s->%s on residue %s-%d' % (residue.resname[1],aa,residue.resname,residue.id)
return aa
def select_aa_mutation(residue,ffpath):
check_residue_name( residue )
print '\nSelect new amino acid for %s-%s: ' % (residue.id,residue.resname)
sys.stdout.write('Three- or one-letter code (or four-letter for ff specific residues): ')
if residue.resname in ['HIE','HISE','HSE']: rol = 'X'
elif residue.resname in ['HIP','HISH','HSP']: rol = 'Z'
elif residue.resname in ['GLH','GLUH','GLUP']: rol = 'J'
elif residue.resname in ['ASH','ASPH','ASPP']: rol = 'B'
elif residue.resname in ['LYN','LYS','LSN']: rol = 'O'
else:
rol = library._one_letter[residue.resname]
aa = None
ol = library._aacids_dic.keys()
tl = library._aacids_dic.values()
ffpathlower = ffpath.lower()
if('amber' in ffpathlower):
ol = library._aacids_ext_amber.keys()
tl = library._aacids_ext_amber.values()
if('opls' in ffpathlower):
ol = library._aacids_ext_oplsaa.keys()
tl = library._aacids_ext_oplsaa.values()+['ASPP','GLUP','LSN']
if('charmm' in ffpathlower):
ol = library._aacids_ext_charmm.keys()
tl = library._aacids_ext_charmm.values()
while aa is None:
aa = raw_input().upper()
if len(aa) != 1 and len(aa)!=3 and len(aa)!=4:
sys.stdout.write('Nope!\nThree- or one-letter code (or four-letter for ff specific residues): ')
aa = None
elif (len(aa) == 1 and aa not in ol+['B','J','O','X','Z']) or (len(aa)==3 and aa not in tl) or (len(aa)==4 and aa not in tl):
sys.stdout.write('Unknown aa "%s"!\nThree- or one-letter code (or four-letter for ff specific residues): ' % aa)
aa = None
if aa and (len(aa)==3 or len(aa)==4): aa = ext_one_letter[aa]
print 'Will apply mutation %s->%s on residue %s-%d' % (rol,aa,residue.resname,residue.id)
return aa
def interactive_selection(m,ffpath):
residue_id = select_residue(m)
mutation = select_mutation(m, residue_id, ffpath )
return residue_id, mutation
def ask_next():
sys.stdout.write('\nApply another mutation [y/n]? ')
res = raw_input().lower()
if res == 'y': return True
elif res == 'n': return False
else: return ask_next()
def convert_aa_name( aa ):
if len(aa) == 1: return aa.upper()
elif len(aa) == 3: return ext_one_letter[aa.upper()]
elif len(aa) == 4: return ext_one_letter[aa.upper()]
else: raise UnkownResidueError(aa)
def rename_to_match_library(res):
name_hash = {}
atoms = res.atoms
for atom in atoms:
foo = atom.name
## for serine
if (atom.resname == 'SER') and (atom.name == 'HG1'):
atom.name = 'HG'
if ('S2' in atom.resname) and (atom.name == 'HG1'):
atom.name = 'HG'
if ('SP1' in atom.resname) and (atom.name == 'HG1'): # phosphoserine in charmm36
atom.name = 'HG'
if ('SP2' in atom.resname) and (atom.name == 'HG1'): # phosphoserine in charmm36
atom.name = 'HG'
## for cysteine
if (atom.resname == 'CYS') and (atom.name == 'HG1'):
atom.name = 'HG'
if ('C2' in atom.resname) and (atom.name == 'HG1'):
atom.name = 'HG'
# print atom.resname,atom.name
name_hash[atom.name] = foo
return name_hash
def rename_back( res, name_hash ):
for atom in res.atoms:
atom.name = name_hash[atom.name]
def set_conformation(old_res, new_res, rotdic):
    """Copy the side-chain conformation of old_res onto the hybrid new_res.

    For every rotatable bond listed in rotdic the corresponding chi
    dihedral of new_res is rotated to match old_res; afterwards all
    non-dummy atoms of new_res get the exact coordinates of old_res.
    Note: rotdic is modified in place (atom-name lists are replaced by
    the fetched atom objects of new_res).
    """
    old_res.get_real_resname()
    dihedrals = library._aa_dihedrals[old_res.real_resname]
    # Resolve the atom-name lists in rotdic into atoms of the new residue.
    for key, lst in rotdic.items():
        new = new_res.fetchm(lst)
        rotdic[key] = new
    # Collect the chi dihedral definitions matching the rotatable bonds.
    chis = []
    for key in rotdic.keys():
        at1,at2 = key.split('-')
        for d in dihedrals:
            if d[1] == at1 and d[2] == at2 \
               and d[-1] != -1:
                chis.append(d)
    # Rotate each chi of new_res by the angle difference to old_res.
    for d in chis:
        atoms = old_res.fetchm(d[:4])
        phi = atoms[0].dihedral(atoms[1], atoms[2], atoms[3])
        atoms2 = new_res.fetchm(d[:4])
        phi2 = atoms2[0].dihedral(atoms2[1], atoms2[2], atoms2[3])
        diff = phi-phi2
        a1,a2 = new_res.fetchm(d[1:3])
        key= a1.name+'-'+a2.name
        atoms = rotdic[key]
        rot = Rotation(a1.x,a2.x)
        for atom in atoms:
            atom.x = rot.apply(atom.x,diff)
    # sys.exit(0)
    # Finally copy exact coordinates for all real (non-dummy 'D*') atoms.
    for atom in new_res.atoms:
        if atom.name[0] != 'D':
            atom.x = old_res[atom.name].x
def get_nuc_hybrid_resname(residue,new_nuc_name,bRNA=False):
firstLetter = 'D'
if bRNA:
firstLetter = 'R'
# identify if the nucleotide is terminal
for a in residue.atoms:
if a.name=='H3T':
r1 = firstLetter+residue.resname[1]+'3'
r2 = firstLetter+new_nuc_name+'3'
dict_key = r1+'_'+r2
if bRNA:
hybrid_residue_name = rna_names[dict_key]
else:
hybrid_residue_name = dna_names[dict_key]
return(hybrid_residue_name,residue.resname[1],new_nuc_name)
elif a.name=='H5T':
r1 = firstLetter+residue.resname[1]+'5'
r2 = firstLetter+new_nuc_name+'5'
dict_key = r1+'_'+r2
if bRNA:
hybrid_residue_name = rna_names[dict_key]
else:
hybrid_residue_name = dna_names[dict_key]
return(hybrid_residue_name,residue.resname[1],new_nuc_name)
hybrid_residue_name = residue.resname+new_nuc_name
return(hybrid_residue_name,residue.resname[1],new_nuc_name)
def apply_nuc_mutation(m, residue, new_nuc_name, mtp_file, bRNA=False):
    """Replace a nucleotide of model m by the corresponding hybrid residue.

    Fetches the hybrid residue from the mtp database, superimposes it on
    the original nucleotide, copies coordinates of all non-dummy atoms
    and swaps the residue into the model in place.
    """
    # hybrid_residue_name = residue.resname+new_nuc_name
    hybrid_residue_name,resname1,resname2 = get_nuc_hybrid_resname(residue,new_nuc_name,bRNA)
    print 'log_> Residue to mutate: %d | %s | %s ' % ( residue.id, residue.resname, residue.chain_id)
    print 'log_> Mutation to apply: %s->%s' % (residue.resname[1], new_nuc_name)
    print 'log_> Hybrid residue name: %s' % hybrid_residue_name
    hybrid_res, bonds, imps, diheds, rotdic = get_hybrid_residue(hybrid_residue_name, mtp_file)
    # hybrid_res.nm2a()
    # Superimpose the hybrid nucleotide onto the original one.
    nuc_super( residue, hybrid_res, resname1, resname2 )
    # Real atoms keep the original coordinates; dummies ('D*') stay as built.
    for atom in hybrid_res.atoms:
        if atom.name[0] != 'D':
            atom.x = residue[atom.name].x
    m.replace_residue( residue, hybrid_res)
    print 'log_> Inserted hybrid residue %s at position %d (chain %s)' %\
          (hybrid_res.resname, hybrid_res.id, hybrid_res.chain_id)
def apply_aa_mutation(m, residue, new_aa_name, mtp_file, bStrB, infileB):
    """Replace an amino acid of model m by the matching hybrid residue.

    Looks up the hybrid residue (WT one-letter + '2' + target one-letter)
    in the mtp database, superimposes its backbone, transfers the
    side-chain conformation and swaps the residue into the model.
    If bStrB is set, dummy-atom coordinates of the B state are taken
    from the extra structure file infileB instead.
    """
    if residue.resname == 'ILE': rename_ile( residue )
    olkey = convert_aa_name( residue.resname )
    # olkey should contain the correct one letter name of the WT residue
    # however, due to the different namings of the residues in the FFs
    # Lys needs to be checked once again: in OPLS Lys is non-protonated, while in the other FFs it is protonated
    if ('opls' in mtp_file) and ('LYS' in residue.resname):
        olkey = check_OPLS_LYS( residue )
    hybrid_residue_name = olkey+'2'+new_aa_name
    # Some hybrids map to special (non-canonical) library names.
    if hybrid_residue_name in noncanonical_aa.keys():
        hybrid_residue_name = noncanonical_aa[hybrid_residue_name]
    print 'log_> Residue to mutate: %d | %s | %s ' % ( residue.id, residue.resname, residue.chain_id)
    print 'log_> Mutation to apply: %s->%s' % (olkey, new_aa_name)
    print 'log_> Hybrid residue name: %s' % hybrid_residue_name
    hybrid_res, bonds, imps, diheds, rotdic = get_hybrid_residue(hybrid_residue_name, mtp_file)
    #hybrid_res.nm2a()
    # Superimpose the hybrid residue onto the original backbone.
    bb_super(residue, hybrid_res )
    ## VG rename residue atoms
    hash1 = rename_to_match_library(residue)
    hash2 = rename_to_match_library(hybrid_res)
    set_conformation(residue, hybrid_res, rotdic)
    if bStrB:
        print "log_> Set Bstate geometry according to the provided structure"
        mB = Model(infileB,bPDBTER=True)
        rename_atoms_to_gromacs( mB )
        mB.nm2a()
        residueB = mB.residues[residue.id-1]
        bb_super(residue, residueB )
        # Place each dummy atom on its morphing partner from the B structure.
        for atom in hybrid_res.atoms:
            if atom.name[0] == 'D':
                for atomB in residueB.atoms:
                    if atomB.name == hybrid_res.morphes[atom.name]['n1']:
                        atom.x = atomB.x
    rename_back(residue,hash1)
    rename_back(hybrid_res,hash2)
    ## VG rename residue atoms back
    m.replace_residue( residue, hybrid_res)
    print 'log_> Inserted hybrid residue %s at position %d (chain %s)' %\
          (hybrid_res.resname, hybrid_res.id, hybrid_res.chain_id)
def apply_mutation(m, mut, mtp_file, bStrB, infileB, bRNA):
    """Apply one mutation (residue_id, target_name) to model *m*.

    Dispatches to the amino-acid or nucleotide routine depending on the
    residue type.  Raises RangeCheckError for an out-of-range residue id.
    """
    resid = mut[0]
    if not check_residue_range(m, resid):
        raise RangeCheckError(resid)
    residue = m.residues[resid - 1]
    kind = get_restype(residue)
    if kind == 'PEPTIDE':
        apply_aa_mutation(m, residue, convert_aa_name(mut[1]), mtp_file,
                          bStrB, infileB)
    elif kind in ('DNA', 'RNA'):
        apply_nuc_mutation(m, residue, mut[1].upper(), mtp_file, bRNA)
def get_hybrid_residue(residue_name, mtp_file = 'ffamber99sb.mtp'):
    """Read a hybrid residue entry from the mtp database.

    Returns (residue, bonds, impropers, dihedrals, rotation_dict) as
    parsed by read_mtp_entry; raises mtpError if the entry is missing.
    """
    print 'log_> Scanning database for %s ' % residue_name
    resi, bonds, imps, diheds, rotdic = read_mtp_entry(residue_name, filename = mtp_file, version = 'new')
    # An empty atom list means the entry was not found in the database.
    if len(resi.atoms) == 0:
        raise mtpError("Hybrid residue %s not found in %s" % (residue_name, mtp_file) )
    return resi, bonds, imps, diheds, rotdic
def rename_ile(residue):
    """Rename isoleucine delta atoms to the library convention (CD -> CD1, ...).

    Atoms that are absent from the residue (e.g. in united-atom models)
    are simply skipped.
    """
    dic = {'CD':'CD1',
           'HD1':'HD11',
           'HD2':'HD12',
           'HD3':'HD13'
           }
    for key, value in dic.items():
        try:
            atom = residue[key]
            atom.name = value
        except (KeyError, IndexError):
            # was a bare `except:`; only a failed atom lookup is expected here
            pass
def rename_atoms_to_gromacs( m ):
    """Move a leading digit of each atom name to the end (e.g. '1HB' -> 'HB1')."""
    for atom in m.atoms:
        name = atom.name
        if name[0].isdigit():
            atom.name = name[1:] + name[0]
# Deoxyribo-/ribonucleotide residue names, including 3'/5' terminal variants.
# Generated once and kept as frozensets for O(1) membership tests instead of
# scanning a 12-element list literal on every call.
_DNA_RESNAMES = frozenset(base + suffix
                          for base in ('DA', 'DT', 'DC', 'DG')
                          for suffix in ('', '3', '5'))
_RNA_RESNAMES = frozenset(base + suffix
                          for base in ('RA', 'RU', 'RC', 'RG')
                          for suffix in ('', '3', '5'))
def get_restype(r):
    """Classify residue *r* as 'DNA', 'RNA' or 'PEPTIDE' by its name."""
    if r.resname in _DNA_RESNAMES:
        return 'DNA'
    if r.resname in _RNA_RESNAMES:
        return 'RNA'
    return 'PEPTIDE'
def get_ff_path( ff ):
    """Resolve a force-field name or path to an existing directory.

    If *ff* is not already a directory, it is searched below $GMXLIB,
    both as given and with a '.ff' suffix.  Exits the program if no
    matching directory is found.
    """
    ff_path = None
    if not os.path.isdir(ff):
        gmxlib = os.environ.get('GMXLIB')
        p = os.path.join(gmxlib,ff)
        pff = p+'.ff'
        if os.path.isdir(p):
            ff_path = p
        elif os.path.isdir(pff):
            ff_path = pff
        else:
            print >>sys.stderr,' Error: forcefield path "%s" not found' % ff
            sys.exit(0)
    else:
        ff_path = ff
    print 'Opening forcefield: %s' % ff_path
    return ff_path
def main(argv):
    """Command-line driver: parse options, apply mutations, write output.

    Mutations are either read from a script file (-script) or selected
    interactively; the mutated structure is written to the -o file.
    """
    options = [
        Option( "-resinfo", "bool", False, "print a 3-letter -> 1-letter residue list"),
        Option( "-dna", "bool", False, "generate hybrid residue for the DNA nucleotides"),
        Option( "-rna", "bool", False, "generate hybrid residue for the RNA nucleotides"),
##         Option( "-r", "rvec", [1,2,3], "some string"),
##         Option( "-b", "bool", True, "bool"),
##         Option( "-r2", "rvec", [1,2,3], "some vector that does wonderful things and returns always segfaults")
        ]
    files = [
        FileOption("-f", "r",["pdb","gro"],"protein.pdb", "input structure file"),
        FileOption("-fB", "r",["pdb","gro"],"proteinB.pdb", "input structure file of the Bstate (optional)"),
        FileOption("-o", "w",["pdb","gro"],"out.pdb", "output structure file"),
        FileOption("-ff", "dir",["ff"],"amber99sbmut", "path to mutation forcefield"),
        FileOption("-script", "r",["txt"],"mutations.txt", "text file with mutations to insert"),
        ]
    help_text = ('This script applies mutations of residues in a structure file ',
                 'for subsequent free energy calculations like FEP, TI, etc.',
                 'The mutation information and dummy placements are taken from',
                 'the hybrid residue database "mutres.mtp". The best way to use',
                 'this script is to take a pdb/gro file that has been written with pdb2gmx',
                 'with all hydrogen atoms present.'
                 'The program can either be executed interactively or via script.',
                 'The script file simply has to consist of "resi_number target_residue." pairs.',
                 'The script uses an extended one-letter code for amino acids to account for',
                 'different protonation states. Use the -resinfo flag to print the dictionary.',
                 'Currently available force fields:',
                 ' - amber99sbmut (Hornak et al, 2006)',
                 ' - amber99sb-star-ildn-mut (Best & Hummer, 2009; Lindorff-Larsen et al, 2010)',
                 ' - charmm22starmut.ff (Piana et al, 2011)',
                 ' - charmm36mut (Best et al, 2012)',
                 ' - oplsaamut (Jorgensen et al, 1996; Kaminski et al, 2001)',
                 '',
                 '',
                 'Please cite:',
                 'Vytautas Gapsys, Servaas Michielssens, Daniel Seeliger and Bert L. de Groot.',
                 'Automated Protein Structure and Topology Generation for Alchemical Perturbations.',
                 'J. Comput. Chem. 2015, 36, 348-354. DOI: 10.1002/jcc.23804',
                 '',
                 'Old pmx (pymacs) version:',
                 'Daniel Seeliger and Bert L. de Groot. Protein Thermostability Calculations Using',
                 'Alchemical Free Energy Simulations, Biophysical Journal, 98(10):2309-2316 (2010)',
                 '',
                 '',
                 '',
                 )
    cmdl = Commandline( argv, options = options,
                        fileoptions = files,
                        program_desc = help_text,
                        check_for_existing_files = False )
    bDNA = cmdl['-dna']
    bRNA = cmdl['-rna']
    # -resinfo only prints the residue-name dictionary and exits.
    if cmdl['-resinfo']:
        print 'Residue dictionary:'
        lst = ext_one_letter.items()
        lst.sort(lambda a,b: cmp(a,b))
        for key, val in lst:
            print "%5s %4s" % (key, val)
        sys.exit(0)
    bStrB = False
    infileB = ''
    if cmdl.opt['-fB'].is_set:
        bStrB = True
        infileB = cmdl['-fB']
    ffpath = get_ff_path(cmdl['-ff'])
    # Pick the mutation database matching the residue type to generate.
    if bDNA:
        mtp_file = os.path.join( ffpath,'mutres_dna.mtp')
    elif bRNA:
        mtp_file = os.path.join( ffpath,'mutres_rna.mtp')
    else:
        mtp_file = os.path.join( ffpath,'mutres.mtp')
    infile = cmdl['-f']
    m = Model(infile,bPDBTER=True)
    rename_atoms_to_gromacs( m )
#    m.write('ll.pdb')
    m.nm2a()
#    m.rename_atoms()
    mutation_list = []
    # Scripted mode: apply every mutation listed in the script file;
    # otherwise fall back to the interactive selection loop.
    if cmdl.opt['-script'].is_set:
        mutations_to_make = read_script( cmdl['-script'] )
        for mut in mutations_to_make:
            check_residue_name( m.residues[ mut[0]-1 ] )
            apply_mutation( m, mut, mtp_file, bStrB, infileB, bRNA )
    else:
        do_more = True
        while do_more:
            mutation = interactive_selection(m,ffpath)
            apply_mutation( m, mutation, mtp_file, bStrB, infileB, bRNA )
            if not ask_next(): do_more = False
    m.write(cmdl['-o'],bPDBTER=True)
    print
    print 'mutations done...........'
    print
# Script entry point: forward the raw command line to main().
if __name__=='__main__':
    main(sys.argv)
| dseeliger/pmx | scripts/mutate_v2.py | Python | lgpl-3.0 | 22,705 | [
"Amber",
"CHARMM"
] | df601afdd88c61fc91bd1e16b570f23a31339417761a17989b4005ff0be71ee4 |
import sys
import csv
import blastparser
import screed
def collect_best_hits(filename):
    """Parse a BLAST output file and keep only the best-scoring hits.

    Returns a dict mapping each query name (NCBI 'gi|...' prefixes
    stripped) to a list of (subject_name, score) tuples that share the
    query's best score.
    """
    d = {}
    for n, record in enumerate(blastparser.parse_fp(open(filename))):
        # Progress marker for large BLAST files.
        if n % 25000 == 0:
            print >>sys.stderr, '...', filename, n
        best_score = None
        for hit in record.hits:
            for match in hit.matches:
                query = record.query_name
                if query.startswith('gi'):
                    query = query.split('|', 2)[2]
                subject = hit.subject_name
                score = match.score
                # only keep the best set of scores for any query
                if best_score and best_score > score:
                    continue
                best_score = score
                x = d.get(query, [])
                x.append((subject, score))
                d[query] = x
            # Matches are ordered by score, so stop once it drops.
            if best_score and best_score != score:
                break
    return d
def parse_ncbi_query(name):
    """Strip the first two '|'-separated fields from an NCBI identifier.

    'gi|1234|ref|XM_1' -> 'ref|XM_1'; a name with fewer than three
    fields collapses to the empty string.
    """
    fields = name.split('|')
    return '|'.join(fields[2:])
def load_names(filename):
    """Map sequence identifiers to descriptions for a sequence file.

    NCBI-style names ('gi|<num>|<rest>') are keyed by <rest>; any other
    name is used verbatim.
    """
    names = {}
    for record in screed.open(filename):
        ident = record.name
        if ident.startswith('gi|'):
            ident = ident.split('|', 2)[2]
        names[ident] = record.description
    return names
# open the output file for reading
# Command line: <query_seqs> <against_seqs> <blast A->B> <blast B->A>
query_seqs = sys.argv[1]
against_seqs = sys.argv[2]
ab = sys.argv[3]
ba = sys.argv[4]
print >>sys.stderr, "reading query seq names from", query_seqs
query_db = load_names(query_seqs)
print >>sys.stderr, "reading against seq names from", against_seqs
against_db = load_names(against_seqs)
# send output as comma-separated values to stdout
output = csv.writer(sys.stdout)
# parse BLAST records
print >>sys.stderr, 'parsing BLAST output', ab
ab_dict = collect_best_hits(ab)
print >>sys.stderr, 'parsing BLAST output', ba
ba_dict = collect_best_hits(ba)
print >>sys.stderr, 'calculating reciprocal best hits'
# dd maps A->B reciprocal best hits; ee is the reverse (B->A) mapping.
dd = {}
ee = {}
for k in ab_dict:
    # Subjects sharing the best score for query k ...
    v = map(lambda x: x[0], ab_dict[k])
    for k2 in v:
        # ... are reciprocal if k is among k2's best hits in the other direction.
        v2 = map(lambda x: x[0], ba_dict.get(k2, []))
        if k in v2:
            dd[k] = k2
            ee[k2] = k
for k in dd:
    v = dd[k]
    query_descr = query_db.get(k, "")
    against_descr = against_db.get(v, "")
    # output each match as a separate row
    row = [k, query_descr, v, against_descr]
    output.writerow(row)
| jrherr/bioinformatics_scripts | python_scripts/blast_to_ortho_csv.py | Python | mit | 2,394 | [
"BLAST"
] | 266d9e14b72cb2038c9cc4a61af3dff8b309b162dc4a9c21eb8acdfd84e07afe |
"""
Test the mio parametrization of Frauenheim and co-workers.
"""
import os
import glob
import numpy as np
from ase import read, FIRE, QuasiNewton, molecule
from hotbit import Hotbit, database_from_path
###
# Geometry-optimization convergence criterion (maximum residual force).
FMAX = 0.005
# ASE optimizer class used for all relaxations in this test.
OPT = FIRE
# When True, print comparison tables and run the extra formamide checks.
debug = False
###
# From Elstner et al., Phys. Rev. B 58, 7260
# Reference formamide geometry (bond lengths in Angstrom, OCN angle in
# degrees) without self-consistent charges (SCC).
noscc_db = {
    'C=O': 1.296,
    'C-N': 1.296,
    'N-H': 1.003,
    'C-H': 1.130,
    'OCN': 127.0
    }
# Same reference values with SCC enabled.
scc_db = {
    'C=O': 1.224,
    'C-N': 1.382,
    'N-H': 0.996,
    'C-H': 1.131,
    'OCN': 125.5
    }
# Select the reference set by the SCC flag.
db1 = {
    False: noscc_db,
    True: scc_db
    }
# From Kruger et al., J. Chem. Phys. 122, 114110
# Per-molecule reference geometries:
#   bond label -> ((atom index pair), reference bond length in Angstrom).
db2 = {
    'H2': {
        'H-H': ( ( 0, 1 ), 0.750 )
        },
    'C2H2': {
        'C-H': ( ( 1, 2 ), 1.075 ),
        'C-C': ( ( 0, 1 ), 1.203 )
        },
    'C2H4': {
        'C-H': ( ( 0, 2 ), 1.094 ),
        'C-C': ( ( 0, 1 ), 1.328 )
        },
    'C2H6': {
        'C-H': ( ( 0, 3 ), 1.098 ),
        'C-C': ( ( 0, 1 ), 1.501 )
        },
    'HCN': {
        'C-H': ( ( 0, 2 ), 1.078 ),
        'C-N': ( ( 0, 1 ), 1.141 )
        },
    'NH3': {
        'N-H': ( ( 0, 1 ), 1.021 )
        },
    'CH4': {
        'C-H': ( ( 0, 1 ), 1.089 )
        },
    'CO': {
        # This differs from the paper, but I believe it's a typo
        # paper says: 1.200
        'C-O': ( ( 0, 1 ), 1.100 )
        },
    'H2CO': {
        'C-H': ( ( 1, 2 ), 1.143 ),
        'C-O': ( ( 0, 1 ), 1.183 )
        },
    'CH3OH': {
        'O-H': ( ( 1, 3 ), 0.980 ),
        'C-O': ( ( 0, 1 ), 1.422 )
        },
    'H2O': {
        'O-H': ( ( 0, 1 ), 0.968 )
        },
    'N2': {
        # This differs from the paper, but I believe it's a typo
        # paper says: 1.200
        'N-N': ( ( 0, 1 ), 1.113 )
        },
    'N2H4': {
        'N-H': ( ( 0, 2 ), 1.037 ),
        # This differs from the paper, and I don't know why
        # paper says: 1.442
        'N-N': ( ( 0, 1 ), 1.407 )
        },
    'H2O2': {
        'O-H': ( ( 0, 2 ), 0.991 ),
        'O-O': ( ( 0, 1 ), 1.453 )
        },
    'CO2': {
        'C-O': ( ( 0, 1 ), 1.165 )
        }
    }
def check_q(db, name, value):
    """Compare a computed bond/angle *value* against the reference *db[name]*.

    Currently only prints the comparison in debug mode; the numeric
    assertion is intentionally left disabled.
    """
    reference = db[name]
    if debug:
        print('%10s %10.3f %10.3f' % (name, value, reference))
    # assertion intentionally disabled in the original test:
    #assert abs(value-reference) < 1e-3
def check_db(db, params):
    """Relax every molecule in *db* with Hotbit and check its geometry.

    Each molecule is built with ase.molecule, optimized with OPT down to
    FMAX, and every listed bond length is required to match the
    reference to within 0.01 Angstrom.
    """
    if debug:
        print("%10s %10s %10s ( %10s )" \
            % ( "bond", "value", "reference", "error" ))
    for mol, values in db.items():
        #if mol == 'H2O':
        if 1:
            if debug:
                print(mol)
            # Build the molecule in a large vacuum cell, non-periodic.
            a = molecule(mol)
            a.center(vacuum=10.0)
            a.set_pbc(False)
            #print a.get_chemical_symbols()
            calc = Hotbit(
                charge_density = 'Slater',
                SCC = True,
                width = 1e-6,
                txt = 'mio.out',
                **params)
            a.set_calculator(calc)
            #calc.ia.plot_table('H', 'H')
            #calc.rep.get_repulsion('H', 'H').plot()
            OPT(a, logfile='opt.log').run(fmax=FMAX)
            #print a.get_charges()
            # Compare each optimized bond length to the reference value.
            for name, ( ( i1, i2 ), refvalue ) in values.items():
                value = a.get_distance(i1, i2)
                if debug:
                    print('%10s %10.3f %10.3f ( %10.3f )' % \
                        ( name, value, refvalue, abs(value-refvalue) ))
                assert abs(value-refvalue) < 0.01
            #e = [ ]
            #for x in np.linspace(0.70, 0.80, 1000):
            #    a.set_distance(0, 1, x)
            #    e += [ ( x, a.get_potential_energy() ) ]
            #np.savetxt('e.out', e)
###
# Load the parametrization from the PBC_0_2 environment variable path.
params = database_from_path(os.getenv('PBC_0_2'))
###
# Extra formamide geometry check against Elstner et al. (debug only).
if debug:
    for SCC in [ False, True ]:
        if SCC:
            print("--- SCC ---")
        else:
            print("--- no SCC ---")
        calc = Hotbit(
            charge_density = 'Slater',
            SCC = SCC,
            verbose = True,
            verbose_SCC = True,
            mixer = {
                'name': 'anderson',
                'convergence': 1e-6,
                'mixing_constant': 0.01 },
            maxiter = 1000,
            txt = 'mio.out',
            **params)
        a = read('formamide.xyz')
        a.center(vacuum=10.0)
        a.set_pbc(False)
        a.set_calculator(calc)
        OPT(a, logfile='opt.log').run(fmax=FMAX)
        # Expected atom ordering in formamide.xyz.
        iO = 0
        iC = 1
        iN = 2
        iHC = 3
        iHN = 4
        assert a[iO].get_symbol() == 'O'
        assert a[iC].get_symbol() == 'C'
        assert a[iN].get_symbol() == 'N'
        assert a[iHC].get_symbol() == 'H'
        assert a[iHN].get_symbol() == 'H'
        check_q(db1[SCC], 'C=O', a.get_distance(iC, iO))
        check_q(db1[SCC], 'C-N', a.get_distance(iC, iN))
        check_q(db1[SCC], 'N-H', a.get_distance(iN, iHN))
        check_q(db1[SCC], 'C-H', a.get_distance(iC, iHC))
###
# Main test: relax all molecules and verify their geometries.
check_db(db2, params)
| pekkosk/hotbit | hotbit/test/pbc.py | Python | gpl-2.0 | 4,916 | [
"ASE"
] | 3caf962c7e52e55c41d53b061831f483bb8259a4372eb948c1a6c95026afea43 |
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath("."))
from custom_directives import *
from datetime import datetime
# Mocking modules allows Sphinx to work without installing Ray.
mock_modules()
assert (
"ray" not in sys.modules
), "If ray is already imported, we will not render documentation correctly!"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../../python/"))
import ray
# -- General configuration ------------------------------------------------
extensions = [
"sphinx_panels",
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx_click.ext",
"sphinx-jsonschema",
"sphinxemoji.sphinxemoji",
"sphinx_copybutton",
"sphinxcontrib.yt",
"versionwarning.extension",
"sphinx_sitemap",
"myst_nb",
"sphinx.ext.doctest",
"sphinx.ext.coverage",
"sphinx_external_toc",
"sphinx_thebe",
]
myst_enable_extensions = [
"dollarmath",
"amsmath",
"deflist",
"html_admonition",
"html_image",
"colon_fence",
"smartquotes",
"replacements",
]
# Thebe configuration for launching notebook cells within the docs.
thebe_config = {
"selector": "div.highlight",
"repository_url": "https://github.com/ray-project/ray",
"repository_branch": "master",
}
# Cache notebook outputs in _build/.jupyter_cache
# To prevent notebook execution, set this to "off". To force re-execution, set this to "force".
# To cache previous runs, set this to "cache".
jupyter_execute_notebooks = os.getenv("RUN_NOTEBOOKS", "off")
external_toc_exclude_missing = False
external_toc_path = "_toc.yml"
# There's a flaky autodoc import for "TensorFlowVariables" that fails depending on the doc structure / order
# of imports.
# autodoc_mock_imports = ["ray.experimental.tf_utils"]
# This is used to suppress warnings about explicit "toctree" directives.
suppress_warnings = ["etoc.toctree"]
versionwarning_admonition_type = "note"
versionwarning_banner_title = "Join the Ray Discuss Forums!"
FORUM_LINK = "https://discuss.ray.io"
versionwarning_messages = {
# Re-enable this after Ray Summit.
# "latest": (
# "This document is for the latest pip release. "
# 'Visit the <a href="/en/master/">master branch documentation here</a>.'
# ),
"master": (
"<b>Got questions?</b> Join "
f'<a href="{FORUM_LINK}">the Ray Community forum</a> '
"for Q&A on all things Ray, as well as to share and learn use cases "
"and best practices with the Ray community."
),
}
versionwarning_body_selector = "#main-content"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Ray"
copyright = str(datetime.now().year) + ", The Ray Team"
author = "The Ray Team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from ray import __version__ as version
# The full version, including alpha/beta/rc tags.
release = version
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# If "DOC_LIB" is found, only build that top-level navigation item.
build_one_lib = os.getenv("DOC_LIB")
all_toc_libs = [f.path for f in os.scandir(".") if f.is_dir() and "ray-" in f.path]
all_toc_libs += [
"cluster",
"tune",
"data",
"raysgd",
"train",
"rllib",
"serve",
"workflows",
]
if build_one_lib and build_one_lib in all_toc_libs:
all_toc_libs.remove(build_one_lib)
exclude_patterns += all_toc_libs
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "lovelace"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Do not check anchors for links because it produces many false positives
# and is slow (it needs to download the linked website).
linkcheck_anchors = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_book_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"repository_url": "https://github.com/ray-project/ray",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"path_to_docs": "doc/source",
"home_page_in_toc": False,
"show_navbar_depth": 0,
"launch_buttons": {
"notebook_interface": "jupyterlab",
"binderhub_url": "https://mybinder.org",
"colab_url": "https://colab.research.google.com",
},
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"Ray {release}"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/ray_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "Raydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
latex_documents = [
(master_doc, "Ray.tex", "Ray Documentation", author, "manual"),
]
# -- Options for manual page output ---------------------------------------
man_pages = [(master_doc, "ray", "Ray Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
(
master_doc,
"Ray",
"Ray Documentation",
author,
"Ray",
"Ray provides a simple, universal API for building distributed applications.",
"Miscellaneous",
),
]
# Python methods should be presented in source code order
autodoc_member_order = "bysource"
def setup(app):
app.connect("html-page-context", update_context)
# Custom CSS
app.add_css_file("css/custom.css", priority=800)
app.add_css_file(
"https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css"
)
# Custom JS
app.add_js_file(
"https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js",
defer="defer",
)
app.add_js_file("js/docsearch.js", defer="defer")
# Custom docstring processor
app.connect("autodoc-process-docstring", fix_xgb_lgbm_docs)
| ray-project/ray | doc/source/conf.py | Python | apache-2.0 | 7,916 | [
"VisIt"
] | 96725ce3c689e13037a7355823f788866c1e82b97188af2ad33f1f94baf732f3 |
"""
#;+
#; NAME:
#; galaxy.core
#; Version 1.0
#;
#; PURPOSE:
#; Core routines for galaxy analysis
#; 29-Nov-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import os, copy, sys
import numpy as np
from astropy import units as u
from astropy.io import ascii
from astropy.coordinates import SkyCoord
from xastropy.xutils import xdebug as xdb
# Class for LLS Absorption Lines
class Galaxy(object):
    """A single galaxy with a sky position and an adopted redshift.

    Attributes:
        name: str
            J-style name derived from the coordinates.
        z: float
            Adopted redshift.
        coord: SkyCoord
            ICRS sky position.
    """

    def __init__(self, ra=None, dec=None, z=0.):
        """Build the galaxy from sexagesimal RA/Dec strings and a redshift.

        Missing coordinates default to '00 00 00' / '+00 00 00'.
        """
        self.z = z
        # Coordinates (RA in hours, Dec in degrees)
        ras = '00 00 00' if ra is None else str(ra)
        decs = '+00 00 00' if dec is None else str(dec)
        self.coord = SkyCoord(ras, decs, 'icrs', unit=(u.hour, u.deg))
        # J-name built from zero-padded RA (hours) and signed Dec
        self.name = ('J'+
                    self.coord.ra.to_string(unit=u.hour,sep='',pad=True)+
                    self.coord.dec.to_string(sep='',pad=True,alwayssign=True))

    def __repr__(self):
        return ('[Galaxy: {:s} {:s} {:s}, z={:g}]'.format(
                self.name,
                 self.coord.ra.to_string(unit=u.hour,sep=':',pad=True),
                 self.coord.dec.to_string(sep=':',pad=True),
                 self.z) )
## #################################
## #################################
## TESTING
## #################################
# Smoke test: build a default Galaxy and print its repr.
if __name__ == '__main__':
    # Instantiate
    gal = Galaxy()
    print(gal)
| astronomeara/xastropy-old | xastropy/galaxy/core.py | Python | bsd-3-clause | 1,859 | [
"Galaxy"
] | 3985c97c8677747053e9a9b6258d881e5fae11ea693ce3628d96ba127700627f |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable,redefined-builtin
from tensorflow.python.ops.control_flow_ops import *
# pylint: enable=wildcard-import
def _SwitchGrad(op, *grad):
  """Gradients for a Switch op is calculated using a Merge op.
  If the switch is a loop switch, it will be visited twice. We create
  the merge on the first visit, and update the other input of the merge
  on the second visit. A next_iteration is also added on second visit.

  Args:
    op: the forward Switch operation.
    *grad: gradients for the two Switch outputs
      (grad[0]: false/Exit branch, grad[1]: true/Identity branch).

  Returns:
    A pair of gradients for the two Switch inputs (data, predicate).
    The predicate is not differentiable, so its gradient is always None.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = op._get_control_flow_context()
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if isinstance(op_ctxt, WhileContext):
    merge_grad = grad_ctxt.grad_state.switch_map.get(op)
    if merge_grad is not None:
      # This is the second time this Switch is visited. It comes from
      # the non-exit branch of the Switch, so update the second input
      # to the Merge.
      # TODO(yuanbyu): Perform shape inference with this new input.
      if grad[1] is not None:
        # pylint: disable=protected-access
        control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1],
                                             enforce_shape_invariant=False)
        # pylint: enable=protected-access
      return None, None
    elif grad[0] is not None:
      # This is the first time this Switch is visited. It comes from
      # the Exit branch, which is grad[0]. grad[1] is empty at this point.
      # Use grad[0] for both inputs to merge for now, but update the second
      # input of merge when we see this Switch the second time.
      merge_grad = merge([grad[0], grad[0]], name="b_switch")[0]
      grad_ctxt.grad_state.switch_map[op] = merge_grad
      return merge_grad, None
    else:
      # This is the first time this Switch is visited. It comes from the
      # Identity branch. Such a Switch has `None` gradient for the Exit branch,
      # meaning the output is not differentiable.
      return None, None
  elif isinstance(op_ctxt, CondContext):
    zero_grad = grad[1 - op_ctxt.branch]
    # At this point, we have created zero_grad guarded by the right switch.
    # Unfortunately, we may still get None here for not trainable data types.
    if zero_grad is None:
      # For resource variables we get None always on the other branch, so bypass
      # this.
      if op.inputs[0].dtype == dtypes.resource:
        return merge(
            [grad[op_ctxt.branch]] * 2, name="cond_resource_grad")[0], None
      return None, None
    return merge(grad, name="cond_grad")[0], None
  else:
    # No control-flow context: route each branch gradient through a Switch
    # on the same predicate and merge them back together.
    false_grad = switch(grad[0], op.inputs[1])[0]
    true_grad = switch(grad[1], op.inputs[1])[1]
    return merge([false_grad, true_grad])[0], None
# Switch and its reference-type variant share the same gradient function.
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
  """Gradients for a Merge op are calculated using a Switch op.

  Args:
    op: the forward Merge operation.
    grad: gradient w.r.t. the Merge output; the gradient for the
      value_index output is unused.

  Returns:
    One gradient per Merge input, routed to the branch that was taken.
  """
  input_op = op.inputs[0].op
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = control_flow_util.GetOutputContext(input_op)
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if isinstance(op_ctxt, WhileContext):
    # pylint: disable=protected-access
    return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
    # pylint: enable=protected-access
  elif isinstance(op_ctxt, CondContext):
    pred = op_ctxt.pred
    if grad_ctxt and grad_ctxt.grad_state:
      # This Merge node is part of a cond within a loop.
      # The backprop needs to have the value of this predicate for every
      # iteration. So we must have its values accumulated in the forward, and
      # use the accumulated values as the predicate for this backprop switch.
      grad_state = grad_ctxt.grad_state
      real_pred = grad_state.history_map.get(pred.name)
      if real_pred is None:
        # Remember the value of pred for every iteration.
        grad_ctxt = grad_state.grad_context
        grad_ctxt.Exit()
        history_pred = grad_state.AddForwardAccumulator(pred)
        grad_ctxt.Enter()
        # Add the stack pop op. If pred.op is in a (outer) CondContext,
        # the stack pop will be guarded with a switch.
        real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)
        grad_state.history_map[pred.name] = real_pred
      pred = real_pred
    # pylint: disable=protected-access
    return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
    # pylint: enable=protected-access
  else:
    # No control-flow context: use the Merge's value_index output to send
    # the gradient only to the input that actually produced the output.
    num_inputs = len(op.inputs)
    cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
    # pylint: disable=protected-access
    return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
            for i in xrange(num_inputs)]
    # pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
  """Gradient for RefMerge: identical to the (non-ref) Merge gradient."""
  return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(op, grad):
  """Gradients for an exit op are calculated using an Enter op.

  Args:
    op: the forward Exit operation.
    grad: gradient w.r.t. the Exit output; a Tensor, IndexedSlices or
      SparseTensor.

  Returns:
    The gradient entered into the backprop while-loop context, or None
    when backprop is disabled for the loop.

  Raises:
    TypeError: for second-order gradients of while loops, or for an
      unsupported gradient type.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = op._get_control_flow_context()
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if not grad_ctxt.back_prop:
    # The flag `back_prop` is set by users to suppress gradient
    # computation for this loop. If the attribute `back_prop` is false,
    # no gradient computation.
    return None
  if op_ctxt.grad_state:
    raise TypeError("Second-order gradient for while loops not supported.")
  if isinstance(grad, ops.Tensor):
    grad_ctxt.AddName(grad.name)
  else:
    if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      # Note: trailing space added so the concatenated message does not read
      # "either`ops.IndexedSlices`".
      raise TypeError(f"Type {type(grad)} not supported, must be either "
                      "`ops.IndexedSlices` or `SparseTensor`.")
    grad_ctxt.AddName(grad.values.name)
    grad_ctxt.AddName(grad.indices.name)
    dense_shape = grad.dense_shape
    if dense_shape is not None:
      grad_ctxt.AddName(dense_shape.name)
  grad_ctxt.Enter()
  # pylint: disable=protected-access
  result = control_flow_ops._Enter(
      grad, grad_ctxt.name, is_constant=False,
      parallel_iterations=grad_ctxt.parallel_iterations,
      name="b_exit")
  # pylint: enable=protected-access
  grad_ctxt.loop_enters.append(result)
  grad_ctxt.Exit()
  return result
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
  """A forward next_iteration is translated into a backprop identity.
  Note that the backprop next_iteration is added in switch grad.
  """
  # Pass the gradient through unchanged.
  return grad
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
  # RefNextIteration shares the identity gradient of NextIteration.
  return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
  """Gradients for an Enter are calculated using an Exit op.

  For loop variables, grad is the gradient so just add an exit.
  For loop invariants, we need to add an accumulator loop.

  Args:
    op: The forward Enter op.
    grad: The incoming gradient, a `Tensor` or `IndexedSlices`.

  Returns:
    The gradient routed out of the gradient while-loop context, or `grad`
    unchanged when no gradient loop context applies.

  Raises:
    TypeError: if a loop-invariant gradient is neither a `Tensor` nor an
      `IndexedSlices`.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if grad_ctxt is None:
    return grad
  if not grad_ctxt.back_prop:
    # Skip gradient computation, if the attribute `back_prop` is false.
    return grad
  if grad_ctxt.grad_state is None:
    # Pass the gradient through if we are not in a gradient while context.
    return grad
  if op.get_attr("is_constant"):
    # Add a gradient accumulator for each loop invariant.
    if isinstance(grad, ops.Tensor):
      result = grad_ctxt.AddBackpropAccumulator(op, grad)
    elif isinstance(grad, ops.IndexedSlices):
      result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)
    else:
      # TODO(yuanbyu, lukasr): Add support for SparseTensor.
      # Bug fix: a space is required between the two implicitly
      # concatenated fragments ("supported,must" -> "supported, must").
      raise TypeError(f"Type {type(grad)} not supported, "
                      "must be Tensor or Indexed Slices")
  else:
    # NOTE(review): `exit` is expected to resolve to control_flow_ops.exit
    # brought in by this module's imports, not the builtin `exit` --
    # confirm against the file's import block.
    result = exit(grad)
    grad_ctxt.loop_exits.append(result)
    grad_ctxt.ExitResult([result])
  return result
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
  # RefEnter shares its gradient semantics with Enter.
  return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
  """Stop backprop for the predicate of a while loop."""
  # The loop predicate is boolean and therefore not differentiable.
  return None
| frreiss/tensorflow-fred | tensorflow/python/ops/control_flow_grad.py | Python | apache-2.0 | 9,572 | [
"VisIt"
] | 65fa436f5d916eb0d82a01a6d928d7d34c8a973b6a65acafe9e035ab3c8c530c |
# -*- coding: utf-8 -*-
"""
OficinaActivity.py
Create Oficina Activity
Copyright 2007, NATE-LSI-EPUSP
Oficina is developed in Brazil at Escola Politécnica of
Universidade de São Paulo. NATE is part of LSI (Integrable
Systems Laboratory) and stands for Learning, Work and Entertainment
Research Group. Visit our web page:
www.lsi.usp.br/nate
Suggestions, bugs and doubts, please email oficina@lsi.usp.br
Oficina is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation version 2 of
the License.
Oficina is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with Oficina; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
Boston, MA 02110-1301 USA.
The copy of the GNU General Public License is found in the
COPYING file included in the source distribution.
Authors:
Joyce Alessandra Saul (joycealess@gmail.com)
Andre Mossinato (andremossinato@gmail.com)
Nathalia Sautchuk Patrício (nathalia.sautchuk@gmail.com)
Pedro Kayatt (pekayatt@gmail.com)
Rafael Barbolo Lopes (barbolo@gmail.com)
Alexandre A. Gonçalves Martinazzo (alexandremartinazzo@gmail.com)
Colaborators:
Bruno Gola (brunogola@gmail.com)
Group Manager:
Irene Karaguilla Ficheman (irene@lsi.usp.br)
Cientific Coordinator:
Roseli de Deus Lopes (roseli@lsi.usp.br)
UI Design (OLPC):
Eben Eliason (eben@laptop.org)
Project Coordinator (OLPC):
Manusheel Gupta (manu@laptop.org)
Project Advisor (OLPC):
Walter Bender (walter@laptop.org)
"""
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
import logging
import json
from sugar3.activity import activity
from sugar3.graphics import style
from Area import Area
from toolbox import DrawToolbarBox
import dialogs
class OficinaActivity(activity.Activity):
    """Paint (Oficina) drawing activity for Sugar.

    Hosts the drawing ``Area`` and a floating ``TextView`` (used by the
    text tool) inside a scrolled window, and persists the drawing to the
    Sugar Journal as a PNG.
    """

    def __init__(self, handle):
        """Initialize the OficinaActivity object.

        @param  self
        @param  handle
        """
        activity.Activity.__init__(self, handle)
        # Drawing is single-user: no collaboration.
        self.max_participants = 1
        logging.debug('Starting Paint activity (Oficina)')
        self._journal_images = []
        self.fixed = Gtk.Fixed()
        self._width = Gdk.Screen.width()
        self._height = Gdk.Screen.height()
        self.fixed.show()
        self.fixed.modify_bg(Gtk.StateType.NORMAL,
                             style.COLOR_WHITE.get_gdk_color())
        # Text-tool entry; it floats over the drawing and can be dragged
        # with the pointer (see __textview_mouse_move_cb).
        self.textview = Gtk.TextView()
        self.textview.set_events(Gdk.EventMask.POINTER_MOTION_MASK |
                                 Gdk.EventMask.POINTER_MOTION_HINT_MASK |
                                 Gdk.EventMask.BUTTON_PRESS_MASK |
                                 Gdk.EventMask.BUTTON_RELEASE_MASK |
                                 Gdk.EventMask.BUTTON_MOTION_MASK |
                                 Gdk.EventMask.TOUCH_MASK)
        self.textview.connect('event', self.__textview_event_cb)
        self.textview.connect("motion_notify_event",
                              self.__textview_mouse_move_cb)
        self.fixed.put(self.textview, 0, 0)
        # These attributes are used in other classes, so they should be public
        self.area = Area(self)
        self.area.show()
        self.fixed.put(self.area, 0, 0)
        self._sw = Gtk.ScrolledWindow()
        self._sw.set_kinetic_scrolling(False)
        self._sw.show()
        self._sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        self.set_canvas(self._sw)
        self.toolset_intialize_from_journal()
        toolbar_box = DrawToolbarBox(self)
        toolbar_box.show_all()
        self.connect("key_press_event", self.key_press)

        # setup self.area only once
        def map_cp(widget):
            def size_allocate_cb(widget, allocation):
                widget.disconnect(self._setup_handle)
                self.area.setup(allocation.width, allocation.height)
                self.center_area()
                self.canvas.add_with_viewport(self.fixed)
                # to remove the border, we need set the shadowtype
                # in the viewport child of the scrolledwindow
                self.canvas.get_children()[0].set_shadow_type(Gtk.ShadowType.NONE)
            self.disconnect(self._setup_handle)
            self._setup_handle = self._sw.connect('size_allocate',
                                                  size_allocate_cb)
        self._setup_handle = self.connect('map', map_cp)
        # Handle screen rotation
        Gdk.Screen.get_default().connect('size-changed', self._configure_cb)

    def _configure_cb(self, event):
        ''' Rotate the drawing after a screen rotation '''
        width = Gdk.Screen.width()
        height = Gdk.Screen.height()
        if (self._width > self._height) != (width > height):
            GObject.timeout_add(100, self.area.rotate_right, self.area)
        self._width = width
        self._height = height

    def key_press(self, widget, event):
        """Change the line size with the '+' (43) and '-' (45) keys."""
        # Bug fix: this was a leftover Python 2 debug `print` statement
        # writing to stdout; use the module's logging instead.
        logging.debug('key pressed: %s', event.keyval)
        if event.keyval == 45:  # '-'
            self.area.change_line_size(-1)
        if event.keyval == 43:  # '+'
            self.area.change_line_size(1)

    def read_file(self, file_path):
        '''Read file from Sugar Journal.'''
        logging.debug('reading file %s, mimetype: %s, title: %s',
                      file_path, self.metadata['mime_type'],
                      self.metadata['title'])
        self.area.load_from_file(file_path)

        def size_allocate_cb(widget, allocation):
            logging.error('read file size allocate')
            self.fixed.disconnect(self._setup_handle)
            width = self.area.drawing_canvas_data.get_width()
            height = self.area.drawing_canvas_data.get_height()
            if self.area.drawing_canvas is None:
                self.area.setup(width, height)
            # The scrolled window is confused with a image of the same size
            # of the canvas when the toolbars popup and the scrolls
            # keep visible.
            if height > allocation.height or width > allocation.width:
                self.canvas.set_policy(Gtk.PolicyType.AUTOMATIC,
                                       Gtk.PolicyType.AUTOMATIC)
            else:
                self.canvas.set_policy(Gtk.PolicyType.NEVER,
                                       Gtk.PolicyType.AUTOMATIC)
            self.center_area()
            self.canvas.add_with_viewport(self.fixed)
            # to remove the border, we need set the shadowtype
            # in the viewport child of the scrolledwindow
            self.canvas.get_children()[0].set_shadow_type(Gtk.ShadowType.NONE)
            self.canvas.get_children()[0].set_border_width(0)
        self.disconnect(self._setup_handle)
        self._setup_handle = self.fixed.connect('size_allocate',
                                                size_allocate_cb)
        # disassociate with journal entry to avoid overwrite (SL #1771)
        if self.metadata['mime_type'] != "image/png":
            self._jobject.object_id = None
            last_point_posi = self.metadata['title'].rfind('.')
            if last_point_posi > -1:
                title = self.metadata['title'][0:last_point_posi] + '.png'
                self.metadata['title'] = title
                logging.error('title: %s', self.metadata['title'])
        if 'images' in self.metadata:
            self._journal_images = json.loads(self.metadata['images'])

    def write_file(self, file_path):
        '''Save file on Sugar Journal. '''
        width, height = self.area.get_size_request()
        # Typo fix in the log message ("writting" -> "writing").
        logging.debug('writing %s w=%s h=%s' % (file_path, width, height))
        if self.area.text_in_progress:
            # Commit any pending text-tool edit before rasterizing.
            self.area.d.text(self.area, 0, 0)
            self.area.getout()
        self.area.drawing_canvas.write_to_png(file_path)
        self.metadata['mime_type'] = 'image/png'
        self.metadata['state'] = json.dumps(self.area.tool)
        self.metadata['images'] = json.dumps(dialogs.get_journal_images())
        logging.debug('Wrote metadata[\'state\']: %s', self.metadata['state'])

    def _get_area_displacement(self):
        """Return the point to use as top left corner in order to move
        the drawing area and center it on the canvas.
        """
        canvas_width = self.canvas.get_allocation().width
        canvas_height = self.canvas.get_allocation().height
        area_width, area_height = self.area.get_size_request()
        # Avoid 'x' and 'y' to be outside the screen
        x = max(0, (canvas_width - area_width) / 2)
        y = max(0, (canvas_height - area_height) / 2)
        return x, y

    def center_area(self):
        """Center the drawing area inside the fixed container."""
        x, y = self._get_area_displacement()
        self.fixed.move(self.area, x, y)

    def move_textview(self, dx, dy):
        """Move the floating text entry by (dx, dy) relative to the area."""
        x, y = self._get_area_displacement()
        self.fixed.move(self.textview, x + dx, y + dy)

    def toolset_intialize_from_journal(self):
        """Restore the last tool state saved in the Journal metadata."""
        try:
            self.area.tool = json.loads(self.metadata['state'])
            logging.debug('self.area.tool %s', self.area.tool)
        except Exception as e:
            # Best-effort: keep the default tool when no state was saved.
            logging.error("exception %s", e)

    def __textview_event_cb(self, widget, event):
        if event.type in (Gdk.EventType.TOUCH_BEGIN,
                          Gdk.EventType.TOUCH_CANCEL, Gdk.EventType.TOUCH_END,
                          Gdk.EventType.BUTTON_PRESS,
                          Gdk.EventType.BUTTON_RELEASE):
            x = int(event.get_coords()[1])
            y = int(event.get_coords()[2])
            if event.type in (Gdk.EventType.TOUCH_BEGIN,
                              Gdk.EventType.BUTTON_PRESS):
                # Remember where the drag started (used by the move handler).
                self._initial_textview_touch_x = x
                self._initial_textview_touch_y = y
            elif event.type in (Gdk.EventType.TOUCH_END,
                                Gdk.EventType.BUTTON_RELEASE):
                # be sure the textview don't have a selection pending
                # and put the cursor at the end of the text
                text_buf = self.textview.get_buffer()
                end_text_iter = text_buf.get_end_iter()
                text_buf.select_range(end_text_iter, end_text_iter)

    def __textview_mouse_move_cb(self, widget, event):
        x = event.x
        y = event.y
        if event.get_state() & Gdk.ModifierType.BUTTON1_MASK:
            # Drag the text entry following the pointer.
            dx = x - self._initial_textview_touch_x
            dy = y - self._initial_textview_touch_y
            tv_alloc = self.textview.get_allocation()
            self.move_textview(tv_alloc.x + dx, tv_alloc.y + dy)
| samdroid-apps/paint-activity | OficinaActivity.py | Python | gpl-2.0 | 10,961 | [
"VisIt"
] | 76e4f765e1904e4c1388adbe2ee0a6abbcb1073004819dcfb277abaa5de3f403 |
# $HeadURL: $
''' LogPolicyResultAction
'''
from DIRAC import S_OK, S_ERROR
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.PolicySystem.Actions.BaseAction import BaseAction
__RCSID__ = '$Id: $'
class LogPolicyResultAction( BaseAction ):
  '''
    Action that stores one database record per policy result found in the
    singlePolicyResults list handed to it by the policy system.
  '''

  def __init__( self, name, decissionParams, enforcementResult, singlePolicyResults,
                clients = None ):
    super( LogPolicyResultAction, self ).__init__( name, decissionParams, enforcementResult,
                                                   singlePolicyResults, clients )

    # Reuse an injected ResourceManagementClient when one is supplied;
    # otherwise create a private instance.
    availableClients = clients or {}
    if 'ResourceManagementClient' in availableClients:
      self.rmClient = availableClients[ 'ResourceManagementClient' ]
    else:
      self.rmClient = ResourceManagementClient()

  def run( self ):
    '''
      Validates the required decision parameters and every single policy
      result, then writes each of them through addOrModifyPolicyResult.
    '''

    element = self.decissionParams[ 'element' ]
    if element is None:
      return S_ERROR( 'element should not be None' )

    name = self.decissionParams[ 'name' ]
    if name is None:
      return S_ERROR( 'name should not be None' )

    statusType = self.decissionParams[ 'statusType' ]
    if statusType is None:
      return S_ERROR( 'statusType should not be None' )

    for policyResult in self.singlePolicyResults:

      status = policyResult[ 'Status' ]
      if status is None:
        return S_ERROR( 'status should not be None' )

      reason = policyResult[ 'Reason' ]
      if reason is None:
        return S_ERROR( 'reason should not be None' )

      policyName = policyResult[ 'Policy' ][ 'name' ]
      if policyName is None:
        return S_ERROR( 'policyName should not be None' )

      # Truncate reason to fit in database column
      if len( reason ) > 508:
        reason = reason[ :508 ] + '..'

      polUpdateRes = self.rmClient.addOrModifyPolicyResult( element = element,
                                                            name = name,
                                                            policyName = policyName,
                                                            statusType = statusType,
                                                            status = status,
                                                            reason = reason )
      if not polUpdateRes[ 'OK' ]:
        return polUpdateRes

    return S_OK()
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF | avedaee/DIRAC | ResourceStatusSystem/PolicySystem/Actions/LogPolicyResultAction.py | Python | gpl-3.0 | 2,857 | [
"DIRAC"
] | bf9cb7e84fafc53de4c6107150902580b4a1b163c7f4cd9f91db76ff67612f46 |
import time
from math import sqrt
import numpy as np
from ase.utils import rotate
from ase.data import covalent_radii
from ase.data.colors import jmol_colors
class EPS:
    """Render an Atoms object as an Encapsulated PostScript image.

    Drawing is delegated to matplotlib's PS backend; atoms become filled
    circles and (optionally) the unit cell becomes a set of dashed-looking
    short line segments.
    """

    def __init__(self, atoms,
                 rotation='', show_unit_cell=False, radii=None,
                 bbox=None, colors=None, scale=20):
        # Per-atom colors default to the Jmol palette indexed by Z.
        self.numbers = atoms.get_atomic_numbers()
        self.colors = colors
        if colors is None:
            self.colors = jmol_colors[self.numbers]
        # radii: None -> covalent radii; scalar -> scaled covalent radii;
        # sequence -> used as given.
        if radii is None:
            radii = covalent_radii[self.numbers]
        elif type(radii) is float:
            radii = covalent_radii[self.numbers] * radii
        else:
            radii = np.array(radii)
        natoms = len(atoms)
        # A rotation given as a string (e.g. '90x,45z') is converted to a
        # 3x3 matrix by ase.utils.rotate.
        if isinstance(rotation, str):
            rotation = rotate(rotation)
        A = atoms.get_cell()
        if show_unit_cell > 0:
            # L: segment midpoints, T: cell-axis index per segment,
            # D: half-length vector per axis (see cell_to_lines).
            L, T, D = self.cell_to_lines(A)
            C = np.empty((2, 2, 2, 3))
            for c1 in range(2):
                for c2 in range(2):
                    for c3 in range(2):
                        C[c1, c2, c3] = np.dot([c1, c2, c3], A)
            C.shape = (8, 3)
            C = np.dot(C, rotation)  # Unit cell vertices
        else:
            L = np.empty((0, 3))
            T = None
            D = None
            C = None
        nlines = len(L)
        # X holds atom positions followed by cell-line midpoints.
        X = np.empty((natoms + nlines, 3))
        R = atoms.get_positions()
        X[:natoms] = R
        X[natoms:] = L
        r2 = radii**2
        # Hide cell segments whose both endpoints fall inside some atom's
        # sphere by flagging them with T[n] = -1 (skipped in write_body).
        for n in range(nlines):
            d = D[T[n]]
            if ((((R - L[n] - d)**2).sum(1) < r2) &
                (((R - L[n] + d)**2).sum(1) < r2)).any():
                T[n] = -1
        X = np.dot(X, rotation)
        R = X[:natoms]
        if bbox is None:
            # Auto bounding box from the rotated atoms (and, for
            # show_unit_cell == 2, the cell vertices too).
            X1 = (R - radii[:, None]).min(0)
            X2 = (R + radii[:, None]).max(0)
            if show_unit_cell == 2:
                X1 = np.minimum(X1, C.min(0))
                X2 = np.maximum(X2, C.max(0))
            M = (X1 + X2) / 2
            S = 1.05 * (X2 - X1)
            w = scale * S[0]
            # Cap the width at 500 points and rescale accordingly.
            if w > 500:
                w = 500
                scale = w / S[0]
            h = scale * S[1]
            offset = np.array([scale * M[0] - w / 2, scale * M[1] - h / 2, 0])
        else:
            w = (bbox[2] - bbox[0]) * scale
            h = (bbox[3] - bbox[1]) * scale
            offset = np.array([bbox[0], bbox[1], 0]) * scale
        self.w = w
        self.h = h
        # Convert everything to image (point) coordinates.
        X *= scale
        X -= offset
        if nlines > 0:
            D = np.dot(D, rotation)[:, :2] * scale
        if C is not None:
            C *= scale
            C -= offset
        A = np.dot(A, rotation)
        A *= scale
        self.A = A
        self.X = X
        self.D = D
        self.T = T
        self.C = C
        self.natoms = natoms
        self.d = 2 * scale * radii  # atom diameters in image units

    def cell_to_lines(self, A):
        """Split the three cell vectors into short segments.

        Returns (X, T, D): segment midpoints, the cell-axis index of each
        segment, and the per-axis half-length vector.
        """
        nlines = 0
        nn = []
        for c in range(3):
            d = sqrt((A[c]**2).sum())
            # At least 2 segments per axis, roughly one per 0.3 length units.
            n = max(2, int(d / 0.3))
            nn.append(n)
            nlines += 4 * n
        X = np.empty((nlines, 3))
        T = np.empty(nlines, int)
        D = np.zeros((3, 3))
        n1 = 0
        for c in range(3):
            n = nn[c]
            dd = A[c] / (4 * n - 2)
            D[c] = dd
            P = np.arange(1, 4 * n + 1, 4)[:, None] * dd
            T[n1:] = c
            # Four parallel edges of the cell for each axis direction.
            for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
                n2 = n1 + n
                X[n1:n2] = P + i * A[(c + 1) % 3] + j * A[(c + 2) % 3]
                n1 = n2
        return X, T, D

    def write(self, filename):
        """Write the EPS file: header, drawing body, then trailer."""
        self.filename = filename
        self.write_header()
        self.write_body()
        self.write_trailer()

    def write_header(self):
        import matplotlib
        # NOTE(review): this is a lexicographic string comparison, not a
        # proper version compare -- adequate for the intended '0.8' cutoff
        # but worth confirming for exotic version strings.
        if matplotlib.__version__ <= '0.8':
            raise RuntimeError('Your version of matplotlib (%s) is too old' %
                               matplotlib.__version__)
        from matplotlib.backends.backend_ps import RendererPS, \
             GraphicsContextPS, psDefs
        self.fd = open(self.filename, 'w')
        # Standard EPS DSC comments.
        self.fd.write('%!PS-Adobe-3.0 EPSF-3.0\n')
        self.fd.write('%%Creator: G2\n')
        self.fd.write('%%CreationDate: %s\n' % time.ctime(time.time()))
        self.fd.write('%%Orientation: portrait\n')
        bbox = (0, 0, self.w, self.h)
        self.fd.write('%%%%BoundingBox: %d %d %d %d\n' % bbox)
        self.fd.write('%%EndComments\n')
        # Emit matplotlib's PostScript prolog definitions.
        Ndict = len(psDefs)
        self.fd.write('%%BeginProlog\n')
        self.fd.write('/mpldict %d dict def\n' % Ndict)
        self.fd.write('mpldict begin\n')
        for d in psDefs:
            d = d.strip()
            for l in d.split('\n'):
                self.fd.write(l.strip() + '\n')
        self.fd.write('%%EndProlog\n')
        self.fd.write('mpldict begin\n')
        self.fd.write('%d %d 0 0 clipbox\n' % (self.w, self.h))
        self.renderer = RendererPS(self.w, self.h, self.fd)

    def write_body(self):
        # Path/PathPatch exist in newer matplotlib; fall back to Polygon.
        try:
            from matplotlib.path import Path
        except ImportError:
            Path = None
            from matplotlib.patches import Circle, Polygon
        else:
            from matplotlib.patches import Circle, PathPatch
        # Painter's algorithm: draw back-to-front by z coordinate.
        indices = self.X[:, 2].argsort()
        for a in indices:
            xy = self.X[a, :2]
            if a < self.natoms:
                circle = Circle(xy, self.d[a] / 2, facecolor=self.colors[a])
                circle.draw(self.renderer)
            else:
                # Remaining entries are unit-cell segments; T[a] == -1
                # marks segments hidden inside atoms (skip them).
                a -= self.natoms
                c = self.T[a]
                if c != -1:
                    hxy = self.D[c]
                    if Path is None:
                        line = Polygon((xy + hxy, xy - hxy))
                    else:
                        line = PathPatch(Path((xy + hxy, xy - hxy)))
                    line.draw(self.renderer)

    def write_trailer(self):
        self.fd.write('end\n')
        self.fd.write('showpage\n')
        self.fd.close()
def write_eps(filename, atoms, **parameters):
    """Write an EPS image of *atoms* to *filename*.

    *atoms* may also be a single-element list of Atoms objects; extra
    keyword arguments are forwarded to the EPS constructor.
    """
    if isinstance(atoms, list):
        assert len(atoms) == 1
        atoms = atoms[0]
    writer = EPS(atoms, **parameters)
    writer.write(filename)
| slabanja/ase | ase/io/eps.py | Python | gpl-2.0 | 6,231 | [
"ASE"
] | 5a23ba7609e06203f707ef53877c90d04b5e4ef54b3a9a3878c30c00c75ab9a1 |
#
# Copyright (C) 2001-2004 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" The "parser" for compound descriptors.
I almost hesitate to document this, because it's not the prettiest
thing the world has ever seen... but it does work (for at least some
definitions of the word).
Rather than getting into the whole mess of writing a parser for the
compound descriptor expressions, I'm just using string substitutions
and python's wonderful ability to *eval* code.
It would probably be a good idea at some point to replace this with a
real parser, if only for the flexibility and intelligent error
messages that would become possible.
The general idea is that we're going to deal with expressions where
atomic descriptors have some kind of method applied to them which
reduces them to a single number for the entire composition. Compound
descriptors (those applicable to the compound as a whole) are not
operated on by anything in particular (except for standard math stuff).
Here's the general flow of things:
1) Composition descriptor references ($a, $b, etc.) are replaced with the
corresponding descriptor names using string subsitution.
(*_SubForCompoundDescriptors*)
2) Atomic descriptor references ($1, $2, etc) are replaced with lookups
into the atomic dict with "DEADBEEF" in place of the atom name.
(*_SubForAtomicVars*)
3) Calls to Calculator Functions are augmented with a reference to
the composition and atomic dictionary
(*_SubMethodArgs*)
**NOTE:**
anytime we don't know the answer for a descriptor, rather than
throwing a (completely incomprehensible) exception, we just return
-666. So bad descriptor values should stand out like sore thumbs.
"""
__DEBUG=0
from rdkit import RDConfig
import string
# we do this to allow the use of stuff in the math module
from math import *
#----------------------
# atomic descriptor section
#----------------------
# these are the methods which can be applied to ATOMIC descriptors.
knownMethods = ['SUM','MIN','MAX','MEAN','AVG','DEV','HAS']
def HAS(strArg, composList, atomDict):
  """ *Calculator Method*

    does a string search

    **Arguments**

      - strArg: the arguments in string form ("<expr>,<substring expr>")

      - composList: the composition vector

      - atomDict: the atomic dictionary

    **Returns**

      1 if the substring is found for any atom, 0 otherwise,
      -666 when the argument string is malformed

  """
  # str methods replace the long-deprecated string-module functions
  # (string.split / string.replace / string.find); behavior is unchanged.
  splitArgs = strArg.split(',')
  if len(splitArgs) > 1:
    for atom, num in composList:
      tStr = splitArgs[0].replace('DEADBEEF', atom)
      where = eval(tStr)
      what = eval(splitArgs[1])
      if where.find(what) != -1:
        return 1
    return 0
  else:
    return -666
def SUM(strArg, composList, atomDict):
  """ *Calculator Method*

    calculates the sum of a descriptor across a composition

    **Arguments**

      - strArg: the arguments in string form

      - composList: the composition vector

      - atomDict: the atomic dictionary

    **Returns**

      a float

  """
  # Weighted sum: each atom's descriptor value times its abundance.
  # str.replace supersedes the deprecated string.replace; same behavior.
  accum = 0.0
  for atom, num in composList:
    tStr = strArg.replace('DEADBEEF', atom)
    accum = accum + eval(tStr) * num
  return accum
def MEAN(strArg, composList, atomDict):
  """ *Calculator Method*

    calculates the average of a descriptor across a composition

    **Arguments**

      - strArg: the arguments in string form

      - composList: the composition vector

      - atomDict: the atomic dictionary

    **Returns**

      a float

  """
  # Abundance-weighted mean; str.replace supersedes the deprecated
  # string.replace with identical behavior.
  accum = 0.0
  nSoFar = 0
  for atom, num in composList:
    tStr = strArg.replace('DEADBEEF', atom)
    accum = accum + eval(tStr) * num
    nSoFar = nSoFar + num
  return accum / nSoFar
AVG = MEAN
def DEV(strArg, composList, atomDict):
  """ *Calculator Method*

    calculates the average deviation of a descriptor across a composition

    **Arguments**

      - strArg: the arguments in string form

      - composList: the composition vector

      - atomDict: the atomic dictionary

    **Returns**

      a float

  """
  # Mean absolute deviation around the abundance-weighted mean.
  avg = MEAN(strArg, composList, atomDict)
  accum = 0.0
  nSoFar = 0.0
  for atom, num in composList:
    tStr = strArg.replace('DEADBEEF', atom)
    accum = accum + abs(eval(tStr) - avg) * num
    nSoFar = nSoFar + num
  return accum / nSoFar
def MIN(strArg, composList, atomDict):
  """ *Calculator Method*

    calculates the minimum value of a descriptor across a composition

    **Arguments**

      - strArg: the arguments in string form

      - composList: the composition vector

      - atomDict: the atomic dictionary

    **Returns**

      a float

  """
  # NOTE: abundances are intentionally ignored here; only the per-atom
  # descriptor values enter the minimum.
  accum = []
  for atom, num in composList:
    tStr = strArg.replace('DEADBEEF', atom)
    accum.append(eval(tStr))
  return min(accum)
def MAX(strArg, composList, atomDict):
  """ *Calculator Method*

    calculates the maximum value of a descriptor across a composition

    **Arguments**

      - strArg: the arguments in string form

      - composList: the composition vector

      - atomDict: the atomic dictionary

    **Returns**

      a float

  """
  # NOTE: abundances are intentionally ignored here; only the per-atom
  # descriptor values enter the maximum.
  accum = []
  for atom, num in composList:
    tStr = strArg.replace('DEADBEEF', atom)
    accum.append(eval(tStr))
  return max(accum)
#------------------
# string replacement routines
# these are not intended to be called by clients
#------------------
def _SubForAtomicVars(cExpr,varList,dictName):
""" replace atomic variables with the appropriate dictionary lookup
*Not intended for client use*
"""
for i in xrange(len(varList)):
cExpr = string.replace(cExpr,'$%d'%(i+1),
'%s["DEADBEEF"]["%s"]'%(dictName,varList[i]))
return cExpr
def _SubForCompoundDescriptors(cExpr,varList,dictName):
""" replace compound variables with the appropriate list index
*Not intended for client use*
"""
for i in xrange(len(varList)):
cExpr = string.replace(cExpr,'$%s'%chr(ord('a')+i),
'%s["%s"]'%(dictName,varList[i]))
return cExpr
def _SubMethodArgs(cExpr,knownMethods):
""" alters the arguments of calls to calculator methods
*Not intended for client use*
This is kind of putrid (and the code ain't so pretty either)
The general idea is that the various special methods for atomic
descriptors need two extra arguments (the composition and the atomic
dict). Rather than make the user type those in, we just find
invocations of these methods and fill out the function calls using
string replacements.
"""
res = cExpr
for method in knownMethods:
p = 0
while p != -1 and p < len(res):
p = string.find(res,method,p)
if p != -1:
p = p + len(method) + 1
start = p
parenCount = 1
while parenCount and p < len(res):
if res[p] == ')':
parenCount = parenCount - 1
elif res[p] == '(':
parenCount = parenCount + 1
p = p + 1
if p <= len(res):
res = res[0:start]+"'%s',compos,atomDict"%(res[start:p-1])+res[p-1:]
return res
def CalcSingleCompoundDescriptor(compos,argVect,atomDict,propDict):
  """ calculates the value of the descriptor for a single compound

    **ARGUMENTS:**

      - compos: a vector/tuple containing the composition
        information... in the form:
        '[("Fe",1.),("Pt",2.),("Rh",0.02)]'

      - argVect: a vector/tuple with three elements:

        1) AtomicDescriptorNames:  a list/tuple of the names of the
           atomic descriptors being used. These determine the
           meaning of $1, $2, etc. in the expression

        2) CompoundDescriptorNames:  a list/tuple of the names of the
           compound descriptors being used. These determine the
           meaning of $a, $b, etc. in the expression

        3) Expr: a string containing the expression to be used to
           evaluate the final result.

      - atomDict:
        a dictionary of atomic descriptors. Each atomic entry is
        another dictionary containing the individual descriptors
        and their values

      - propVect:
        a list of descriptors for the composition.

    **RETURNS:**

      the value of the descriptor, -666 if a problem was encountered

    **NOTE:**

      - because it takes rather a lot of work to get everything set
        up to calculate a descriptor, if you are calculating the
        same descriptor for multiple compounds, you probably want to
        be calling _CalcMultipleCompoundsDescriptor()_.

  """
  # Build the eval target: substitute compound vars, atomic vars, then
  # augment calculator-method calls with (compos, atomDict) arguments.
  try:
    atomVarNames = argVect[0]
    compositionVarNames = argVect[1]
    formula = argVect[2]
    formula = _SubForCompoundDescriptors(formula,compositionVarNames,'propDict')
    formula = _SubForAtomicVars(formula,atomVarNames,'atomDict')
    evalTarget = _SubMethodArgs(formula,knownMethods)
  except:
    if __DEBUG:
      import sys,traceback
      print 'Sub Failure!'
      traceback.print_exc()
      # NOTE(review): evalTarget may be unbound here if the exception was
      # raised before its assignment -- this debug path would then raise
      # a NameError instead of the intended report.
      print evalTarget
      print propDict
      raise RuntimeError,'Failure 1'
    else:
      return -666
  # The eval below sees compos/atomDict/propDict through the local scope.
  try:
    v = eval(evalTarget)
  except:
    if __DEBUG:
      import sys,traceback
      outF = open(RDConfig.RDCodeDir+'/ml/descriptors/log.txt','a+')
      outF.write('#------------------------------\n')
      outF.write('formula: %s\n'%repr(formula))
      outF.write('target: %s\n'%repr(evalTarget))
      outF.write('propDict: %s\n'%(repr(propDict)))
      try:
        outF.write('keys: %s\n'%(repr(atomDict.keys())))
      except:
        outF.write('no atomDict\n')
      outF.close()
      print 'ick!'
      print 'formula:',formula
      print 'target:',evalTarget
      print 'propDict:',propDict
      print 'keys:',atomDict.keys()
      traceback.print_exc()
      raise RuntimeError,'Failure 2'
    else:
      v = -666
  return v
def CalcMultipleCompoundsDescriptor(composVect,argVect,atomDict,propDictList):
  """ calculates the value of the descriptor for a list of compounds

    **ARGUMENTS:**

      - composVect: a vector of vector/tuple containing the composition
        information.
        See _CalcSingleCompoundDescriptor()_ for an explanation of the elements.

      - argVect: a vector/tuple with three elements:

        1) AtomicDescriptorNames:  a list/tuple of the names of the
           atomic descriptors being used. These determine the
           meaning of $1, $2, etc. in the expression

        2) CompoundDsscriptorNames:  a list/tuple of the names of the
           compound descriptors being used. These determine the
           meaning of $a, $b, etc. in the expression

        3) Expr: a string containing the expression to be used to
           evaluate the final result.

      - atomDict:
        a dictionary of atomic descriptors. Each atomic entry is
        another dictionary containing the individual descriptors
        and their values

      - propVectList:
        a vector of vectors of descriptors for the composition.

    **RETURNS:**

      a vector containing the values of the descriptor for each
      compound. Any given entry will be -666 if problems were
      encountered

  """
  # Pre-fill the result with the error sentinel; any slot that cannot be
  # evaluated keeps -666.
  res = [-666]*len(composVect)
  # The expression is built only once and re-evaluated per compound.
  try:
    atomVarNames = argVect[0]
    compositionVarNames = argVect[1]
    formula = argVect[2]
    formula = _SubForCompoundDescriptors(formula,compositionVarNames,'propDict')
    formula = _SubForAtomicVars(formula,atomVarNames,'atomDict')
    evalTarget = _SubMethodArgs(formula,knownMethods)
  except:
    return res
  for i in xrange(len(composVect)):
    # Rebinding propDict/compos here feeds the names referenced inside
    # evalTarget through the local scope of eval.
    propDict = propDictList[i]
    compos = composVect[i]
    try:
      v = eval(evalTarget)
    except:
      v = -666
    res[i] = v
  return res
#------------
# Demo/testing code
#------------
if __name__ == '__main__':
  # Smoke test: evaluate a list of compound-descriptor expressions
  # (including one bogus entry, "foo", expected to yield -666) against a
  # two-atom Fe/Pt composition.
  piece1 = [['d1','d2'],['d1','d2']]
  aDict = {'Fe':{'d1':1.,'d2':2.},'Pt':{'d1':10.,'d2':20.}}
  pDict = {'d1':100.,'d2':200.}
  compos = [('Fe',1),('Pt',1)]
  cExprs = ["SUM($1)","SUM($1)+SUM($2)","SUM($1)+SUM($1)","MEAN($1)","DEV($2)","MAX($1)","MIN($1)/MAX($1)",
            "MIN($2)","SUM($1)/$a","sqrt($a+$b)","SUM((3.*$1)/($2))","foo"]
  for cExpr in cExprs:
    argVect = piece1 + [cExpr]
    print cExpr
    print CalcSingleCompoundDescriptor(compos,argVect,aDict,pDict)
    print CalcMultipleCompoundsDescriptor([compos,compos],argVect,aDict,[pDict,pDict])
"RDKit"
] | e8ba8e3703018478f9442fd10bab489a9a57567ad48bf12311c830ba2741ad0c |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function, unicode_literals)
import os
import sys
sys.path.append(os.getenv('HOME') + '/bin/python/libs')
# just in case notebook was not launched with the option
# %pylab inline
import pylab as plt
import numpy as np
from scipy import sparse
import matplotlib as mpl
from matplotlib.mlab import griddata
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import Ellipse
from scipy.sparse import coo_matrix
from scipy.signal import convolve2d, convolve, gaussian
from copy import deepcopy
import re
try:
import faststats
except:
faststats = None
# ==============================================================================
# Python 3 compatibility behavior
# ==============================================================================
# remap some python 2 built-ins on to py3k behavior or equivalent
# Most of them become generators
import operator
PY3 = sys.version_info[0] > 2
if PY3:
    # Python 3: dict views already behave like py2's iter* generators.
    iteritems = operator.methodcaller('items')
    itervalues = operator.methodcaller('values')
    basestring = (str, bytes)
else:
    # Python 2: alias the generator-based builtins under the py3 names so
    # the rest of the module can be written once.
    range = xrange
    from itertools import izip as zip
    iteritems = operator.methodcaller('iteritems')
    itervalues = operator.methodcaller('itervalues')
    basestring = (str, unicode)
# ==============================================================================
# ============= FIGURE SETUP FUNCTIONS =========================================
# ==============================================================================
def tight_layout():
    """Apply a tight layout to the current figure.

    Non-interactive backends (agg, macosx) get the figure-level
    set_tight_layout flag; other backends use plt.tight_layout().
    """
    from matplotlib import get_backend
    from pylab import gcf
    backend = get_backend().lower()
    if backend in ('agg', 'macosx'):
        gcf().set_tight_layout(True)
    else:
        plt.tight_layout()
def theme(ax=None, minorticks=False):
    """Update plot to make it nice and uniform.

    When *minorticks* is True, add automatic minor tick locators on both
    axes of *ax* (current axes by default) and normalize the tick width.
    """
    from matplotlib.ticker import AutoMinorLocator
    from pylab import rcParams, gca, tick_params
    if not minorticks:
        return
    target = gca() if ax is None else ax
    target.yaxis.set_minor_locator(AutoMinorLocator())
    target.xaxis.set_minor_locator(AutoMinorLocator())
    tick_params(which='both', width=rcParams['lines.linewidth'])
def steppify(x, y):
    """Steppify a curve (x, y).  Useful for manually filling histograms.

    Each y value is duplicated over a flat segment between the midpoints
    of consecutive x values; the first and last segments are mirrored
    outward so that every y value gets a full-width step.
    """
    mid = 0.5 * (x[1:] + x[:-1])
    xx = np.repeat(mid, 2).astype(float)
    yy = np.repeat(y, 2).astype(float)
    left_edge = x[0] - (mid[0] - x[0])
    right_edge = x[-1] + (x[-1] - mid[-1])
    xx = np.concatenate(([left_edge], xx, [right_edge]))
    return xx, yy
def colorify(data, vmin=None, vmax=None, cmap=plt.cm.Spectral):
    """Associate a color map to a quantity vector.

    Parameters
    ----------
    data : sequence of float
        values to map onto colors
    vmin, vmax : float, optional
        normalization bounds; default to min(data) / max(data)
    cmap : matplotlib colormap

    Returns
    -------
    colors : rgba values for each entry of data
    scalarMap : the underlying ScalarMappable (e.g. for a colorbar)
    """
    try:
        from matplotlib.colors import Normalize
    except ImportError:
        # old mpl
        from matplotlib.colors import normalize as Normalize
    # Bug fix: `vmin or min(data)` discarded an explicit vmin/vmax of 0
    # because 0 is falsy; test against None instead.
    _vmin = vmin if vmin is not None else min(data)
    _vmax = vmax if vmax is not None else max(data)
    cNorm = Normalize(vmin=_vmin, vmax=_vmax)
    scalarMap = plt.cm.ScalarMappable(norm=cNorm, cmap=cmap)
    try:
        colors = scalarMap.to_rgba(data)
    except Exception:
        # Fall back to per-element conversion when vectorized mapping fails.
        colors = list(map(scalarMap.to_rgba, data))
    return colors, scalarMap
def devectorize_axes(ax=None, dpi=None, transparent=True):
    """Convert axes contents to a png.

    This is useful when plotting many points, as the size of the saved file
    can become very large otherwise.

    Parameters
    ----------
    ax : Axes instance (optional)
        Axes to de-vectorize. If None, this uses the current active axes
        (plt.gca())
    dpi: int (optional)
        resolution of the png image.  If not specified, the default from
        'savefig.dpi' in rcParams will be used
    transparent : bool (optional)
        if True (default) then the PNG will be made transparent

    Returns
    -------
    ax : Axes instance
        the in-place modified Axes instance

    Examples
    --------
    The code can be used in the following way::

        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        x, y = np.random.random((2, 10000))
        ax.scatter(x, y)
        devectorize_axes(ax)
        plt.savefig('devectorized.pdf')

    The resulting figure will be much smaller than the vectorized version.
    """
    from matplotlib.transforms import Bbox
    from matplotlib import image
    # BytesIO (py3) -> cStringIO (py2, fast) -> StringIO (py2 fallback)
    try:
        from io import BytesIO as StringIO
    except ImportError:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
    if ax is None:
        ax = plt.gca()
    fig = ax.figure
    axlim = ax.axis()
    # setup: make all visible spines (axes & ticks) & text invisible
    # we need to set these back later, so we save their current state
    _sp = {}
    _txt_vis = [t.get_visible() for t in ax.texts]
    for k in ax.spines:
        _sp[k] = ax.spines[k].get_visible()
        ax.spines[k].set_visible(False)
    for t in ax.texts:
        t.set_visible(False)
    _xax = ax.xaxis.get_visible()
    _yax = ax.yaxis.get_visible()
    # NOTE(review): axesPatch is a deprecated alias of Axes.patch in newer
    # matplotlib releases -- confirm against the supported mpl versions.
    _patch = ax.axesPatch.get_visible()
    ax.axesPatch.set_visible(False)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    # convert canvas to PNG
    extents = ax.bbox.extents / fig.dpi
    sio = StringIO()
    plt.savefig(sio, format='png', dpi=dpi,
                transparent=transparent,
                bbox_inches=Bbox([extents[:2], extents[2:]]))
    sio.seek(0)
    im = image.imread(sio)
    # clear everything on axis (but not text)
    ax.lines = []
    ax.patches = []
    ax.tables = []
    ax.artists = []
    ax.images = []
    ax.collections = []
    # Show the image
    ax.imshow(im, extent=axlim, aspect='auto', interpolation='nearest')
    # restore all the spines & text
    for k in ax.spines:
        ax.spines[k].set_visible(_sp[k])
    for t, v in zip(ax.texts, _txt_vis):
        t.set_visible(v)
    ax.axesPatch.set_visible(_patch)
    ax.xaxis.set_visible(_xax)
    ax.yaxis.set_visible(_yax)
    if plt.isinteractive():
        plt.draw()
    return ax
def hist_with_err(x, xerr, bins=None, normed=False, step=False, **kwargs):
    """Histogram of values carrying individual Gaussian uncertainties.

    Each data point contributes a unit-area Gaussian of width ``xerr``
    instead of a delta function; the per-point kernels are summed.

    Parameters
    ----------
    x, xerr : sequences of float
        Data values and their 1-sigma uncertainties (same length).
    bins : int, sequence or None, optional
        Either the number of evaluation points (default 10) spread over the
        data range padded by 20%, or a sequence of bin edges whose midpoints
        become the evaluation points.
    normed : bool, optional
        If True, normalize the summed curve to integrate to 1.
    step : bool, optional
        If True, return a steppified version of the curve (see steppify).

    Returns
    -------
    xp, yp : ndarray
        Evaluation points and summed kernel density.
    """
    from scipy import integrate
    # scipy renamed integrate.simps to integrate.simpson; support both
    _simps = getattr(integrate, 'simps', None) or integrate.simpson
    assert (len(x) == len(xerr)), 'data size mismatch'
    _x = np.asarray(x).astype(float)
    _xerr = np.asarray(xerr).astype(float)
    # evaluation points: midpoints of given edges, or a padded linear grid
    if (bins is None) | (not hasattr(bins, '__iter__')):
        m = (_x - _xerr).min()
        M = (_x + _xerr).max()
        dx = M - m
        m -= 0.2 * dx
        M += 0.2 * dx
        N = int(bins) if bins is not None else 10
        _xp = np.linspace(m, M, N)
    else:
        _xp = 0.5 * (bins[1:] + bins[:-1])

    def normal(v, mu, sig):
        # Gaussian kernel with exponent -(v-mu)^2 / (2 sig^2); the original
        # divided by (2*sig)^2, i.e. an effective width of sig*sqrt(2)
        norm_pdf = (1. / (np.sqrt(2. * np.pi) * sig)
                    * np.exp(-0.5 * ((v - mu) / sig) ** 2))
        # renormalize numerically on the evaluation grid
        return norm_pdf / _simps(norm_pdf, x=v)

    _yp = np.array([normal(_xp, xk, xerrk)
                    for xk, xerrk in zip(_x, _xerr)]).sum(axis=0)
    if normed:
        _yp /= _simps(_yp, x=_xp)
    if step:
        return steppify(_xp, _yp)
    else:
        return _xp, _yp
def hist_with_err_bootstrap(x, xerr, bins=None, normed=False, nsample=50, step=False, **kwargs):
    """Bootstrap resampling of hist_with_err.

    Evaluates hist_with_err on the full dataset once, then on
    ``nsample - 1`` resamples drawn with replacement, so the spread of the
    returned curves reflects sampling uncertainty.

    Returns
    -------
    x0 : ndarray
        Evaluation points, shared by all realizations.
    yn : ndarray of shape (nsample, len(x0))
        Row 0 is the estimate on the original data; rows 1..nsample-1 are
        the bootstrap realizations.
    """
    x = np.asarray(x)
    xerr = np.asarray(xerr)
    x0, y0 = hist_with_err(x, xerr, bins=bins, normed=normed, step=step, **kwargs)
    yn = np.empty((nsample, len(y0)), dtype=float)
    yn[0, :] = y0
    # BUGFIX: the original wrote to row k, clobbering row 0 and leaving the
    # last row uninitialized; resamples now fill rows 1..nsample-1.
    for k in range(nsample - 1):
        idx = np.random.randint(0, len(x), len(x))
        yn[k + 1, :] = hist_with_err(x[idx], xerr[idx], bins=bins,
                                     normed=normed, step=step, **kwargs)[1]
    return x0, yn
def __get_hesse_bins__(_x, _xerr=0., bins=None, margin=0.2):
    """Return evaluation points for kernel histograms.

    When ``bins`` is a sequence of edges, the midpoints of consecutive
    edges are returned.  Otherwise a linear grid of ``bins`` points
    (default 10) is built over the data range (widened by ``_xerr``) and
    padded by ``margin`` times its width on each side.
    """
    if bins is not None and hasattr(bins, '__iter__'):
        return 0.5 * (bins[1:] + bins[:-1])
    lo = (_x - _xerr).min()
    hi = (_x + _xerr).max()
    pad = margin * (hi - lo)
    npts = 10 if bins is None else int(bins)
    return np.linspace(lo - pad, hi + pad, npts)
def scatter_contour(x, y,
                    levels=10,
                    bins=40,
                    threshold=50,
                    log_counts=False,
                    histogram2d_args={},
                    plot_args={},
                    contour_args={},
                    ax=None):
    """Scatter plot with contour over dense regions

    Parameters
    ----------
    x, y : arrays
        x and y data for the contour plot
    levels : integer or array (optional, default=10)
        number of contour levels, or array of contour levels
    bins : int
        NOTE(review): currently unused -- bin counts must be supplied via
        ``histogram2d_args`` instead; confirm before relying on it
    threshold : float (default=50)
        number of points per 2D bin at which to begin drawing contours
    log_counts : boolean (optional)
        if True, contour levels are the base-10 logarithm of bin counts.
    histogram2d_args : dict
        keyword arguments passed to numpy.histogram2d
        see doc string of numpy.histogram2d for more information
    plot_args : dict
        keyword arguments passed to pylab.plot (for the sparse points)
    contour_args : dict
        keyword arguments passed to pylab.contourf
        see doc string of pylab.contourf for more information
    ax : pylab.Axes instance
        the axes on which to plot. If not specified, the current
        axes will be used
    """
    if ax is None:
        ax = plt.gca()
    H, xbins, ybins = np.histogram2d(x, y, **histogram2d_args)
    # optionally work in log10 counts (the threshold follows the transform)
    if log_counts:
        H = np.log10(1 + H)
        threshold = np.log10(1 + threshold)
    levels = np.asarray(levels)
    # scalar `levels`: build that many levels from threshold up to the max
    if levels.size == 1:
        levels = np.linspace(threshold, H.max(), levels)
    extent = [xbins[0], xbins[-1], ybins[0], ybins[-1]]
    i_min = np.argmin(levels)
    # draw a zero-width line: this gives us the outer polygon to
    # reduce the number of points we draw
    # somewhat hackish... we could probably get the same info from
    # the filled contour below.
    outline = ax.contour(H.T, levels[i_min:i_min + 1],
                         linewidths=0, extent=extent)
    try:
        outer_poly = outline.allsegs[0][0]
        ax.contourf(H.T, levels, extent=extent, **contour_args)
        X = np.hstack([x[:, None], y[:, None]])
        try:
            # this works in newer matplotlib versions
            from matplotlib.path import Path
            points_inside = Path(outer_poly).contains_points(X)
        except:
            # this works in older matplotlib versions
            import matplotlib.nxutils as nx
            points_inside = nx.points_inside_poly(X, outer_poly)
        # only plot points outside the lowest contour; the filled contours
        # already represent the dense interior
        Xplot = X[~points_inside]
        ax.plot(Xplot[:, 0], Xplot[:, 1], zorder=1, **plot_args)
    except IndexError:
        # no contour found at this threshold: fall back to a plain plot
        ax.plot(x, y, zorder=1, **plot_args)
def latex_float(f, precision=0.2, delimiter=r'\times'):
    """Render a float as LaTeX scientific notation.

    Values whose '%g' representation carries an exponent become
    ``<base><delimiter>10^{<exp>}``; other values are returned unchanged.
    ``precision`` is spliced into the format spec (default '0.2g').
    """
    text = "{0:{1}g}".format(f, precision)
    if "e" not in text:
        return text
    base, exponent = text.split("e")
    return (r"{0}" + delimiter + "10^{{{1}}}").format(base, int(exponent))
# ==============================================================================
# ==============================================================================
# ==============================================================================
def ezrc(fontSize=22., lineWidth=2., labelSize=None, tickmajorsize=10,
         tickminorsize=5, figsize=(8, 6)):
    """
    slides - Define params to make pretty fig for slides

    Mutates matplotlib's global rc state: fonts, line widths, tick sizes,
    legend style and LaTeX text rendering.

    Parameters
    ----------
    fontSize : float
        base font size (points)
    lineWidth : float
        line and axes width
    labelSize : float, optional
        axis-label size; defaults to fontSize + 5
    tickmajorsize, tickminorsize : float
        major/minor tick lengths
    figsize : tuple
        default figure size in inches

    NOTE(review): ``usetex=True`` requires a working LaTeX installation,
    and newer matplotlib (>= 3.3) expects ``text.latex.preamble`` to be a
    string rather than a list -- confirm against the matplotlib in use.
    """
    from pylab import rc, rcParams
    if labelSize is None:
        labelSize = fontSize + 5
    rc('figure', figsize=figsize)
    rc('lines', linewidth=lineWidth)
    rcParams['grid.linewidth'] = lineWidth
    rcParams['font.sans-serif'] = ['Helvetica']
    rcParams['font.serif'] = ['Helvetica']
    rcParams['font.family'] = ['Times New Roman']
    rc('font', size=fontSize, family='serif', weight='bold')
    rc('axes', linewidth=lineWidth, labelsize=labelSize)
    rc('legend', borderpad=0.1, markerscale=1., fancybox=False)
    rc('text', usetex=True)
    rc('image', aspect='auto')
    rc('ps', useafm=True, fonttype=3)
    rcParams['xtick.major.size'] = tickmajorsize
    rcParams['xtick.minor.size'] = tickminorsize
    rcParams['ytick.major.size'] = tickmajorsize
    rcParams['ytick.minor.size'] = tickminorsize
    rcParams['text.latex.preamble'] = ["\\usepackage{amsmath}"]
def hide_axis(where, ax=None):
    """Hide one or more axis spines and relocate the ticks accordingly.

    Parameters
    ----------
    where : str or sequence of str
        Spine name(s) among 'top', 'bottom', 'left', 'right'.
    ax : Axes, optional
        Target axes; defaults to the current axes.
    """
    ax = ax or plt.gca()
    sides = [where] if type(where) == str else where
    # paint the requested spines invisible
    for name, spine in ax.spines.items():
        if name in sides:
            spine.set_color('None')
    # move the x ticks to whichever horizontal spine survives
    if 'top' in sides and 'bottom' in sides:
        ax.xaxis.set_ticks_position('none')
    elif 'top' in sides:
        ax.xaxis.set_ticks_position('bottom')
    elif 'bottom' in sides:
        ax.xaxis.set_ticks_position('top')
    # same for the y ticks and the vertical spines
    if 'left' in sides and 'right' in sides:
        ax.yaxis.set_ticks_position('none')
    elif 'left' in sides:
        ax.yaxis.set_ticks_position('right')
    elif 'right' in sides:
        ax.yaxis.set_ticks_position('left')
    plt.draw_if_interactive()
def despine(fig=None, ax=None, top=True, right=True,
            left=False, bottom=False):
    """Remove the top and right spines from plot(s).

    Parameters
    ----------
    fig : matplotlib figure, optional
        figure to despine all axes of; defaults to the current figure when
        neither ``fig`` nor ``ax`` is given
    ax : matplotlib axes, optional
        specific axes object to despine (ignored when ``fig`` is given)
    top, right, left, bottom : boolean
        if True, remove that spine
    """
    if fig is None and ax is None:
        axes = plt.gcf().axes
    elif fig is not None:
        axes = fig.axes
    else:
        axes = [ax]
    # explicit mapping instead of the fragile locals()[side] lookup
    remove = {"top": top, "right": right, "left": left, "bottom": bottom}
    for ax_i in axes:
        for side, hide in remove.items():
            ax_i.spines[side].set_visible(not hide)
def shift_axis(which, delta, where='outward', ax=None):
    """Move one or more axis spines by ``delta`` points.

    Parameters
    ----------
    which : str or sequence of str
        Spine name(s) among 'top', 'bottom', 'left', 'right'.
    delta : float
        Displacement passed to ``Spine.set_position``.
    where : str
        Position type ('outward', 'axes' or 'data').
    ax : Axes, optional
        Target axes; defaults to the current axes.
    """
    ax = ax or plt.gca()
    if type(which) == str:
        _w = [which]
    else:
        _w = which
    # remember scales and labels before moving the spines -- presumably
    # set_position resets them on some matplotlib versions (TODO confirm)
    scales = (ax.xaxis.get_scale(), ax.yaxis.get_scale())
    lbls = (ax.xaxis.get_label(), ax.yaxis.get_label())
    for wk in _w:
        ax.spines[wk].set_position((where, delta))
    # restore the saved state
    ax.set_xscale(scales[0])
    ax.set_yscale(scales[1])
    ax.xaxis.set_label(lbls[0])
    ax.yaxis.set_label(lbls[1])
    plt.draw_if_interactive()
class AutoLocator(MaxNLocator):
    """MaxNLocator preconfigured with 'nice' step multiples (1, 2, 5, 10).

    The default ``steps`` is a tuple rather than a list so that no mutable
    default object is shared between instantiations.
    """
    def __init__(self, nbins=9, steps=(1, 2, 5, 10), **kwargs):
        MaxNLocator.__init__(self, nbins=nbins, steps=steps, **kwargs)
def setMargins(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None):
    """
    Tune the subplot layout via the meanings (and suggested defaults) are::
        left = 0.125 # the left side of the subplots of the figure
        right = 0.9 # the right side of the subplots of the figure
        bottom = 0.1 # the bottom of the subplots of the figure
        top = 0.9 # the top of the subplots of the figure
        wspace = 0.2 # the amount of width reserved for blank space between subplots
        hspace = 0.2 # the amount of height reserved for white space between subplots
    The actual defaults are controlled by the rc file

    Thin wrapper over ``plt.subplots_adjust`` that also triggers a redraw
    in interactive mode.  Parameters left as None keep their current value.
    """
    plt.subplots_adjust(left, bottom, right, top, wspace, hspace)
    plt.draw_if_interactive()
def setNmajors(xval=None, yval=None, ax=None, mode='auto', **kwargs):
    """
    setNmajors - set major tick number
    see figure.MaxNLocator for kwargs

    Parameters
    ----------
    xval, yval : int, optional
        number of major ticks on the x / y axis; axes whose value is None
        are left untouched
    ax : Axes, optional
        target axes; defaults to the current axes
    mode : str
        'fixed' uses MaxNLocator directly; 'auto' uses the module's
        AutoLocator (MaxNLocator with 'nice' steps); anything else is a
        no-op apart from the redraw
    """
    if ax is None:
        ax = plt.gca()
    if (mode == 'fixed'):
        if xval is not None:
            ax.xaxis.set_major_locator(MaxNLocator(xval, **kwargs))
        if yval is not None:
            ax.yaxis.set_major_locator(MaxNLocator(yval, **kwargs))
    elif (mode == 'auto'):
        if xval is not None:
            ax.xaxis.set_major_locator(AutoLocator(xval, **kwargs))
        if yval is not None:
            ax.yaxis.set_major_locator(AutoLocator(yval, **kwargs))
    plt.draw_if_interactive()
def crazy_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None, reinterp=None):
    """
    Compute the sparse bi-dimensional histogram of two data samples where *x*,
    and *y* are 1-D sequences of the same length. If *weights* is None
    (default), this is a histogram of the number of occurences of the
    observations at (x[i], y[i]).
    If *weights* is specified, it specifies values at the coordinate (x[i],
    y[i]). These values are accumulated for each bin and then reduced according
    to *reduce_w* function, which defaults to numpy's sum function (np.sum).
    (If *weights* is specified, it must also be a 1-D sequence of the same
    length as *x* and *y*.)

    INPUTS:
        x       ndarray[ndim=1]     first data sample coordinates
        y       ndarray[ndim=1]     second data sample coordinates
    KEYWORDS:
        bins    int or [int, int]   number of bins (nx=ny=bins, or nx, ny = bins)
        weights ndarray[ndim=1]     values weighing each sample (x_i, y_i)
        reduce_w callable           reduction of the accumulated weights per bin
                                    (default np.sum)
        NULL    value type          filling missing data value
        reinterp str                in [None, 'nn', 'linear']; if set, missing
                                    data are reinterpolated with mlab.griddata
                                    within the convex hull of the data
    OUTPUTS:
        B       ndarray[ndim=2]     bi-dimensional histogram
        extent  tuple(4)            (xmin, xmax, ymin, ymax) of the histogram
        steps   tuple(2)            (dx, dy) bin size in x and y direction
    """
    # bin counts per dimension
    try:
        nx, ny = bins
    except TypeError:
        nx = ny = bins
    if weights is None:
        weights = np.ones(np.asarray(x).size)
    if reduce_w is None:
        reduce_w = np.sum
    elif not hasattr(reduce_w, '__call__'):
        raise TypeError('reduce function is not callable')
    # cull non-finite samples; cast to float so the in-place bin arithmetic
    # below cannot fail on integer inputs
    finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))
    _x = np.asarray(x)[finite_inds].astype(float)
    _y = np.asarray(y)[finite_inds].astype(float)
    _w = np.asarray(weights)[finite_inds]
    if not (len(_x) == len(_y)) & (len(_y) == len(_w)):
        raise ValueError('Shape mismatch between x, y, and weights: {}, {}, {}'.format(_x.shape, _y.shape, _w.shape))
    xmin, xmax = _x.min(), _x.max()
    ymin, ymax = _y.min(), _y.max()
    dx = (xmax - xmin) / (nx - 1.0)
    dy = (ymax - ymin) / (ny - 1.0)
    # Basically, this is just doing what np.digitize does with one less copy
    xyi = np.vstack((_x, _y)).T
    xyi -= [xmin, ymin]
    xyi /= [dx, dy]
    xyi = np.floor(xyi, xyi).T
    # xyi contains the bin of each point as a 2d array [(xi, yi)];
    # accumulate the weights per occupied bin
    d = {}
    for e, k in enumerate(xyi.T):
        d.setdefault((k[0], k[1]), []).append(_w[e])
    # BUGFIX: Python 3 dict views cannot be turned into index arrays
    # directly; materialize the keys once so indices and reduced weights
    # stay aligned
    keys = list(d.keys())
    _xyi = np.array(keys).T.astype(int)
    _w = np.array([reduce_w(d[key]) for key in keys])
    # exploit a sparse coo_matrix to build the 2D histogram...
    _grid = sparse.coo_matrix((_w, _xyi), shape=(nx, ny))
    if reinterp is None:
        # convert sparse to array with filled value
        # grid.toarray() does not account for filled value
        # sparse.coo.coo_todense() actually adds values to existing ones,
        # i.e. not what we want -> brute force
        if NULL is None:
            B = _grid.toarray()
        else:  # brute force only when needed
            B = np.zeros(_grid.shape, dtype=_grid.dtype)
            B.fill(NULL)
            for (ci, ri, v) in zip(_grid.col, _grid.row, _grid.data):
                B[ri, ci] = v
    else:  # reinterp
        xi = np.arange(nx, dtype=float)
        yi = np.arange(ny, dtype=float)
        B = griddata(_grid.col.astype(float), _grid.row.astype(float), _grid.data, xi, yi, interp=reinterp)
    return B, (xmin, xmax, ymin, ymax), (dx, dy)
def histplot(data, bins=10, range=None, normed=False, weights=None, density=None, ax=None, **kwargs):
    """ plot an histogram of data `a la R`: only bottom and left axis, with
    dots at the bottom to represent the sample

    Parameters
    ----------
    data : array-like
        sample to histogram
    bins, range, weights, density :
        forwarded to np.histogram
    normed : bool
        legacy alias for ``density``, kept for backward compatibility;
        ignored when ``density`` is given explicitly
    ax : Axes, optional
        axes to draw on; defaults to the current axes
    **kwargs :
        forwarded to the plot command drawing the histogram curve

    Example
    -------
        import numpy as np
        x = np.random.normal(0, 1, 1e3)
        histplot(x, bins=50, density=True, ls='steps-mid')
    """
    # np.histogram dropped the `normed` argument (numpy >= 1.24): map the
    # legacy flag onto `density` and pass everything by keyword.
    if density is None and normed:
        density = True
    h, b = np.histogram(data, bins=bins, range=range, weights=weights,
                        density=density)
    if ax is None:
        ax = plt.gca()
    # draw the curve through the bin centers
    x = 0.5 * (b[:-1] + b[1:])
    l = ax.plot(x, h, **kwargs)
    # R-like look: only bottom/left spines, shifted outward
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    for side in ('bottom', 'left'):
        ax.spines[side].set_position(('outward', 10))
    # rug: mark every sample just below the axis, in the curve's color
    ylim = ax.get_ylim()
    ax.plot(data, -0.02 * max(ylim) * np.ones(len(data)), '|',
            color=l[0].get_color())
    ax.set_ylim(-0.02 * max(ylim), max(ylim))
def scatter_plot(x, y, ellipse=False, levels=[0.99, 0.95, 0.68], color='w', ax=None, **kwargs):
    """Scatter plot with optional KDE contours and covariance ellipse.

    Parameters
    ----------
    x, y : arrays
        sample coordinates
    ellipse : bool
        if True, overlay the error ellipse of the sample covariance
    levels : sequence of float
        fractions of the KDE maximum used as contour levels
    color : str
        contour color
    ax : Axes, optional
        target axes; defaults to the current axes

    NOTE(review): ``faststats`` is an optional module-level import (not
    visible in this chunk); contours are skipped when it is unavailable.
    """
    if ax is None:
        ax = plt.gca()
    if faststats is not None:
        # fast KDE image on a 50x50 grid, contoured at fractions of the peak
        im, e = faststats.fastkde.fastkde(x, y, (50, 50), adjust=2.)
        V = im.max() * np.asarray(levels)
        plt.contour(im.T, levels=V, origin='lower', extent=e, linewidths=[1, 2, 3], colors=color)
    ax.plot(x, y, 'b,', alpha=0.3, zorder=-1, rasterized=True)
    if ellipse is True:
        # overlay the 1-sigma covariance ellipse of the sample
        data = np.vstack([x, y])
        mu = np.mean(data, axis=1)
        cov = np.cov(data)
        error_ellipse(mu, cov, ax=plt.gca(), edgecolor="g", ls="dashed", lw=4, zorder=2)
def error_ellipse(mu, cov, ax=None, factor=1.0, **kwargs):
    """Draw the error ellipse of a 2x2 covariance matrix at position mu.

    The ellipse semi-axes come from the singular values of ``cov`` (scaled
    by ``factor``) and its orientation from the leading singular vector.

    Returns
    -------
    Ellipse
        The patch that was added to the axes.
    """
    # sane defaults unless the caller overrides them
    facecolor = kwargs.pop('facecolor', 'none')
    edgecolor = kwargs.pop('edgecolor', 'k')
    cx, cy = mu
    U, S, V = np.linalg.svd(cov)
    angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
    patch = Ellipse(xy=[cx, cy],
                    width=2 * np.sqrt(S[0]) * factor,
                    height=2 * np.sqrt(S[1]) * factor,
                    angle=angle,
                    facecolor=facecolor, edgecolor=edgecolor, **kwargs)
    target = ax if ax is not None else plt.gca()
    target.add_patch(patch)
    return patch
def bayesian_blocks(t):
    """Bayesian Blocks Implementation
    By Jake Vanderplas.  License: BSD
    Based on algorithm outlined in http://adsabs.harvard.edu/abs/2012arXiv1207.5578S

    Parameters
    ----------
    t : ndarray, length N
        data to be histogrammed

    Returns
    -------
    bins : ndarray
        array containing the (N+1) bin edges

    Notes
    -----
    This is an incomplete implementation: it may fail for some
    datasets.  Alternate fitness functions and prior forms can
    be found in the paper listed above.
    """
    # copy and sort the array
    t = np.sort(t)
    n_points = t.size

    # cell edges: data extremes plus midpoints between consecutive samples
    edges = np.concatenate([t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]])
    block_length = t[-1] - edges

    # best[k]: maximal fitness using the first k+1 cells
    # last[k]: start index of the optimal final block of that solution
    best = np.zeros(n_points, dtype=float)
    last = np.zeros(n_points, dtype=int)

    # -----------------------------------------------------------------
    # Start with first data cell; add one cell at each iteration
    # -----------------------------------------------------------------
    for k in range(n_points):
        # width and count of the trailing block for every possible location
        # of the k-th changepoint (each cell holds exactly one datum, so
        # the trailing counts are simply k+1, k, ..., 1)
        widths = block_length[:k + 1] - block_length[k + 1]
        counts = np.arange(k + 1, 0, -1, dtype=float)

        # log-likelihood fitness, with a constant prior (4) per changepoint
        fitness = counts * (np.log(counts) - np.log(widths))
        fitness -= 4
        fitness[1:] += best[:k]

        # the argmax locates the k-th changepoint
        winner = np.argmax(fitness)
        last[k] = winner
        best[k] = fitness[winner]

    # -----------------------------------------------------------------
    # Recover changepoints by iteratively peeling off the last block
    # -----------------------------------------------------------------
    cps = []
    idx = n_points
    while idx > 0:
        cps.append(idx)
        idx = last[idx - 1]
    cps.append(0)
    return edges[np.array(cps[::-1])]
def quantiles(x, qlist=[2.5, 25, 50, 75, 97.5]):
    """Compute quantiles from an array.

    Quantiles are points taken at regular intervals from the cumulative
    distribution function (CDF) of a random variable; the 50% quantile is
    the median.

    Parameters
    ----------
    x : ndarray
        variable to evaluate from; 2d input is sorted along its last axis
    qlist : sequence of float
        quantile fractions to estimate, in percent

    Returns
    -------
    dict or None
        mapping from requested fraction to the corresponding quantile, or
        None (after a printed message) when there are too few elements
    """
    # work on a copy so the caller's array is never reordered
    data = x.copy()
    if data.ndim > 1:
        # multivariate trace: sort each row (transpose-sort-transpose)
        ordered = np.transpose(np.sort(np.transpose(data)))
    else:
        ordered = np.sort(data)
    try:
        return {q: ordered[int(len(ordered) * q / 100.0)] for q in qlist}
    except IndexError:
        print("Too few elements for quantile calculation")
def get_optbins(data, method='freedman', ret='N'):
    """ Determine the optimal binning of the data based on common estimators
    and returns either the number of bins or the width to use.

    input:
        data    1d dataset to estimate from
    keywords:
        method  the method to use: str in {sturge, scott, freedman, bayesian}
        ret     'N' returns the number of bins / edges, 'W' the width(s);
                anything else (or an unknown method) returns None

    refs:
        Sturges, H. A. (1926)."The choice of a class interval". J. American Statistical Association, 65-66
        Scott, David W. (1979), "On optimal and data-based histograms". Biometrika, 66, 605-610
        Freedman, D.; Diaconis, P. (1981). "On the histogram as a density estimator: L2 theory".
            Zeitschrift fur Wahrscheinlichkeitstheorie und verwandte Gebiete, 57, 453-476
        Scargle, J.D. et al (2012) "Studies in Astronomical Time Series Analysis. VI. Bayesian
            Block Representations."
    """
    x = np.asarray(data)
    n = x.size
    span = x.max() - x.min()

    def _sturge():
        if n <= 30:
            print("Warning: Sturge estimator can perform poorly for small samples")
        nbins = int(np.log(n) + 1)
        return span / nbins, nbins

    def _scott():
        width = 3.5 * np.std(x) * float(n) ** (-1. / 3.)
        return width, int(span / width)

    def _freedman():
        q = quantiles(x, [25, 75])
        width = 2 * (q[75] - q[25]) * float(n) ** (-1. / 3.)
        return width, int(span / width)

    def _bayesian():
        edges = bayesian_blocks(x)
        return np.diff(edges), edges

    dispatch = {'sturge': _sturge, 'scott': _scott,
                'freedman': _freedman, 'bayesian': _bayesian}
    key = method.lower()
    if key not in dispatch:
        return None
    width, nbins = dispatch[key]()
    if ret.lower() == 'n':
        return nbins
    elif ret.lower() == 'w':
        return width
def plotMAP(x, ax=None, error=0.01, frac=[0.65,0.95, 0.975], usehpd=True,
            hist={'histtype':'step'}, vlines={}, fill={},
            optbins={'method':'freedman'}, *args, **kwargs):
    """ Plot the MAP of a given sample and add statistical info

    If not specified, binning is estimated via get_optbins.

    inputs:
        x       dataset
    keywords
        ax      axe object to use during plotting
        error   error to consider on the estimations
                (NOTE(review): currently unused in the body)
        frac    fractions of sample to highlight (def 65%, 95%, 97.5%)
        usehpd  if set, uses hpd to estimate the confidence intervals
                (NOTE(review): `hpd` is defined elsewhere in this module --
                confirm it is importable before using usehpd=True)
        hist    keywords forwarded to hist command
        optbins keywords forwarded to get_optbins
        vlines  keywords forwarded to vlines command
        fill    keywords forwarded to fill command
    """
    _x = np.ravel(x)
    if ax is None:
        ax = plt.gca()
    # histogram with either caller-provided or estimated binning
    if not ('bins' in hist):
        bins = get_optbins(x, method=optbins['method'], ret='N')
        n, b, p = ax.hist(_x, bins=bins, *args, **hist)
    else:
        n, b, p = ax.hist(_x, *args, **hist)
    c = 0.5 * (b[:-1] + b[1:])
    # dc = 0.5 * (b[:-1] - b[1:])
    ind = n.argmax()
    _ylim = ax.get_ylim()
    if usehpd is True:
        # highest-posterior-density intervals: mark the 99% interval and
        # shade the requested fractions
        _hpd = hpd(_x, 1 - 0.01)
        ax.vlines(_hpd, _ylim[0], _ylim[1], **vlines)
        for k in frac:
            nx = hpd(_x, 1. - k)
            ax.fill_between(nx, _ylim[0], _ylim[1], alpha=0.4 / float(len(frac)), zorder=-1, **fill)
    else:
        # fall back to the histogram mode and cumulative-count intervals
        ax.vlines(c[ind], _ylim[0], _ylim[1], **vlines)
        cx = c[ n.argsort() ][::-1]
        cn = n[ n.argsort() ][::-1].cumsum()
        for k in frac:
            sx = cx[np.where(cn <= cn[-1] * float(k))]
            sx = [sx.min(), sx.max()]
            ax.fill_between(sx, _ylim[0], _ylim[1], alpha=0.4 / float(len(frac)), zorder=-1, **fill)
    theme(ax=ax)
    ax.set_xlabel(r'Values')
    ax.set_ylabel(r'Counts')
def calc_min_interval(x, alpha):
    """Internal method to determine the minimum interval of
    a given width.

    ``x`` is expected to be sorted in ascending order; ``alpha`` is the
    excluded probability mass, so the returned interval spans a fraction
    (1 - alpha) of the samples.

    Returns [lo, hi], or [None, None] when the trace is too short.
    """
    min_int = [None, None]
    try:
        n = len(x)
        # number of samples separating the interval endpoints
        span = int(n * (1 - alpha))
        best_width = np.inf
        # slide a window of fixed sample count and keep the narrowest one
        for end in range(span, n):
            start = end - span
            width = x[end] - x[start]
            if width < best_width:
                best_width = width
                min_int = [x[start], x[end]]
        return min_int
    except IndexError:
        print('Too few elements for interval calculation')
        return [None, None]
def getPercentileLevels(h, frac=[0.5, 0.65, 0.95, 0.975]):
    """
    Return image levels that correspond to given percentile values.

    Uses the cumulative distribution of the sorted image density values,
    hence this also works for any nd-array.

    inputs:
        h   array
    outputs:
        res level value(s) matching the requested fraction(s)
    keywords:
        frac    sample fractions (percentiles); scalar or iterable
                default: 50%, 65%, 95%, and 97.5%
    """
    # iterable fractions: recurse per fraction and stack the results
    if getattr(frac, '__iter__', False):
        return np.asarray([getPercentileLevels(h, fk) for fk in frac])
    if not ((frac >= 0.) & (frac < 1.)):
        raise ValueError("Expecting a sample fraction in 'frac' and got %f" % frac)
    # flatten to 1d and sort in place (note: this mutates the input's
    # underlying buffer, exactly as the original implementation did)
    val = h.ravel()
    val.sort()
    descending = val[::-1]
    # normalized cumulative distribution, largest values first
    cum = descending.cumsum()
    cum = (cum - cum[0]) / (cum[-1] - cum[0])
    # largest index whose cumulative share stays within the fraction
    ind = np.where(cum <= cum[-1] * float(frac))[0].max()
    return descending[ind]
def fastkde(x, y, gridsize=(200, 200), extents=None, nocorrelation=False,
            weights=None, adjust=1.):
    """
    A fft-based Gaussian kernel density estimate (KDE)
    for computing the KDE on a regular grid

    Note that this is a different use case than scipy's original
    scipy.stats.kde.gaussian_kde

    IMPLEMENTATION
    --------------
    Performs a gaussian kernel density estimate over a regular grid using a
    convolution of the gaussian kernel with a 2D histogram of the data.
    The histogram is accumulated through a sparse coo_matrix, which is faster
    and leaner than numpy.histogram2d.  Boundary conditions are corrected with
    a symmetric/reflection condition, so the data limits do not bias the
    estimate.  Typically several orders of magnitude faster than
    scipy.stats.kde.gaussian_kde for large samples.

    Parameters
    ----------
    x, y: ndarray[ndim=1]
        The x-coords, y-coords of the input data points respectively
    gridsize: tuple
        A (nx,ny) tuple of the size of the output grid (default: 200x200)
    extents: (xmin, xmax, ymin, ymax) tuple
        tuple of the extents of output grid (default: extent of input data)
    nocorrelation: bool
        If True, the correlation between the x and y coords will be ignored
        when preforming the KDE. (default: False)
    weights: ndarray[ndim=1]
        An array of the same shape as x & y that weights each sample (x_i,
        y_i) by each value in weights (w_i). Defaults to an array of ones
        the same size as x & y. (default: None)
    adjust : float
        An adjustment factor for the bw. Bandwidth becomes bw * adjust.

    Returns
    -------
    g: ndarray[ndim=2]
        A gridded 2D kernel density estimate of the input points.
    e: (xmin, xmax, ymin, ymax) tuple
        Extents of g
    dx, dy : float
        Grid steps in x and y
    """
    # Variable check
    x, y = np.squeeze(np.asarray(x)), np.squeeze(np.asarray(y))
    if x.size != y.size:
        raise ValueError('Input x & y arrays must be the same size!')
    n = x.size
    if weights is None:
        # Default: Weight all points equally
        weights = np.ones(n)
    else:
        weights = np.squeeze(np.asarray(weights))
        if weights.size != x.size:
            raise ValueError('Input weights must be an array of the same size as input x & y arrays!')
    # Optimize gridsize ------------------------------------------------------
    # Round each dimension up to the next power of 2 for the fft-style
    # convolution, and force integer sizes (floats break reshape/sparse
    # construction on modern numpy/scipy).
    if gridsize is None:
        gridsize = np.asarray([np.max((len(x), 512.)), np.max((len(y), 512.))])
    gridsize = 2 ** np.ceil(np.log2(gridsize))
    nx, ny = (int(v) for v in gridsize)
    # Make the sparse 2d-histogram -------------------------------------------
    # Default extents are the extent of the data
    if extents is None:
        xmin, xmax = x.min(), x.max()
        ymin, ymax = y.min(), y.max()
    else:
        xmin, xmax, ymin, ymax = map(float, extents)
    dx = (xmax - xmin) / (nx - 1)
    dy = (ymax - ymin) / (ny - 1)
    # Basically, this is just doing what np.digitize does with one less copy;
    # xyi contains the integer bin of each point as a (2, n) array
    xyi = np.vstack((x, y)).T.astype(float)
    xyi -= [xmin, ymin]
    xyi /= [dx, dy]
    xyi = np.floor(xyi, xyi).T.astype(int)
    # Sparse coo_matrix avoids np.histogram2d's memory overhead
    grid = coo_matrix((weights, xyi), shape=(nx, ny)).toarray()
    # Kernel Preliminary Calculations ---------------------------------------
    # Covariance matrix (in pixel coords) drives the kernel shape
    cov = np.cov(xyi)
    if nocorrelation:
        cov[1, 0] = 0
        cov[0, 1] = 0
    # Scott's rule scaling factor for the bandwidth (2D); Silverman's rule
    # gives the same value for 2d datasets
    scotts_factor = n ** (-1.0 / 6.) * adjust
    # Make the gaussian kernel ---------------------------------------------
    std_devs = np.sqrt(np.diag(cov))
    # kernel support in pixels; must be ints to build the kernel grid
    kern_nx, kern_ny = (int(v) for v in np.round(scotts_factor * 2 * np.pi * std_devs))
    # Bandwidth matrix for the gaussian kernel
    inv_cov = np.linalg.inv(cov * scotts_factor ** 2)
    # x & y (pixel) coords of the kernel grid, with <x,y> = <0,0> in center
    # (np.float was removed from numpy >= 1.24; use the builtin float)
    xx = np.arange(kern_nx, dtype=float) - kern_nx / 2.0
    yy = np.arange(kern_ny, dtype=float) - kern_ny / 2.0
    xx, yy = np.meshgrid(xx, yy)
    # Evaluate the gaussian function on the kernel grid
    kernel = np.vstack((xx.flatten(), yy.flatten()))
    kernel = np.dot(inv_cov, kernel) * kernel
    kernel = np.sum(kernel, axis=0) / 2.0
    kernel = np.exp(-kernel)
    kernel = kernel.reshape((kern_ny, kern_nx))
    # Convolve the histogram with the gaussian kernel;
    # boundary=symm corrects for data boundaries in the kde
    grid = convolve2d(grid, kernel, mode='same', boundary='symm')
    # Normalization factor so units match scipy.stats.kde.gaussian_kde
    norm_factor = 2 * np.pi * cov * scotts_factor ** 2
    norm_factor = np.linalg.det(norm_factor)
    norm_factor = n * dx * dy * np.sqrt(norm_factor)
    # Normalize the result
    grid /= norm_factor
    return grid, (xmin, xmax, ymin, ymax), dx, dy
def percentile(data, percentiles, weights=None):
    """Compute weighted percentiles.

    If the weights are equal, this is the same as normal percentiles.
    Elements of the data and weights arrays correspond to each other and
    must have equal length.  If weights is None (or all ones) this function
    defers to numpy's percentile (faster).

    Implementation
    --------------
    Extends the common percentile estimation method (linear interpolation
    between closest ranks).  With positive weights W=[w_i] attached to the
    N sorted sample values D=[d_i] and S_n the n-th partial weight sum, the
    percentile value is interpolated between its closest values v_k,
    v_{k+1}:

        v = v_k + (p - p_k) / (p_{k+1} - p_k) * (v_{k+1} - v_k)
        where p_n = 100 / S_n * (S_n - w_n / 2)

    The 50th weighted percentile is known as the weighted median.

    Parameters
    ----------
    data: ndarray[float, ndim=1]
        data points
    percentiles: float or ndarray[float, ndim=1]
        percentile(s) to compute (between 0 and 100)
    weights: ndarray[float, ndim=1] or None
        Weights of each point in data; all must be non-negative with a
        positive sum.

    Returns
    -------
    val: ndarray
        the weighted percentiles of the data (scalar result from numpy's
        percentile when unweighted and a scalar percentile is requested).
    """
    # unweighted (or trivially weighted) case: defer to numpy (faster).
    # The original crashed on scalar percentiles here (list(float)).
    trivial = weights is None or bool(np.equal(weights, 1.).all())
    if trivial:
        if hasattr(percentiles, '__iter__'):
            return np.percentile(data, list(percentiles))
        return np.percentile(data, percentiles)
    # make sure percentiles are fractions between 0 and 100
    if not np.greater_equal(percentiles, 0.0).all():
        raise ValueError("Percentiles less than 0")
    if not np.less_equal(percentiles, 100.0).all():
        raise ValueError("Percentiles greater than 100")
    # Make sure data is in correct shape
    shape = np.shape(data)
    n = len(data)
    if len(shape) != 1:
        raise ValueError("wrong data shape, expecting 1d")
    if len(weights) != n:
        raise ValueError("weights must be the same shape as data "
                         "({0} != {1})".format(len(weights), n))
    if not np.greater_equal(weights, 0.0).all():
        raise ValueError("Not all weights are non-negative.")
    _data = np.asarray(data, dtype=float)
    if hasattr(percentiles, '__iter__'):
        _p = np.asarray(percentiles, dtype=float) * 0.01
    else:
        _p = np.asarray([percentiles * 0.01], dtype=float)
    _wt = np.asarray(weights, dtype=float)
    len_p = len(_p)
    sd = np.empty(n, dtype=float)
    sw = np.empty(n, dtype=float)
    aw = np.empty(n, dtype=float)
    o = np.empty(len_p, dtype=float)
    # sort data and carry the weights along
    i = np.argsort(_data)
    np.take(_data, i, axis=0, out=sd)
    np.take(_wt, i, axis=0, out=sw)
    np.add.accumulate(sw, out=aw)
    if not aw[-1] > 0:
        raise ValueError("Nonpositive weight sum")
    # cumulative weight positions of each sample (see docstring formula)
    w = (aw - 0.5 * sw) / aw[-1]
    spots = np.searchsorted(w, _p)
    for (pk, s, p) in zip(range(len_p), spots, _p):
        if s == 0:
            # below the first position: clamp to the smallest value
            o[pk] = sd[0]
        elif s == n:
            # beyond the last position: clamp to the largest value
            o[pk] = sd[n - 1]
        else:
            # linear interpolation between the two closest rank positions
            f1 = (w[s] - p) / (w[s] - w[s - 1])
            f2 = (p - w[s - 1]) / (w[s] - w[s - 1])
            o[pk] = sd[s - 1] * f1 + sd[s] * f2
    return o
class KDE_2d(object):
    def __init__(self, x, y, gridsize=(100, 100), extents=None,
                 nocorrelation=False, weights=None, adjust=0.5):
        """Build a gridded 2D KDE of the (x, y) sample via fastkde.

        Parameters mirror fastkde: output grid size, optional extents,
        whether to drop the x/y correlation, per-point weights and a
        bandwidth adjustment factor.
        """
        im, e, dx, dy = fastkde(x, y, gridsize=gridsize, extents=extents,
                                nocorrelation=nocorrelation, weights=weights,
                                adjust=adjust)
        # keep the raw sample plus the gridded density and its geometry
        self.x = x
        self.y = y
        self.im = im
        self.e = e
        self.dx = dx
        self.dy = dy
@property
def peak(self):
im = self.im
e = self.e
dx = self.dx
dy = self.dy
best_idx = (im.argmax() / im.shape[1], im.argmax() % im.shape[1])
best = (best_idx[0] * dx + e[0], best_idx[1] * dy + e[2])
return best
def nice_levels(self, N=5):
""" Generates N sigma levels from a image map
Parameters
----------
H: ndarray
values to find levels from
N: int
number of sigma levels to find
Returns
-------
lvls: sequence
Levels corresponding to 1..(N + 1) sigma levels
"""
V = 1.0 - np.exp(-0.5 * np.arange(0.5, 0.5 * (N + 1 + 0.1), 0.5) ** 2)
Hflat = self.im.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
for i, v0 in enumerate(V):
try:
V[i] = Hflat[sm <= v0][-1]
except:
V[i] = Hflat[0]
return np.sort(V)
def percentiles_lvl(self, frac):
return getPercentileLevels(self.im, frac=frac)
def imshow(self, **kwargs):
defaults = {'origin': 'lower', 'cmap': plt.cm.Greys,
'interpolation':'nearest', 'aspect':'auto'}
defaults.update(**kwargs)
ax = kwargs.pop('ax', plt.gca())
return ax.imshow(self.im.T, extent=self.e, **defaults)
def contour(self, *args, **kwargs):
defaults = {'origin': 'lower', 'cmap': plt.cm.Greys,
'levels': np.sort(self.nice_levels())}
defaults.update(**kwargs)
ax = kwargs.pop('ax', plt.gca())
return ax.contour(self.im.T, *args, extent=self.e, **defaults)
def contourf(self, *args, **kwargs):
defaults = {'origin': 'lower', 'cmap': plt.cm.Greys,
'levels': self.nice_levels()}
defaults.update(**kwargs)
ax = kwargs.pop('ax', plt.gca())
return ax.contourf(self.im.T, *args, extent=self.e, **defaults)
def scatter(self, lvl=None, **kwargs):
defaults = {'c': '0.0', 'color':'k', 'facecolor':'k', 'edgecolor':'None'}
defaults.update(**kwargs)
xe = self.e[0] + self.dx * np.arange(0, self.im.shape[1])
ye = self.e[2] + self.dy * np.arange(0, self.im.shape[0])
x = self.x
y = self.y
if lvl is not None:
nx = np.ceil(np.interp(x, 0.5 * (xe[:-1] + xe[1:]), range(len(xe) - 1)))
ny = np.ceil(np.interp(y, 0.5 * (ye[:-1] + ye[1:]), range(len(ye) - 1)))
nh = [ self.im[nx[k], ny[k]] for k in range(len(x)) ]
ind = np.where(nh < np.min(lvl))
plt.scatter(x[ind], y[ind], **kwargs)
else:
plt.scatter(x, y, **kwargs)
def plot(self, contour={}, scatter={}, **kwargs):
# levels = np.linspace(self.im.min(), self.im.max(), 10)[1:]
levels = self.nice_levels()
c_defaults = {'origin': 'lower', 'cmap': plt.cm.Greys_r, 'levels':
levels}
c_defaults.update(**contour)
c = self.contourf(**c_defaults)
lvls = np.sort(c.levels)
s_defaults = {'c': '0.0', 'edgecolor':'None', 's':2}
s_defaults.update(**scatter)
self.scatter(lvl=[lvls], **s_defaults)
def plot_kde2d(x, y, gridsize=(100, 100), extents=None, nocorrelation=False,
               weights=None, adjust=0.3, **kwargs):
    """Convenience wrapper: build a :class:`KDE_2d` from (x, y) and draw it.

    All KDE parameters are forwarded to :class:`KDE_2d`; remaining kwargs
    go to :meth:`KDE_2d.plot`.
    """
    density = KDE_2d(x, y, gridsize=gridsize, extents=extents,
                     nocorrelation=nocorrelation, weights=weights,
                     adjust=adjust)
    density.plot(**kwargs)
def fastkde1D(xin, gridsize=200, extents=None, weights=None, adjust=1.):
    """
    A fft-based Gaussian kernel density estimate (KDE) for computing
    the KDE on a regular grid.

    Note that this is a different use case than scipy's original
    scipy.stats.kde.gaussian_kde

    IMPLEMENTATION
    --------------
    Performs a gaussian kernel density estimate over a regular grid using a
    convolution of the gaussian kernel with a histogram of the data.
    The histogram is built through a sparse coo_matrix, which is faster than
    numpy.histogram as it avoids intermediate copies and excessive memory
    usage. This function is typically *several orders of magnitude faster*
    than scipy.stats.kde.gaussian_kde and, for large (>1e7) numbers of
    points, produces an essentially identical result.

    Boundary conditions on the data are corrected by using a symmetric /
    reflection condition, hence the limits of the dataset do not affect
    the pdf estimate.

    INPUTS
    ------
    xin:  ndarray[ndim=1]
        The x-coords of the input data points
    gridsize: int
        size of the output grid (rounded up to the next power of 2;
        default: 200)
    extents: (xmin, xmax) tuple
        tuple of the extents of output grid (default: extent of input data)
    weights: ndarray[ndim=1]
        An array of the same shape as x that weights each sample x_i
        by w_i. Defaults to an array of ones the same size as x
    adjust : float
        An adjustment factor for the bw. Bandwidth becomes bw * adjust.

    OUTPUTS
    -------
    g: ndarray[ndim=1]
        A gridded kernel density estimate of the input points.
    e: (xmin, xmax) tuple
        Extents of g
    dx: float
        grid step
    """
    # Variable check
    x = np.squeeze(np.asarray(xin))
    # Default extents are the extent of the data
    if extents is None:
        xmin, xmax = x.min(), x.max()
    else:
        xmin, xmax = map(float, extents)
        # NOTE(review): weights are not filtered alongside x here, so
        # passing both extents and weights with points outside the extents
        # trips the size check below -- confirm intended contract.
        x = x[ (x <= xmax) & (x >= xmin) ]
    n = x.size
    if weights is None:
        # Default: Weight all points equally
        weights = np.ones(n)
    else:
        weights = np.squeeze(np.asarray(weights))
        if weights.size != x.size:
            raise ValueError('Input weights must be an array of the same size as input x & y arrays!')
    # Optimize gridsize ------------------------------------------------------
    # Round the grid to the next power of 2 to optimize the fft usage; cast
    # to int: the float result is an invalid shape/index on Python 3
    if gridsize is None:
        gridsize = np.max((len(x), 512.))
    nx = int(2 ** np.ceil(np.log2(gridsize)))
    # Make the sparse histogram ----------------------------------------------
    dx = (xmax - xmin) / (nx - 1)
    # Bin index of each point (one less copy than np.digitize); int cast is
    # required for valid sparse-matrix indices
    xyi = np.floor((x - xmin) / dx).astype(int)
    xyi = np.vstack((xyi, np.zeros(n, dtype=int)))
    # Exploit a sparse coo_matrix avoiding np.histogram due to excessive
    # memory usage with many points
    grid = coo_matrix((weights, xyi), shape=(nx, 1)).toarray()
    # Kernel Preliminary Calculations ---------------------------------------
    std_x = np.std(xyi[0])
    # Scott's rule scaling factor: n ** (-1. / (d + 4)) with d = 1
    # (Silvermann would be (n * (d + 2) / 4.) ** (-1. / (d + 4)))
    scotts_factor = n ** (-1. / 5.) * adjust
    # Make the gaussian kernel ---------------------------------------------
    # Kernel support in grid cells; must be an int (window length / shape)
    kern_nx = int(np.round(scotts_factor * 2 * np.pi * std_x))
    # Then evaluate the gaussian function on the kernel grid
    kernel = np.reshape(gaussian(kern_nx, scotts_factor * std_x), (kern_nx, 1))
    # ---- Produce the kernel density estimate --------------------------------
    # Convolve the histogram with the gaussian kernel, using symmetric
    # padding to correct for data boundaries in the kde
    npad = int(np.min((nx, 2 * kern_nx)))
    grid = np.vstack( [grid[npad: 0: -1], grid, grid[nx: nx - npad: -1]] )
    grid = convolve(grid, kernel, mode='same')[npad: npad + nx]
    # Normalization factor to divide result by so that units are in the same
    # units as scipy.stats.kde.gaussian_kde's output.
    norm_factor = 2 * np.pi * std_x * std_x * scotts_factor ** 2
    norm_factor = n * dx * np.sqrt(norm_factor)
    # Normalize the result
    grid /= norm_factor
    return np.squeeze(grid), (xmin, xmax), dx
class KDE_1d(object):
    """1D Gaussian kernel density estimate with plotting helpers.

    Wraps :func:`fastkde1D`; the estimate is stored as a gridded profile.
    """
    def __init__(self, x, gridsize=200, extents=None, weights=None, adjust=1.):
        im, e, dx = fastkde1D(x, gridsize=gridsize, extents=extents,
                              weights=weights, adjust=adjust)
        self.x = x    # original samples
        self.dx = dx  # grid step
        self.im = im  # gridded density values
        self.e = e    # extents (xmin, xmax)

    @property
    def peak(self):
        """Abscissa of the density maximum on the grid."""
        return self.im.argmax() * self.dx + self.e[0]

    def add_markers(self, ax=None, where=0.0, orientation='horizontal',
                    jitter=0, **kwargs):
        """Draw one tick-style marker per data point along the axis.

        Parameters
        ----------
        ax: plt.Axes
            axes to draw into (default: current axes)
        where: float
            position of the marker line on the other axis
        orientation: 'horizontal' | 'vertical'
            direction of the data axis
        jitter: float
            if > 0, spread markers uniformly in [where - jitter, where + jitter]
        **kwargs: dict
            forwarded to :func:`plt.plot`
        """
        if ax is None:
            ax = plt.gca()
        # default marker per orientation, unless the caller provided one
        if 'marker' not in kwargs:
            if orientation == 'horizontal':
                kwargs['marker'] = '|'
            else:
                kwargs['marker'] = '_'
        # install defaults only when the caller gave NONE of the aliases
        # (the previous or-chained checks clobbered user-provided values)
        if not any(k in kwargs for k in ('facecolor', 'fc', 'markerfacecolor', 'mfc')):
            kwargs['markerfacecolor'] = 'None'
        if not any(k in kwargs for k in ('edgecolor', 'ec', 'markeredgecolor', 'mec')):
            kwargs['markeredgecolor'] = 'k'
        if not any(k in kwargs for k in ('linestyle', 'ls')):
            kwargs['linestyle'] = 'None'
        if not any(k in kwargs for k in ('size', 'markersize')):
            kwargs['markersize'] = 3
        if orientation == 'horizontal':
            # Draw the markers
            if jitter > 0:
                pos = np.random.uniform(low=float(where - jitter),
                                        high=float(where + jitter),
                                        size=len(self.x))
                ax.plot(self.x, pos, **kwargs)
            else:
                ax.plot(self.x, float(where) * np.ones(len(self.x)), **kwargs)
            plt.draw_if_interactive()
        elif orientation == 'vertical':
            # Draw the markers
            if jitter > 0.:
                pos = np.random.uniform(low=float(where - jitter),
                                        high=float(where + jitter),
                                        size=len(self.x))
                ax.plot(pos, self.x, **kwargs)
            else:
                # do not pass marker= explicitly here: kwargs already holds
                # 'marker' (the duplicate keyword raised a TypeError before)
                ax.plot(float(where) * np.ones(len(self.x)), self.x, **kwargs)
            plt.draw_if_interactive()

    def plot(self, ax=None, orientation='horizontal', cutoff=False, log=False,
             cutoff_type='std', cutoff_val=1.5, pos=100, pos_marker='line',
             pos_width=0.05, pos_kwargs={}, **kwargs):
        """Fill the density profile as a (half-)violin.

        NOTE(review): cutoff, log, cutoff_type, cutoff_val, pos, pos_marker,
        pos_width and pos_kwargs are currently unused -- kept for interface
        compatibility; confirm before removing.
        """
        if ax is None:
            ax = plt.gca()
        # Draw the violin: install defaults only when no alias was given
        # (the previous or-chained checks overrode user-provided aliases)
        if ('facecolor' not in kwargs) and ('fc' not in kwargs):
            kwargs['facecolor'] = 'y'
        if ('edgecolor' not in kwargs) and ('ec' not in kwargs):
            kwargs['edgecolor'] = 'k'
        if 'alpha' not in kwargs:
            kwargs['alpha'] = 0.5
        # 'color' wins over both face and edge
        if 'color' in kwargs:
            kwargs['edgecolor'] = kwargs['color']
            kwargs['facecolor'] = kwargs['color']
        # Kernel density estimate for data at this position.
        violin, e = self.im, self.e
        xvals = np.linspace(e[0], e[1], len(violin))
        # close the profile so the filled polygon reaches zero at both ends
        xvals = np.hstack(([xvals[0]], xvals, [xvals[-1]]))
        violin = np.hstack(([0], violin, [0]))
        if orientation == 'horizontal':
            ax.fill(xvals, violin, **kwargs)
        elif orientation == 'vertical':
            ax.fill_betweenx(xvals, 0, violin, **kwargs)
        plt.draw_if_interactive()
def plot_kde1d(x, gridsize=200, extents=None, weights=None, adjust=1.,
               **kwargs):
    """Convenience wrapper: build a :class:`KDE_1d` from x and draw it.

    KDE parameters are forwarded to :class:`KDE_1d`; remaining kwargs go
    to :meth:`KDE_1d.plot`.
    """
    density = KDE_1d(x, gridsize=gridsize, extents=extents, weights=weights,
                     adjust=adjust)
    return density.plot(**kwargs)
def plotDensity(x,y, bins=100, ax=None, Nlevels=None, levels=None,
                frac=None,
                contour={'colors':'0.0', 'linewidths':0.5},
                contourf={'cmap': plt.cm.Greys_r},
                scatter={'c':'0.0', 's':0.5, 'edgecolor':'None'},
                *args, **kwargs ):
    """
    Plot a the density of x,y given certain contour paramters and includes
    individual points (not represented by contours)

    inputs:
        x,y     data to plot

    keywords:
        bins    bin definition for the density histogram
        ax      use a specific axis
        Nlevels the number of levels to use with contour
        levels  levels
        frac    percentiles to contour if specified

    Extra keywords:
        *args, **kwargs forwarded to histogram2d
        **contour       forwarded to contour function
        **contourf      forwarded to contourf function
        **scatter       forwarded to the scatter/plot of outliers
    """
    if ax is None:
        ax = plt.gca()
    if 'bins' not in kwargs:
        kwargs['bins'] = bins
    h, xe, ye = np.histogram2d(x, y, *args, **kwargs)
    # choose contour levels: percentile-based by default, linear if Nlevels
    # is given, explicit percentile fractions if frac is given
    if (Nlevels is None) & (levels is None) & (frac is None):
        levels = np.sort(getPercentileLevels(h))
    elif (Nlevels is not None) & (levels is None) & (frac is None):
        levels = np.linspace(2., h.max(), Nlevels)[1:].tolist() + [h.max()]
    elif (frac is not None):
        levels = getPercentileLevels(h, frac=frac)
    if not getattr(levels, '__iter__', False):
        raise AttributeError("Expecting levels variable to be iterable")
    # the top level must reach the histogram maximum for contourf to close
    if levels[-1] != h.max():
        levels = list(levels) + [h.max()]
    if isinstance(contourf, dict):
        cont = ax.contourf(h.T, extent=[xe[0],xe[-1], ye[0],ye[-1]],
                           levels=levels, **contourf)
    else:
        cont = None
    if isinstance(contour, dict):
        ax.contour(h.T, extent=[xe[0],xe[-1], ye[0],ye[-1]], levels=levels,
                   **contour)
    ind = np.asarray([False] * len(x))
    if cont is not None:
        # bin index of each sample; int cast is required: np.ceil returns
        # floats, which are invalid array indices on Python 3
        nx = np.ceil(np.interp(x, 0.5 * (xe[:-1] + xe[1:]), range(len(xe) - 1))).astype(int)
        ny = np.ceil(np.interp(y, 0.5 * (ye[:-1] + ye[1:]), range(len(ye) - 1))).astype(int)
        nh = [ h[nx[k],ny[k]] for k in range(len(x)) ]
        # only scatter the points that fall below the lowest contour level
        ind = np.where(nh < np.min(levels))
        ax.scatter(x[ind], y[ind], **scatter)
    else:
        ax.plot(x, y, **scatter)
def make_indices(dimensions):
    """Generate the complete set of index tuples for the given dimensions.

    For a single dimension, returns ``range(dimensions[0])``; otherwise a
    list of tuples in row-major (C) order.
    """
    ndim = len(dimensions)
    if ndim == 1:
        return range(dimensions[0])
    combos = [[]]
    # build combinations from the last axis outward so the first axis
    # varies slowest (row-major order)
    for axis in reversed(range(ndim)):
        combos = [[idx] + tail
                  for idx in range(dimensions[axis])
                  for tail in combos]
    try:
        return [tuple(c) for c in combos]
    except TypeError:
        return combos
def hpd(x, alpha):
    """Calculate HPD (minimum width BCI) of array for given alpha.

    Parameters
    ----------
    x: ndarray
        posterior trace; axis 0 is the sample axis, further axes index
        the variables of a multivariate node
    alpha: float
        desired probability mass *outside* the interval

    Returns
    -------
    interval: ndarray
        per-variable (lower, upper) bounds from :func:`calc_min_interval`
    """
    # Make a copy of trace
    x = x.copy()
    # For multivariate node
    if x.ndim > 1:
        # Transpose first, then sort: move the sample axis last.
        # list() is required -- Python 3 range objects do not concatenate.
        tx = np.transpose(x, list(range(x.ndim))[1:] + [0])
        dims = np.shape(tx)
        # Container list for intervals
        intervals = np.resize(0.0, dims[:-1] + (2,))
        for index in make_indices(dims[:-1]):
            try:
                index = tuple(index)
            except TypeError:
                pass
            # Sort trace
            sx = np.sort(tx[index])
            # Append to list
            intervals[index] = calc_min_interval(sx, alpha)
        # Transpose back before returning
        return np.array(intervals)
    # Univariate node: sort the trace and return its minimum-width interval
    # (previously this case fell through and implicitly returned None)
    sx = np.sort(x)
    return calc_min_interval(sx, alpha)
def plotCorr(l, pars, plotfunc=None, lbls=None, limits=None, triangle='lower',
             devectorize=False, *args, **kwargs):
    """ Plot correlation matrix between variables

    inputs
    -------
    l: dict
        dictionary of variables (could be a Table)

    pars: sequence of str
        parameters to use

    plotfunc: callable
        function to be called when doing the scatter plots

    lbls: sequence of str
        sequence of string to use instead of dictionary keys

    limits: dict
        impose limits for some paramters. Each limit should be pairs of values.
        No need to define each parameter limits

    triangle: str in ['upper', 'lower']
        Which side of the triangle to use.

    devectorize: bool
        if set, rasterize the figure to reduce its size

    *args, **kwargs are forwarded to the plot function

    Example
    -------
        import numpy as np
        figrc.ezrc(16, 1, 16, 5)

        d = {}
        for k in range(4):
            d[k] = np.random.normal(0, k+1, 1e4)

        plt.figure(figsize=(8 * 1.5, 7 * 1.5))
        plotCorr(d, d.keys(), plotfunc=figrc.scatter_plot)
    """
    if lbls is None:
        lbls = pars
    if limits is None:
        limits = {}
    # tick-count lookup keyed on the number of off-diagonal panels
    fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
    if not len(pars) - 1 in fontmap:
        fontmap[len(pars) - 1] = 3
    k = 1   # 1-based subplot counter
    axes = np.empty((len(pars) + 1, len(pars)), dtype=object)
    for j in range(len(pars)):
        for i in range(len(pars)):
            # share x with the panel above in the same column
            if j > i:
                sharex = axes[j - 1, i]
            else:
                sharex = None
            if i == j:
                # Plot the histograms on the diagonal.
                ax = plt.subplot(len(pars), len(pars), k)
                axes[j, i] = ax
                data = l[pars[i]]
                n, b, p = ax.hist(data, bins=50, histtype="step", color=kwargs.get("color", "k"))
                if triangle == 'upper':
                    ax.set_xlabel(lbls[i])
                    ax.set_ylabel(lbls[i])
                    ax.xaxis.set_ticks_position('bottom')
                    ax.yaxis.set_ticks_position('none')
                else:
                    ax.yaxis.set_ticks_position('none')
                    ax.xaxis.set_ticks_position('bottom')
                hide_axis(['right', 'top', 'left'], ax=ax)
                plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), visible=False)
                xlim = limits.get(pars[i], (data.min(), data.max()))
                ax.set_xlim(xlim)
            if triangle == 'upper':
                data_x = l[pars[i]]
                data_y = l[pars[j]]
                if i > j:
                    # share y with the panel on the left within the row
                    if i > j + 1:
                        sharey = axes[j, i - 1]
                    else:
                        sharey = None
                    ax = plt.subplot(len(pars), len(pars), k, sharey=sharey, sharex=sharex)
                    axes[j, i] = ax
                    if plotfunc is None:
                        plt.plot(data_x, data_y, ',', **kwargs)
                    else:
                        plotfunc(data_x, data_y, ax=ax, *args, **kwargs)
                    xlim = limits.get(pars[i], None)
                    ylim = limits.get(pars[j], None)
                    if xlim is not None:
                        ax.set_xlim(xlim)
                    if ylim is not None:
                        ax.set_ylim(ylim)
                    plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), visible=False)
                    if devectorize is True:
                        devectorize_axes(ax=ax)
            if triangle == 'lower':
                data_x = l[pars[i]]
                data_y = l[pars[j]]
                if i < j:
                    # share y with the panel on the left (first column has
                    # none); the previous guard repeated `i < j` and relied
                    # on axes[j, -1] being empty
                    if i > 0:
                        sharey = axes[j, i - 1]
                    else:
                        sharey = None
                    ax = plt.subplot(len(pars), len(pars), k, sharey=sharey, sharex=sharex)
                    axes[j, i] = ax
                    if plotfunc is None:
                        plt.plot(data_x, data_y, ',', **kwargs)
                    else:
                        plotfunc(data_x, data_y, ax=ax, *args, **kwargs)
                    plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), visible=False)
                    xlim = limits.get(pars[i], None)
                    ylim = limits.get(pars[j], None)
                    if xlim is not None:
                        # x-limits apply to the x axis (previously this
                        # called set_ylim, clobbering the y range)
                        ax.set_xlim(xlim)
                    if ylim is not None:
                        ax.set_ylim(ylim)
                    if devectorize is True:
                        devectorize_axes(ax=ax)
                    if i == 0:
                        ax.set_ylabel(lbls[j])
                        plt.setp(ax.get_yticklabels(), visible=True)
                    if j == len(pars) - 1:
                        ax.set_xlabel(lbls[i])
                        plt.setp(ax.get_xticklabels(), visible=True)
                    N = int(0.5 * fontmap[len(pars) - 1])
                    if N <= 4:
                        N = 5
                    setNmajors(N, N, ax=ax, prune='both')
            k += 1
    setMargins(hspace=0.0, wspace=0.0)
def hinton(W, bg='grey', facecolors=('w', 'k')):
    """Draw a hinton diagram of the matrix W on the current pylab axis

    Hinton diagrams are a way of visualizing numerical values in a matrix/vector,
    popular in the neural networks and machine learning literature. The area
    occupied by a square is proportional to a value's magnitude, and the colour
    indicates its sign (positive/negative).

    Example usage:

        R = np.random.normal(0, 1, (2,1000))
        h, ex, ey = np.histogram2d(R[0], R[1], bins=15)
        hh = h - h.T
        hinton.hinton(hh)
    """
    M, N = W.shape   # M rows (y axis), N columns (x axis)
    square_x = np.array([-.5, .5, .5, -.5])
    square_y = np.array([-.5, -.5, .5, .5])
    # temporarily disable interactive drawing while the squares are added
    ioff = False
    if plt.isinteractive():
        plt.ioff()
        ioff = True
    # background rectangle spans N columns in x, M rows in y
    plt.fill([-.5, N - .5, N - .5, - .5], [-.5, -.5, M - .5, M - .5], bg)
    Wmax = np.abs(W).max()
    for m, Wrow in enumerate(W):
        for n, w in enumerate(Wrow):
            # color encodes the sign, square size encodes the magnitude
            c = facecolors[1] if plt.signbit(w) else facecolors[0]
            plt.fill(square_x * w / Wmax + n, square_y * w / Wmax + m, c, edgecolor=c)
    plt.ylim(-0.5, M - 0.5)
    # the x axis spans the N columns (was M, which clipped or padded the
    # view for non-square matrices)
    plt.xlim(-0.5, N - 0.5)
    if ioff is True:
        plt.ion()
    plt.draw_if_interactive()
def parallel_coordinates(d, labels=None, orientation='horizontal',
                         positions=None, ax=None, **kwargs):
    """ Plot parallel coordinates of a data set

    Each dimension is normalized and then plot either vertically or horizontally

    Parameters
    ----------
    d: ndarray, recarray or dict
        data to plot (one column or key per coordinate)

    labels: sequence
        sequence of string to use to define the label of each coordinate
        default p{:d}

    orientation: str
        'horizontal' of 'vertical' to set the plot orientation accordingly

    positions: sequence(float)
        position of each plane on the main axis. Default is equivalent to
        equidistant positioning.

    ax: plt.Axes instance
        axes to use for the figure, default plt.subplot(111)

    **kwargs: dict
        forwarded to :func:`plt.plot`
    """
    if labels is None:
        if hasattr(d, 'keys'):
            names = list(d.keys())
            # list() is required: Python 3 dict views do not convert to a
            # 2d numeric array through np.array
            data = np.array(list(d.values())).T
        elif hasattr(d, 'dtype'):
            if d.dtype.names is not None:
                names = d.dtype.names
            else:
                names = [ 'p{0:d}'.format(k) for k in range(len(d[0])) ]
            data = np.array(d).astype(float)
        else:
            names = [ 'p{0:d}'.format(k) for k in range(len(d)) ]
            data = np.array(d).astype(float)
    else:
        names = labels
        data = np.array(d).astype(float)
        if len(labels) != len(data[0]):
            names = [ 'p{0:d}'.format(k) for k in range(len(data[0])) ]
    if positions is None:
        positions = np.arange(len(names))
    else:
        positions = np.array(positions)
        positions -= positions.min()
    dyn = np.ptp(positions)
    # center and scale each coordinate to a unit range
    # (np.ptp function form: the ndarray.ptp method was removed in NumPy 2.0)
    data = (data - data.mean(axis=0)) / np.ptp(data, axis=0)[None, :]
    # display planes ordered by their requested position
    order = np.argsort(positions)
    data = data[:, order]
    positions = positions[order]
    names = np.array(names)[order].tolist()
    if ax is None:
        ax = plt.subplot(111)
    if orientation.lower() == 'horizontal':
        ax.vlines(positions, -1, 1, color='0.8')
        ax.plot(positions, data.T, **kwargs)
        hide_axis(['left', 'right', 'top', 'bottom'], ax=ax)
        plt.setp(ax.get_yticklabels(), visible=False)
        ax.set_xticks(positions)
        ax.set_xticklabels(names)
        ax.set_xlim(positions.min() - 0.1 * dyn, positions.max() + 0.1 * dyn)
    else:
        ax.hlines(positions, -1, 1, color='0.8')
        ax.plot(data.T, positions, **kwargs)
        hide_axis(['left', 'right', 'top', 'bottom'], ax=ax)
        plt.setp(ax.get_xticklabels(), visible=False)
        ax.set_yticks(positions)
        ax.set_yticklabels(names)
        ax.set_ylim(positions.min() - 0.1 * dyn, positions.max() + 0.1 * dyn)
def raw_string(seq):
    """ make a sequence of strings raw to avoid latex interpretation

    Parameters
    ----------
    seq: sequence(str)
        strings to escape

    Returns
    -------
    seq: list(str)
        strings with LaTeX-active characters backslash-escaped
    """
    def f(s):
        """ Escape the backslash first, then the LaTeX specials '_' and '^' """
        # explicit double backslashes: '\_' and '\^' are invalid escape
        # sequences (SyntaxWarning on modern Python) even though they
        # evaluate to the same characters
        r = s.replace('\\', '\\\\').replace('_', '\\_').replace('^', '\\^')
        return r
    return [ f(k) for k in seq ]
def get_centers_from_bins(bins):
    """ return centers from bin sequence """
    lower = bins[:-1]
    upper = bins[1:]
    return 0.5 * (lower + upper)
def nice_sigma_levels(im, sigs=[1, 2, 3]):
    """ Generates N sigma levels from a image map

    Parameters
    ----------
    im: ndarray
        values to find levels from

    sigs: sequence
        sigma levels to find (never mutated, so the mutable default is safe)

    Returns
    -------
    lvls: sequence
        Levels values corresponding to requested sigmas
    """
    # fraction of the total mass enclosed at each requested sigma
    V = 1.0 - np.exp(-0.5 * np.array(sigs) ** 2)
    Hflat = im.flatten()
    inds = np.argsort(Hflat)[::-1]   # highest density first
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    # out-of-place division: in-place /= raised TypeError on integer images
    sm = sm / sm[-1]                 # normalized cumulative mass fraction
    for i, v0 in enumerate(V):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except IndexError:
            # requested fraction is smaller than the largest single cell
            V[i] = Hflat[0]
    return V
def nice_levels(H, N=5):
    """ Generates N sigma levels from a image map

    Parameters
    ----------
    H: ndarray
        values to find levels from

    N: int
        number of sigma levels to find

    Returns
    -------
    lvls: sequence
        Levels corresponding to 1..(N + 1) sigma levels
    """
    # enclosed-mass fractions for half-sigma steps up to (N + 1) / 2 sigma
    V = 1.0 - np.exp(-0.5 * np.arange(0.5, 0.5 * (N + 1 + 0.1), 0.5) ** 2)
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]   # highest density first
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    # out-of-place division: in-place /= raised TypeError on integer images
    sm = sm / sm[-1]                 # normalized cumulative mass fraction
    for i, v0 in enumerate(V):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except IndexError:
            # requested fraction is smaller than the largest single cell
            V[i] = Hflat[0]
    return V
def plot_density_map(x, y, xbins, ybins, Nlevels=4, cbar=True, weights=None):
    """Show a 2D histogram of (x, y) as a color map with sigma contours.

    Parameters
    ----------
    x, y: ndarray
        samples to histogram
    xbins, ybins: sequence
        bin edges along each axis
    Nlevels: int
        number of sigma contour levels (forwarded to nice_levels)
    cbar: bool
        if set, attach a colorbar to the image
    weights: ndarray
        optional per-sample weights forwarded to np.histogram2d

    Returns
    -------
    ax: plt.Axes
        axes of the plot
    cb: colorbar instance or None
        the colorbar when cbar is set
    """
    density = np.histogram2d(x, y, bins=(xbins, ybins),
                             weights=weights)[0].astype(float).T
    # cell centers feed the contours, cell edges feed the color map
    xcenters = get_centers_from_bins(xbins)
    ycenters = get_centers_from_bins(ybins)
    cgrid_x, cgrid_y = np.meshgrid(xcenters, ycenters)
    egrid_x, egrid_y = np.meshgrid(xbins, ybins)
    image = plt.pcolor(egrid_x, egrid_y, density, cmap=plt.cm.Blues)
    plt.contour(cgrid_x, cgrid_y, density,
                levels=nice_levels(density, Nlevels), cmap=plt.cm.Greys_r)
    cb = plt.colorbar(image) if cbar else None
    plt.xlim(xbins[0], xbins[-1])
    plt.ylim(ybins[0], ybins[-1])
    try:
        plt.tight_layout()
    except Exception as e:
        print(e)
    return plt.gca(), cb
def triangle_plot(d, keys, bins=None, **kwargs):
    """
    Plot density maps all elements of gx against all elements of gy
    Parameters
    ----------
    d: dictionnary like structure
        data structure
    keys: sequence(str)
        keys from d to plot
    bins: sequence, optional
        bins to use per dimension
        default is adapted from the stddev
    labels: sequence(str)
        string to use as labels on the plots
    usekde: bool, optional, default: true
        if set use KDE to estimate densities, histograms otherwise
    tickrotation: float, optional, default: 0
        rotate the tick labels on the x-axis
    gaussian_ellipse: bool, optional, default: True
        if set, display the Gaussian error ellipse on top of each plot
    hpd: bool, optional, default: True
        if set display 1 and 3 sigma equivalent range on the 1d pdfs
    weights: ndarray
        weights to apply to each point
    returns
    -------
    axes: sequence
        all axes defined in the plot
    .. note::
        Other parameters are forwarded to :func:`plot_density_map`
        1d pdfs calls :func:`plot_1d_PDFs`
    """
    # axes created for the (unused) upper triangle are collected here and
    # hidden at the end
    _drop = []
    ncols = len(keys)
    nlines = len(keys)
    shape = (nlines, ncols)
    # object grid of created axes, indexed [row, col]
    axes = np.empty((nlines, ncols), dtype=object)
    # optional keyword arguments consumed here (the rest is forwarded)
    lbls = kwargs.pop('labels', keys)
    usekde = kwargs.pop('usekde', True)
    ticksrotation = kwargs.pop('ticksrotation', 0)
    # gaussian_corner = kwargs.pop('gaussian_corner', False)
    gaussian_ellipse = kwargs.pop('gaussian_ellipse', False)
    hpd = kwargs.pop('hpd', True)
    weights = kwargs.pop('weights', None)
    max_n_ticks = kwargs.pop('max_n_ticks', 5)
    if bins is None:
        # default binning: 0.1 stddev wide bins, padded by 2 bins each side
        bins = []
        for k in keys:
            x = d[k]
            dx = 0.1 * np.std(x)
            bins.append(np.arange(x.min() - 2 * dx, x.max() + 2 * dx, dx))
    else:
        # a scalar bins value is replicated for every dimension
        if not hasattr(bins, '__iter__'):
            bins = [bins] * len(keys)
        if len(bins) != len(keys):
            raise AttributeError('bins are not the same length as dimensions')
    for k in range(nlines * ncols):
        # grid position of the k-th subplot: yk = row, xk = column
        yk, xk = np.unravel_index(k, shape)
        kxk = keys[xk]
        kyk = keys[yk]
        # lower-triangle panels share x with the column's diagonal histogram
        # and y with the first panel in the row
        if (xk >= 0) and (yk > xk):
            sharex = axes[xk, xk]
            sharey = axes[yk, 0]
        else:
            sharey = None
            sharex = None
        ax = plt.subplot(nlines, ncols, k + 1, sharey=sharey, sharex=sharex)
        if yk >= xk:
            axes[yk, xk] = ax
        # elif not gaussian_corner:
        else:
            # upper-triangle panel: remember it so it can be hidden later
            _drop.append(ax)
        if (yk > xk):
            # off-diagonal panel: 2D density of (keys[xk], keys[yk])
            if usekde:
                kde = KDE_2d(d[kxk], d[kyk],
                             gridsize=(len(bins[xk]),len(bins[yk])),
                             adjust=0.5, weights=weights)
                kde.imshow()
                kde.contour(cmap=plt.cm.Greys_r)
            else:
                plot_density_map(d[kxk], d[kyk], bins[xk], bins[yk], cbar=False,
                                 weights=weights, **kwargs)
            if gaussian_ellipse & (weights is None):
                # overlay the 2-parameter Gaussian approximation in red
                data = np.vstack([d[kxk], d[kyk]])
                mu = np.mean(data, axis=1)
                cov = np.cov(data - mu[:, None])
                error_ellipse(mu, cov, ax=ax, edgecolor="r", lw=2)
            # only the leftmost column keeps y labels, only the bottom row
            # keeps x labels
            if xk == 0:
                ax.set_ylabel(lbls[yk])
                [l.set_rotation(ticksrotation) for l in ax.get_yticklabels()]
            else:
                plt.setp(ax.get_yticklabels(), visible=False)
            if yk == nlines - 1:
                ax.set_xlabel(lbls[xk])
                [l.set_rotation(ticksrotation) for l in ax.get_xticklabels()]
            else:
                plt.setp(ax.get_xticklabels(), visible=False)
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.set_ylim(bins[yk][0], bins[yk][-1])
            ax.set_xlim(bins[xk][0], bins[xk][-1])
            ax.xaxis.set_label_coords(0.5, -0.6)
            ax.yaxis.set_label_coords(-0.6, 0.5)
        elif (yk == xk):
            # diagonal panel: 1D pdf of keys[xk], peak marked in red
            if usekde:
                kde = KDE_1d(d[kxk], gridsize=len(bins[xk]), weights=weights)
                kde.plot(ec='k', alpha=0.8, lw=2, fc='w', facecolor='w')
                plt.vlines(kde.peak, ymin=0, ymax=kde.im.max(), color='r')
            else:
                n, _ = np.histogram(d[kxk], bins=bins[xk], weights=weights)
                xn = get_centers_from_bins(bins[xk])
                ax.fill_between(xn, [0] * len(xn), n.astype(float), edgecolor='k',
                                facecolor='w', alpha=0.8, lw=2)
                plt.vlines([xn[n.argmax()]], ymin=0, ymax=n.max(), color='r', lw=1, zorder=10)
            # ax.set_xlim(bins[xk][0], bins[xk][1])
            if hpd is True:
                # shade weighted-percentile ranges (~1 and ~3 sigma) in blue
                ylim = ax.get_ylim()
                xn = percentile(d[kxk], (0.1, 15.7, 50, 84.3, 99.9), weights=weights)
                plt.vlines(xn, ymin=0, ymax=ylim[1], color='b', zorder=-10)
                plt.fill_between([xn[0], xn[2]], [0] * 2, [ylim[1]] * 2, color='b', alpha=0.05, zorder=-10)
                plt.fill_between([xn[1], xn[2]], [0] * 2, [ylim[1]] * 2, color='b', alpha=0.1, zorder=-10)
                plt.fill_between([xn[2], xn[-1]], [0] * 2, [ylim[1]] * 2, color='b', alpha=0.05, zorder=-10)
                plt.fill_between([xn[2], xn[-2]], [0] * 2, [ylim[1]] * 2, color='b', alpha=0.1, zorder=-10)
                ax.set_ylim(*ylim)
            plt.setp(ax.get_yticklabels(), visible=False)
            if yk == nlines - 1:
                ax.set_xlabel(lbls[xk])
                [l.set_rotation(ticksrotation) for l in ax.get_xticklabels()]
            else:
                plt.setp(ax.get_xticklabels(), visible=False)
            plt.setp(ax.get_yticklines(), visible=False)
            hide_axis(where=['top', 'left', 'right'], ax=ax)
            ax.set_xlim(bins[xk][0], bins[xk][-1])
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.xaxis.set_label_coords(0.5, -0.6)
    try:
        plt.tight_layout()
    except Exception as e:
        print(e)
    # hide the unused upper-triangle axes
    for ax in _drop:
        ax.set_visible(False)
    plt.subplots_adjust(hspace=0.05, wspace=0.05)
    return [k for k in axes.ravel() if k is not None]
def plot_1d_PDFs(d, lbls, ticksrotation=45, hpd=True,
                 figout=None, usekde=True, bins=None, weights=None, ncols=4,
                 **kwargs):
    """
    Plot the 1D pdf of each variable on a grid of panels.

    Parameters
    ----------
    d: dictionnary like structure
        data structure
    lbls: sequence(str)
        keys from d to plot
    ticksrotation: float, optional, default: 45
        rotate the tick labels on the x-axis
    hpd: bool, optional, default: True
        if set display 1 and 3 sigma equivalent range on the 1d pdfs
    figout: optional
        NOTE(review): currently unused -- kept for interface compatibility
    usekde: bool, optional, default: true
        if set use KDE to estimate densities, histograms otherwise
    bins: sequence, optional
        bins to use per dimension
        default is adapted from the stddev
    weights: ndarray
        weights to apply on each point
    ncols: int
        number of columns

    returns
    -------
    axes: sequence
        all axes defined in the plot
    """
    lbls = list(lbls)
    if bins is None:
        # default binning: 0.2 stddev wide bins, padded by 2 bins each side
        bins = []
        for k in lbls:
            x = d[k]
            dx = 0.2 * np.std(x)
            bins.append(np.arange(x.min() - 2 * dx, x.max() + 2 * dx, dx))
    else:
        # a scalar bins value is replicated for every dimension
        if not hasattr(bins, '__iter__'):
            bins = [bins] * len(lbls)
        if len(bins) != len(lbls):
            raise AttributeError('bins are not the same length as dimensions')
    xlabels = kwargs.pop('labels', lbls)
    ndim = len(lbls)
    # ceil division: the previous expression divided by (ndim // ncols),
    # which raised ZeroDivisionError whenever ndim < ncols
    nlines = max(1, (ndim + ncols - 1) // ncols)
    plt.figure(figsize=( 4 * ncols, 4 * nlines ))
    axes = []
    for xk, kxk in enumerate(lbls):
        ax = plt.subplot(nlines, ncols, xk + 1)
        if usekde:
            kde = KDE_1d(d[kxk], gridsize=len(bins[xk]), weights=weights)
            kde.plot(ec='k', alpha=0.8, lw=2, fc='w', facecolor='w')
            plt.vlines(kde.peak, ymin=0, ymax=kde.im.max(), color='r')
        else:
            n, _ = np.histogram(d[kxk], bins=bins[xk], weights=weights)
            xn = get_centers_from_bins(bins[xk])
            ax.fill_between(xn, [0] * len(xn), n.astype(float), edgecolor='k',
                            facecolor='w', alpha=0.8, lw=2)
            plt.vlines([xn[n.argmax()]], ymin=0, ymax=n.max(), color='r', lw=1, zorder=10)
        if hpd is True:
            # shade the weighted-percentile (~1 and ~3 sigma) ranges in blue
            ylim = ax.get_ylim()
            xn = percentile(d[kxk], (0.1, 15.7, 50, 84.3, 99.9), weights=weights)
            plt.vlines(xn, ymin=0, ymax=ylim[1], color='b', zorder=-10)
            plt.fill_between([xn[0], xn[2]], [0] * 2, [ylim[1]] * 2, color='b', alpha=0.05, zorder=-10)
            plt.fill_between([xn[1], xn[2]], [0] * 2, [ylim[1]] * 2, color='b', alpha=0.1, zorder=-10)
            plt.fill_between([xn[2], xn[-1]], [0] * 2, [ylim[1]] * 2, color='b', alpha=0.05, zorder=-10)
            plt.fill_between([xn[2], xn[-2]], [0] * 2, [ylim[1]] * 2, color='b', alpha=0.1, zorder=-10)
            ax.set_ylim(*ylim)
        plt.setp(ax.get_yticklabels(), visible=False)
        [l.set_rotation(ticksrotation) for l in ax.get_xticklabels()]
        hide_axis(where=['top', 'left', 'right'], ax=ax)
        ax.set_xlabel(xlabels[xk])
        ax.set_ylim(0, ax.get_ylim()[1])
        axes.append(ax)
    try:
        # qualified call: the bare tight_layout() was a NameError unless
        # separately imported (cf. plt.tight_layout() elsewhere in the file)
        plt.tight_layout()
    except Exception as e:
        print(e)
    return axes
# =============================================================================
# Implementing THEMES in plt
# =============================================================================
class Theme(object):
"""This is an abstract base class for themes.
In general, only complete themes should should subclass this class.
Notes
-----
When subclassing there are really only two methods that need to be
implemented.
__init__: This should call super().__init__ which will define
self._rcParams. Subclasses should customize self._rcParams after calling
super().__init__. That will ensure that the rcParams are applied at
the appropriate time.
The other method is apply_theme(ax). This method takes an axes object that
has been created during the plot process. The theme should modify the
axes according.
"""
_allowed_keys = plt.rcParams.keys()
def __init__(self, *args, **kwargs):
"""
Provide ggplot2 themeing capabilities.
Parameters
-----------
kwargs**: theme_element
kwargs are theme_elements based on http://docs.ggplot2.org/current/theme.html.
Currently only a subset of the elements are implemented. In addition,
Python does not allow using '.' in argument names, so we are using '_'
instead.
For example, ggplot2 axis.ticks.y will be axis_ticks_y in Python ggplot.
"""
self._rcParams = {}
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).items():
self.__setitem__(k, v)
def __setitem__(self, k, v):
if k in self._allowed_keys:
self._rcParams[k] = v
def keys(self, regexp=None, full_match=False):
"""
Return the data column names or a subset of it
Parameters
----------
regexp: str
pattern to filter the keys with
full_match: bool
if set, use :func:`re.fullmatch` instead of :func:`re.match`
Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found.
returns
-------
seq: sequence
sequence of keys
"""
keys = list(sorted(self._rcParams.keys()))
if (regexp is None) or (regexp == '*'):
return keys
elif type(regexp) in basestring:
if full_match is True:
fn = re.fullmatch
else:
fn = re.match
if regexp.count(',') > 0:
_re = regexp.split(',')
elif regexp.count(' ') > 0:
_re = regexp.split()
else:
_re = [regexp]
_keys = []
for _rk in _re:
_keys += [k for k in keys if (fn(_rk, k) is not None)]
return _keys
elif hasattr(regexp, '__iter__'):
_keys = []
for k in regexp:
_keys += self.keys(k)
return _keys
else:
raise ValueError('Unexpected type {0} for regexp'.format(type(regexp)))
    def apply_theme(self, ax):
        """apply_theme will be called with an axes object after plot has completed.
        Complete themes should implement this method if post plot themeing is
        required.

        Base-class implementation is a deliberate no-op; subclasses such as
        Theme_Seaborn/Theme_538/Theme_Gray override it.
        """
        pass
def get_rcParams(self):
"""Get an rcParams dict for this theme.
Notes
-----
Subclasses should not need to override this method method as long as
self._rcParams is constructed properly.
rcParams are used during plotting. Sometimes the same theme can be
achieved by setting rcParams before plotting or a post_plot_callback
after plotting. The choice of how to implement it is is a matter of
convenience in that case.
There are certain things can only be themed after plotting. There
may not be an rcParam to control the theme or the act of plotting
may cause an entity to come into existence before it can be themed.
"""
rcParams = deepcopy(self._rcParams)
return rcParams
def __add__(self, other):
if isinstance(other, Theme):
theme = deepcopy(self)
theme.update(**other.get_rcParams())
return theme
else:
raise TypeError()
    def post_callback(self, *args, **kwargs):
        """Hook run after plotting (via __exit__/__call__); no-op by default.

        Subclasses (e.g. Theme_TightLayout) override this to do post-plot
        work such as layout adjustment.
        """
        pass
    def apply(self):
        """Snapshot the current global rcParams, then apply this theme's.

        The snapshot is kept in self._rcstate so restore() can undo the
        change.
        """
        self._rcstate = deepcopy(plt.rcParams)
        plt.rcParams.update(**self.get_rcParams())
    def restore(self):
        # Undo apply()/__enter__; raises AttributeError if neither was called
        # first (self._rcstate only exists after one of them ran).
        plt.rcParams.update(self._rcstate)
def __enter__(self):
self._rcstate = deepcopy(plt.rcParams)
plt.rcParams.update(**self.get_rcParams())
return self
    def __exit__(self, *args, **kwargs):
        # Run the post-plot hook, then restore the rcParams snapshot taken on
        # entry.  Returns None, so any exception raised inside the `with`
        # block propagates.
        self.post_callback()
        plt.rcParams.update(self._rcstate)
    def __call__(self, *args, **kwargs):
        # Calling the theme object directly just forwards to the post-plot
        # hook (a no-op unless a subclass overrides post_callback).
        return self.post_callback(*args, **kwargs)
class Theme_Seaborn(Theme):
    """
    Theme for seaborn.
    Copied from mwaskom's seaborn:
    https://github.com/mwaskom/seaborn/blob/master/seaborn/rcmod.py
    Parameters
    ----------
    style: whitegrid | darkgrid | nogrid | ticks
        Style of axis background.
    context: notebook | talk | paper | poster
        Intended context for resulting figures.
    gridweight: extra heavy | heavy | medium | light
        Width of the grid lines. None picks a default based on context.
    """
    def __init__(self, style="whitegrid", gridweight=None, context="notebook"):
        # BUGFIX: name the class explicitly -- ``super(self.__class__, self)``
        # recurses forever as soon as a subclass inherits this __init__.
        super(Theme_Seaborn, self).__init__()
        self.style = style
        self.gridweight = gridweight
        self.context = context
        self._set_theme_seaborn_rcparams(self._rcParams, self.style,
                                         self.gridweight, self.context)
    def _set_theme_seaborn_rcparams(self, rcParams, style, gridweight, context):
        """helper method to set the default rcParams and other theming relevant
        things
        """
        # select grid line width:
        gridweights = {'extra heavy': 1.5,
                       'heavy': 1.1,
                       'medium': 0.8,
                       'light': 0.5, }
        if gridweight is None:
            if context == "paper":
                glw = gridweights["medium"]
            else:
                glw = gridweights['extra heavy']
        elif np.isreal(gridweight):
            glw = gridweight
        else:
            glw = gridweights[gridweight]
        if style == "darkgrid":
            # NOTE(review): lw is computed but unused in this branch
            # (axes.linewidth is hard-coded to 0 below) -- confirm intent.
            lw = .8 if context == "paper" else 1.5
            ax_params = {"axes.facecolor": "#EAEAF2",
                         "axes.edgecolor": "white",
                         "axes.linewidth": 0,
                         "axes.grid": True,
                         "axes.axisbelow": True,
                         "grid.color": "w",
                         "grid.linestyle": "-",
                         "grid.linewidth": glw}
        elif style == "whitegrid":
            lw = 1.0 if context == "paper" else 1.7
            ax_params = {"axes.facecolor": "white",
                         "axes.edgecolor": "#CCCCCC",
                         "axes.linewidth": lw,
                         "axes.grid": True,
                         "axes.axisbelow": True,
                         "grid.color": "#DDDDDD",
                         "grid.linestyle": "-",
                         "grid.linewidth": glw}
        elif style == "nogrid":
            ax_params = {"axes.grid": False,
                         "axes.facecolor": "white",
                         "axes.edgecolor": "black",
                         "axes.linewidth": 1}
        elif style == "ticks":
            ticksize = 3. if context == "paper" else 6.
            tickwidth = .5 if context == "paper" else 1
            ax_params = {"axes.grid": False,
                         "axes.facecolor": "white",
                         "axes.edgecolor": "black",
                         "axes.linewidth": 1,
                         "xtick.direction": "out",
                         "ytick.direction": "out",
                         "xtick.major.width": tickwidth,
                         "ytick.major.width": tickwidth,
                         "xtick.minor.width": tickwidth,
                         # BUGFIX: "xtick.minor.width" appeared twice here, so
                         # the minor y-tick width was never themed.
                         "ytick.minor.width": tickwidth,
                         "xtick.major.size": ticksize,
                         "xtick.minor.size": ticksize / 2,
                         "ytick.major.size": ticksize,
                         "ytick.minor.size": ticksize / 2}
        else:
            ax_params = {}
        rcParams.update(ax_params)
        # Determine the font sizes
        if context == "talk":
            font_params = {"axes.labelsize": 16,
                           "axes.titlesize": 19,
                           "xtick.labelsize": 14,
                           "ytick.labelsize": 14,
                           "legend.fontsize": 13,
                           }
        elif context == "notebook":
            font_params = {"axes.labelsize": 11,
                           "axes.titlesize": 12,
                           "xtick.labelsize": 10,
                           "ytick.labelsize": 10,
                           "legend.fontsize": 10,
                           }
        elif context == "poster":
            font_params = {"axes.labelsize": 18,
                           "axes.titlesize": 22,
                           "xtick.labelsize": 16,
                           "ytick.labelsize": 16,
                           "legend.fontsize": 16,
                           }
        elif context == "paper":
            font_params = {"axes.labelsize": 8,
                           "axes.titlesize": 12,
                           "xtick.labelsize": 8,
                           "ytick.labelsize": 8,
                           "legend.fontsize": 8,
                           }
        else:
            # BUGFIX: an unrecognized context previously left font_params
            # unbound, raising UnboundLocalError at the update() below.
            font_params = {}
        rcParams.update(font_params)
        # Set other parameters
        rcParams.update({
            "lines.linewidth": 1.1 if context == "paper" else 1.4,
            "patch.linewidth": .1 if context == "paper" else .3,
            "xtick.major.pad": 3.5 if context == "paper" else 7,
            "ytick.major.pad": 3.5 if context == "paper" else 7, })
        rcParams["timezone"] = "UTC"
        rcParams["patch.antialiased"] = "True"
        rcParams["font.family"] = "sans-serif"
        rcParams["font.size"] = "12.0"
        rcParams["font.serif"] = ["Times", "Palatino", "New Century Schoolbook",
                                  "Bookman", "Computer Modern Roman",
                                  "Times New Roman"]
        rcParams["font.sans-serif"] = ["Helvetica", "Avant Garde",
                                       "Computer Modern Sans serif", "Arial"]
        # NOTE(review): "axes.color_cycle" was removed from matplotlib (use
        # "axes.prop_cycle"), and only the first color carries a leading '#'
        # -- confirm against the matplotlib version in use.
        rcParams["axes.color_cycle"] = ["#333333", "348ABD", "7A68A6", "A60628",
                                        "467821", "CF4457", "188487", "E24A33"]
        rcParams["legend.fancybox"] = "True"
        rcParams["figure.figsize"] = "11, 8"
        rcParams["figure.facecolor"] = "1.0"
        rcParams["figure.edgecolor"] = "0.50"
        rcParams["figure.subplot.hspace"] = "0.5"
    def apply_theme(self, ax):
        """Styles x,y axes to appear like ggplot2
        Must be called after all plot and axis manipulation operations have
        been carried out (needs to know final tick spacing)
        From: https://github.com/wrobstory/climatic/blob/master/climatic/stylers.py
        """
        # Remove axis border
        for child in ax.get_children():
            if isinstance(child, mpl.spines.Spine):
                child.set_alpha(0)
        # Restyle the tick lines
        for line in ax.get_xticklines() + ax.get_yticklines():
            line.set_markersize(5)
            line.set_markeredgewidth(1.4)
        # Only show bottom left ticks
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        # Set minor grid lines
        ax.grid(True, 'minor', color='#F2F2F2', linestyle='-', linewidth=0.7)
        if not isinstance(ax.xaxis.get_major_locator(), mpl.ticker.LogLocator):
            ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
        if not isinstance(ax.yaxis.get_major_locator(), mpl.ticker.LogLocator):
            ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
class Theme_538(Theme):
    """
    Theme for 538.
    http://dataorigami.net/blogs/napkin-folding/17543615-replicating-538s-plot-styles-in-matplotlib

    NOTE(review): values are written straight into self._rcParams, bypassing
    __setitem__'s key validation -- obsolete keys such as
    "svg.embed_char_paths" are therefore NOT filtered out here; confirm they
    are still accepted by the matplotlib version in use.
    """
    def __init__(self):
        # BUGFIX: name the class explicitly -- ``super(self.__class__, self)``
        # recurses forever as soon as a subclass inherits this __init__.
        super(Theme_538, self).__init__()
        # (removed a dead assignment: "lines.linewidth" was first set to "2.0"
        # here and then unconditionally overwritten with "4" below)
        self._rcParams["patch.linewidth"] = "0.5"
        self._rcParams["legend.fancybox"] = "True"
        self._rcParams["axes.color_cycle"] = [ "#30a2da", "#fc4f30", "#e5ae38",
                                               "#6d904f", "#8b8b8b"]
        self._rcParams["axes.facecolor"] = "#f0f0f0"
        self._rcParams["axes.labelsize"] = "large"
        self._rcParams["axes.axisbelow"] = "True"
        self._rcParams["axes.grid"] = "True"
        self._rcParams["patch.edgecolor"] = "#f0f0f0"
        self._rcParams["axes.titlesize"] = "x-large"
        self._rcParams["svg.embed_char_paths"] = "path"
        self._rcParams["figure.facecolor"] = "#f0f0f0"
        self._rcParams["grid.linestyle"] = "-"
        self._rcParams["grid.linewidth"] = "1.0"
        self._rcParams["grid.color"] = "#cbcbcb"
        self._rcParams["axes.edgecolor"] = "#f0f0f0"
        self._rcParams["xtick.major.size"] = "0"
        self._rcParams["xtick.minor.size"] = "0"
        self._rcParams["ytick.major.size"] = "0"
        self._rcParams["ytick.minor.size"] = "0"
        self._rcParams["axes.linewidth"] = "3.0"
        self._rcParams["font.size"] = "14.0"
        self._rcParams["lines.linewidth"] = "4"
        self._rcParams["lines.solid_capstyle"] = "butt"
        self._rcParams["savefig.edgecolor"] = "#f0f0f0"
        self._rcParams["savefig.facecolor"] = "#f0f0f0"
        self._rcParams["figure.subplot.left"] = "0.08"
        self._rcParams["figure.subplot.right"] = "0.95"
        self._rcParams["figure.subplot.bottom"] = "0.07"
    def apply_theme(self, ax):
        '''Styles x,y axes to appear like ggplot2
        Must be called after all plot and axis manipulation operations have
        been carried out (needs to know final tick spacing)
        From: https://github.com/wrobstory/climatic/blob/master/climatic/stylers.py
        '''
        # Remove axis border
        for child in ax.get_children():
            if isinstance(child, mpl.spines.Spine):
                child.set_alpha(0)
        # Restyle the tick lines
        for line in ax.get_xticklines() + ax.get_yticklines():
            line.set_markersize(5)
            line.set_markeredgewidth(1.4)
        # Only show bottom left ticks
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        # Set minor grid lines
        ax.grid(True, 'minor', color='#F2F2F2', linestyle='-', linewidth=0.7)
        if not isinstance(ax.xaxis.get_major_locator(), mpl.ticker.LogLocator):
            ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
        if not isinstance(ax.yaxis.get_major_locator(), mpl.ticker.LogLocator):
            ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
class Theme_Gray(Theme):
    """
    Standard theme for ggplot. Gray background w/ white gridlines.
    Copied from the the ggplot2 codebase:
    https://github.com/hadley/ggplot2/blob/master/R/theme-defaults.r

    NOTE(review): several colors below lack a leading '#' ("348ABD",
    "bcbcbc", and most of axes.color_cycle) -- modern matplotlib rejects
    bare hex strings, so confirm against the targeted matplotlib version.
    """
    def __init__(self):
        # BUGFIX: name the class explicitly -- ``super(self.__class__, self)``
        # recurses forever as soon as a subclass inherits this __init__.
        super(Theme_Gray, self).__init__()
        self._rcParams["timezone"] = "UTC"
        self._rcParams["lines.linewidth"] = "1.0"
        self._rcParams["lines.antialiased"] = "True"
        self._rcParams["patch.linewidth"] = "0.5"
        self._rcParams["patch.facecolor"] = "348ABD"
        self._rcParams["patch.edgecolor"] = "#E5E5E5"
        self._rcParams["patch.antialiased"] = "True"
        self._rcParams["font.family"] = "sans-serif"
        self._rcParams["font.size"] = "12.0"
        self._rcParams["font.serif"] = ["Times", "Palatino",
                                        "New Century Schoolbook",
                                        "Bookman", "Computer Modern Roman",
                                        "Times New Roman"]
        self._rcParams["font.sans-serif"] = ["Helvetica", "Avant Garde",
                                             "Computer Modern Sans serif",
                                             "Arial"]
        self._rcParams["axes.facecolor"] = "#E5E5E5"
        self._rcParams["axes.edgecolor"] = "bcbcbc"
        self._rcParams["axes.linewidth"] = "1"
        self._rcParams["axes.grid"] = "True"
        self._rcParams["axes.titlesize"] = "x-large"
        self._rcParams["axes.labelsize"] = "large"
        self._rcParams["axes.labelcolor"] = "black"
        self._rcParams["axes.axisbelow"] = "True"
        self._rcParams["axes.color_cycle"] = ["#333333", "348ABD", "7A68A6",
                                              "A60628",
                                              "467821", "CF4457", "188487",
                                              "E24A33"]
        self._rcParams["grid.color"] = "white"
        self._rcParams["grid.linewidth"] = "1.4"
        self._rcParams["grid.linestyle"] = "solid"
        self._rcParams["xtick.major.size"] = "0"
        self._rcParams["xtick.minor.size"] = "0"
        self._rcParams["xtick.major.pad"] = "6"
        self._rcParams["xtick.minor.pad"] = "6"
        self._rcParams["xtick.color"] = "#444444"
        self._rcParams["xtick.direction"] = "out"  # pointing out of axis
        self._rcParams["ytick.major.size"] = "0"
        self._rcParams["ytick.minor.size"] = "0"
        self._rcParams["ytick.major.pad"] = "6"
        self._rcParams["ytick.minor.pad"] = "6"
        self._rcParams["ytick.color"] = "#444444"
        self._rcParams["ytick.direction"] = "out"  # pointing out of axis
        self._rcParams["legend.fancybox"] = "True"
        self._rcParams["figure.figsize"] = "11, 8"
        self._rcParams["figure.facecolor"] = "1.0"
        self._rcParams["figure.edgecolor"] = "0.50"
        self._rcParams["figure.subplot.hspace"] = "0.5"
    def apply_theme(self, ax):
        '''Styles x,y axes to appear like ggplot2
        Must be called after all plot and axis manipulation operations have
        been carried out (needs to know final tick spacing)
        From: https://github.com/wrobstory/climatic/blob/master/climatic/stylers.py
        '''
        # Remove axis border
        for child in ax.get_children():
            if isinstance(child, mpl.spines.Spine):
                child.set_alpha(0)
        # Restyle the tick lines
        for line in ax.get_xticklines() + ax.get_yticklines():
            line.set_markersize(5)
            line.set_markeredgewidth(1.4)
        # Only show bottom left ticks
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        # Set minor grid lines
        ax.grid(True, 'minor', color='#F2F2F2', linestyle='-', linewidth=0.7)
        if not isinstance(ax.xaxis.get_major_locator(), mpl.ticker.LogLocator):
            ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
        if not isinstance(ax.yaxis.get_major_locator(), mpl.ticker.LogLocator):
            ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
class Theme_Matplotlib(Theme):
    """Theme that resets everything to matplotlib's default rcParams
    (plt.rcParamsDefault) and switches interactive mode on."""
    def __init__(self, **kwargs):
        # BUGFIX: name the class explicitly -- ``super(self.__class__, self)``
        # recurses forever as soon as a subclass inherits this __init__.
        super(Theme_Matplotlib, self).__init__(**kwargs)
        self.update(**plt.rcParamsDefault)
        plt.ion()
class Theme_Ezrc(Theme):
    """LaTeX-oriented publication theme: serif fonts, usetex, configurable
    font/line/tick sizes.

    Parameters: fontSize (base size), lineWidth (lines/grid/axes),
    labelSize (defaults to fontSize + 2), tickmajorsize/tickminorsize,
    figsize.
    """
    def __init__(self, fontSize=16., lineWidth=1., labelSize=None,
                 tickmajorsize=10, tickminorsize=5, figsize=(8, 6)):
        if labelSize is None:
            labelSize = fontSize + 2
        rcParams = {}
        rcParams['figure.figsize'] = figsize
        rcParams['lines.linewidth'] = lineWidth
        rcParams['grid.linewidth'] = lineWidth
        # NOTE(review): serif and sans-serif are both set to Helvetica --
        # confirm this is intentional.
        rcParams['font.sans-serif'] = ['Helvetica']
        rcParams['font.serif'] = ['Helvetica']
        # (removed a dead assignment: 'font.family' was first set to
        # ['Times New Roman'] and then unconditionally overwritten with
        # 'serif' below)
        rcParams['font.size'] = fontSize
        rcParams['font.family'] = 'serif'
        rcParams['font.weight'] = 'bold'
        rcParams['axes.linewidth'] = lineWidth
        rcParams['axes.labelsize'] = labelSize
        rcParams['legend.borderpad'] = 0.1
        rcParams['legend.markerscale'] = 1.
        rcParams['legend.fancybox'] = False
        rcParams['text.usetex'] = True
        rcParams['image.aspect'] = 'auto'
        rcParams['ps.useafm'] = True
        rcParams['ps.fonttype'] = 3
        rcParams['xtick.major.size'] = tickmajorsize
        rcParams['xtick.minor.size'] = tickminorsize
        rcParams['ytick.major.size'] = tickmajorsize
        rcParams['ytick.minor.size'] = tickminorsize
        rcParams['text.latex.preamble'] = ["\\usepackage{amsmath}"]
        # BUGFIX: name the class explicitly -- ``super(self.__class__, self)``
        # recurses forever as soon as a subclass inherits this __init__.
        super(Theme_Ezrc, self).__init__(**rcParams)
        plt.ion()
class Theme_Latex(Theme):
    """Minimal LaTeX theme: serif/bold fonts with usetex, optionally forcing
    font and label sizes when fontSize / labelSize are given."""
    def __init__(self, fontSize=None, labelSize=None):
        rcParams = {}
        if fontSize is not None:
            if labelSize is None:
                labelSize = fontSize
            rcParams['font.sans-serif'] = ['Helvetica']
            rcParams['font.serif'] = ['Helvetica']
            # (removed a dead assignment: 'font.family' was set to
            # ['Times New Roman'] here and then unconditionally overwritten
            # with 'serif' below)
            rcParams['font.size'] = fontSize
            rcParams["axes.titlesize"] = labelSize
            rcParams["xtick.labelsize"] = labelSize
            rcParams["ytick.labelsize"] = labelSize
            rcParams["legend.fontsize"] = fontSize
        rcParams['font.family'] = 'serif'
        rcParams['font.weight'] = 'bold'
        if labelSize is not None:
            # BUGFIX: this was set unconditionally, which stored
            # axes.labelsize = None when neither fontSize nor labelSize was
            # supplied; passing labelSize alone still works as before.
            rcParams['axes.labelsize'] = labelSize
        rcParams['text.usetex'] = True
        rcParams['ps.useafm'] = True
        rcParams['ps.fonttype'] = 3
        rcParams['text.latex.preamble'] = ["\\usepackage{amsmath}"]
        # BUGFIX: name the class explicitly -- ``super(self.__class__, self)``
        # recurses forever as soon as a subclass inherits this __init__.
        super(Theme_Latex, self).__init__(**rcParams)
        plt.ion()
class Theme_TightLayout(Theme):
    """Theme whose only effect is to run tight_layout() after plotting.

    Keyword arguments given at construction are forwarded to tight_layout(),
    not treated as rcParams.
    """
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        # BUGFIX: name the class explicitly -- ``super(self.__class__, self)``
        # recurses forever as soon as a subclass inherits this __init__.
        super(Theme_TightLayout, self).__init__()
    def post_callback(self, *args, **kwargs):
        """Run tight_layout with the stored plus call-time keyword args.

        NOTE(review): call-time kwargs are merged into self.kwargs and
        therefore persist across calls -- confirm that is intended.
        """
        self.kwargs.update(kwargs)
        tight_layout(**self.kwargs)
def declare_parula():
    """Register MATLAB's 'parula' colormap (plus reversed and white-padded
    variants 'parula_r', 'parulaW', 'parulaW_r') with matplotlib.

    NOTE(review): cm.register_cmap and writing into cm.__dict__ are
    deprecated/removed in recent matplotlib (use matplotlib.colormaps.register)
    -- confirm against the matplotlib version in use.
    """
    from matplotlib.colors import LinearSegmentedColormap
    from matplotlib import cm
    # 64-entry RGB table for parula, each row in [0, 1].
    cm_data = [
        [0.2081, 0.1663, 0.5292],
        [0.2116238095, 0.1897809524, 0.5776761905],
        [0.212252381, 0.2137714286, 0.6269714286],
        [0.2081, 0.2386, 0.6770857143],
        [0.1959047619, 0.2644571429, 0.7279],
        [0.1707285714, 0.2919380952, 0.779247619],
        [0.1252714286, 0.3242428571, 0.8302714286],
        [0.0591333333, 0.3598333333, 0.8683333333],
        [0.0116952381, 0.3875095238, 0.8819571429],
        [0.0059571429, 0.4086142857, 0.8828428571],
        [0.0165142857, 0.4266, 0.8786333333],
        [0.032852381, 0.4430428571, 0.8719571429],
        [0.0498142857, 0.4585714286, 0.8640571429],
        [0.0629333333, 0.4736904762, 0.8554380952],
        [0.0722666667, 0.4886666667, 0.8467],
        [0.0779428571, 0.5039857143, 0.8383714286],
        [0.079347619, 0.5200238095, 0.8311809524],
        [0.0749428571, 0.5375428571, 0.8262714286],
        [0.0640571429, 0.5569857143, 0.8239571429],
        [0.0487714286, 0.5772238095, 0.8228285714],
        [0.0343428571, 0.5965809524, 0.819852381],
        [0.0265, 0.6137, 0.8135],
        [0.0238904762, 0.6286619048, 0.8037619048],
        [0.0230904762, 0.6417857143, 0.7912666667],
        [0.0227714286, 0.6534857143, 0.7767571429],
        [0.0266619048, 0.6641952381, 0.7607190476],
        [0.0383714286, 0.6742714286, 0.743552381],
        [0.0589714286, 0.6837571429, 0.7253857143],
        [0.0843, 0.6928333333, 0.7061666667],
        [0.1132952381, 0.7015, 0.6858571429],
        [0.1452714286, 0.7097571429, 0.6646285714],
        [0.1801333333, 0.7176571429, 0.6424333333],
        [0.2178285714, 0.7250428571, 0.6192619048],
        [0.2586428571, 0.7317142857, 0.5954285714],
        [0.3021714286, 0.7376047619, 0.5711857143],
        [0.3481666667, 0.7424333333, 0.5472666667],
        [0.3952571429, 0.7459, 0.5244428571],
        [0.4420095238, 0.7480809524, 0.5033142857],
        [0.4871238095, 0.7490619048, 0.4839761905],
        [0.5300285714, 0.7491142857, 0.4661142857],
        [0.5708571429, 0.7485190476, 0.4493904762],
        [0.609852381, 0.7473142857, 0.4336857143],
        [0.6473, 0.7456, 0.4188],
        [0.6834190476, 0.7434761905, 0.4044333333],
        [0.7184095238, 0.7411333333, 0.3904761905],
        [0.7524857143, 0.7384, 0.3768142857],
        [0.7858428571, 0.7355666667, 0.3632714286],
        [0.8185047619, 0.7327333333, 0.3497904762],
        [0.8506571429, 0.7299, 0.3360285714],
        [0.8824333333, 0.7274333333, 0.3217],
        [0.9139333333, 0.7257857143, 0.3062761905],
        [0.9449571429, 0.7261142857, 0.2886428571],
        [0.9738952381, 0.7313952381, 0.266647619],
        [0.9937714286, 0.7454571429, 0.240347619],
        [0.9990428571, 0.7653142857, 0.2164142857],
        [0.9955333333, 0.7860571429, 0.196652381],
        [0.988, 0.8066, 0.1793666667],
        [0.9788571429, 0.8271428571, 0.1633142857],
        [0.9697, 0.8481380952, 0.147452381],
        [0.9625857143, 0.8705142857, 0.1309],
        [0.9588714286, 0.8949, 0.1132428571],
        [0.9598238095, 0.9218333333, 0.0948380952],
        [0.9661, 0.9514428571, 0.0755333333],
        [0.9763, 0.9831, 0.0538]]
    # Register 'parula' and make it accessible as an attribute on cm.
    parula_map = LinearSegmentedColormap.from_list('parula', cm_data)
    cm.register_cmap('parula', cmap=parula_map)
    cm.__dict__['parula'] = cm.get_cmap('parula')
    # Reversed variant.
    parula_r_map = LinearSegmentedColormap.from_list('parula_r', cm_data[::-1])
    cm.register_cmap('parula_r', cmap=parula_r_map)
    cm.__dict__['parula_r'] = cm.get_cmap('parula_r')
    # 'parulaW': parula padded with white at both ends (and its reverse).
    parula_map = LinearSegmentedColormap.from_list('parulaW', [[1., 1., 1.]] + cm_data + [[1., 1., 1.]])
    cm.register_cmap('parulaW', cmap=parula_map)
    cm.__dict__['parulaW'] = cm.get_cmap('parulaW')
    parula_r_map = LinearSegmentedColormap.from_list('parulaW_r', ([[1., 1., 1.]] + cm_data + [[1., 1., 1.]])[::-1])
    cm.register_cmap('parulaW_r', cmap=parula_r_map)
    cm.__dict__['parulaW_r'] = cm.get_cmap('parulaW_r')
| mfouesneau/iasbs2017 | figrc.py | Python | mit | 107,856 | [
"Gaussian"
] | 45ecc4e5037a7b428cd82cfb785375a0705787e9f96066ee5de8530944d5989a |
# Author: Michele Mattioni
# Mon Jan 26 05:54:30 GMT 2009
import math
from neuron import h, nrn
import ecellControl as eC
from synapse import Synapse
import logging
logger = logging.getLogger(__name__)
class Spine():
    """
    Class spine. Create a spine with head neck and psd.

    A spine is three connected NEURON sections (neck, head, psd) with an
    AMPA and an NMDA synapse on the psd, plus an optional E-Cell
    biochemical simulator attached via setup_bio_sim().
    """
    def __init__(self, id, filename_bioch_mod, big_spine):
        """ Create a spine with a standard volume of ~0.11 um
        the h is the reference to the main hoc interpreter

        :param id: prefix used to name the neck/head/psd hoc sections
        :param filename_bioch_mod: E-Cell model file used by setup_bio_sim()
        :param big_spine: if True, use the larger fixed head geometry
        """
        self.id = id
        # NOTE(review): head_vol is None here; with big_spine=False,
        # create_head computes sqrt(head_vol / ...) which will raise --
        # confirm the small-spine path is still exercised.
        self.head_vol = None  # 0.11 um3 calculated directly from the spine heads
        self.neck = self.create_neck()
        self.head = self.create_head(self.neck, self.head_vol, big_spine)
        self.psd = self.create_psd(self.head)
        self.parent = None  # the parent section connected to the neck (set by attach())
        self.synapses = self.create_synapses()
        self.filename = filename_bioch_mod
        self.k_flux = [[], []]  # parallel lists [times, k values] filled by update_calcium
        # Reset ions
        h.cai0_ca_ion = 0.001  # mM, Churchill 1998
        h.cao0_ca_ion = 5  # mM, Churchill 1998 - gives eca = 100 mV
        h.cali0_cal_ion = 0.001  # mM, Churchill 1998
        h.calo0_cal_ion = 5  # mM, Churchill 1998 - gives eca = 100 mV
    def setup_bio_sim(self):
        """Initialize the Biochemical Simulator creating the instance of
        the object to control the simulation"""
        if not hasattr(self, 'ecellMan'):
            ecellMan = eC.EcellManager(self.filename)
            ecellMan.createLoggers()
            # Setting the head volume with the spine head
            ecellMan.ses.vol = self.head_vol * 1e-15  # Converted in l
            self.ecellMan = ecellMan
            logger.info( "Ecell initialized in spine: %s" %self.id)
    def update_calcium(self, k_ca_flux):
        """Update the calcium using the electrical calcium from the NEURON
        section to the ecell compartment
        The constant is in mM/ms. We need to convert to number/seconds
        params:
        the constant for the Constant Flux in ecell.
        """
        # converting the concentration in molecules:
        # um^3 to l (1e-15)
        CUBIC_um_TO_LITER = 1e-15
        # 6.022 * 1e23 Avogadro's number
        N_Av = 6.022 * 1e23
        # mM to M (1e-3) at the beginning for mM to M
        millimolar_to_number = 1e-3 * self.head_vol * CUBIC_um_TO_LITER * N_Av
        milliseconds = 1e-3
        factor = millimolar_to_number / milliseconds
        k_converted = k_ca_flux * factor
        logger.debug( "k for the flux before unit convertion: %s and after: %s" %(k_ca_flux,
                                                                                  k_converted))
        self.ecellMan.ca_in['k'] = k_converted
        # keep a record of (time, k) for later inspection
        self.k_flux[0].append(h.t)
        self.k_flux[1].append(k_converted)
        # Disabling the leak and the pump
        self.ecellMan.ca_pump['vmax'] = 0
        self.ecellMan.ca_leak['vmax'] = 0
    def deploy_stims(self, neuron_time_interval_resolution):
        """Create the array with the inputs for all the synapses in this spine
        :param
        neuron_time_interval_resolution - Resolution of the vector to record
        the synapse"""
        for syn in self.synapses:
            inputs = []
            for stim in syn.stims:
                stim_inputs = stim.get_stims_time()
                inputs.extend(stim_inputs)
                stim.spine = self.id
            logger.info( "Creating the stim for spine: %s syn type: %s" %(self.id, syn.chan_type))
            syn.create_stimul(inputs, neuron_time_interval_resolution)
    def create_neck(self):
        """ Create the neck with the Grunditz value"""
        name_sec = self.id + "_neck"
        h("create " + name_sec)
        neck = getattr(h, name_sec)
        neck.nseg = 3
        neck.L = 1.5  # um
        neck.diam = 0.1
        #neck.Ra = 150.0 # Used by Grunditz et al 2008 (see supplemental material)
        neck.Ra = 100.0
        neck.insert("kir")
        h.factors_catrack()  # Called on the NMOD catrack
        neck.insert("catrack")
        h.factors_caltrack()  # Called on the NMOD caltrack
        neck.insert("caltrack")
        return neck
    def create_head(self, neck, head_vol, big_spine):
        """Create the head of the spine and populate it with the right channels"""
        name_sec = self.id + "_head"
        h("create " + name_sec)
        head = getattr(h, name_sec)
        if big_spine:
            head.L = 1
            head.diam = 1.175
            r = head.diam/2.
            self.head_vol = math.pi * r * r * head.L  # cylinder volume
        else:
            head.L = 0.5
            # diameter chosen so the cylinder's volume equals head_vol
            head.diam = math.sqrt(head_vol / (head.L * math.pi) ) * 2
            # BUGFIX: was `self.Ra = 150.0`, which only stored a dead
            # attribute on the Spine object instead of setting the section's
            # axial resistance.  NOTE(review): the big_spine branch leaves
            # Ra at NEURON's default -- confirm that is intended.
            head.Ra = 150.0
        head.nseg = 1
        head.connect(neck)
        head.insert("kir")
        head.insert("can")
        head.insert("caq")
        head.insert("car")
        head.insert("skkca")
        h.factors_caltrack()
        head.insert("caltrack")
        h.factors_catrack()
        head.insert("catrack")
        return head
    def create_psd(self, head):
        """Create the Post Synaptic Density of the spine to model the different \
        location of the different channel"""
        name_sec = self.id + "_psd"
        h("create " + name_sec)
        psd = getattr(h, name_sec)
        psd.L = 0.05  # um, Holmes & Levy 1990
        psd.diam = 0.5  # Wilson 1998 (Shepherd book)
        psd.Ra = 100
        psd.nseg = 1
        psd.connect(head)
        psd.insert("caL13")
        psd.insert("caL")
        psd.insert("rubin")
        h.factors_catrack()
        psd.insert("catrack")
        h.factors_caltrack()
        psd.insert("caltrack")
        return psd
    def create_synapses(self):
        "Create an AMPA and an NMDA synapse in the spine"
        synapses = []
        # AMPA Syn
        ampaSyn = Synapse('ampa', self.psd)
        synapses.append(ampaSyn)
        #NMDA Syn
        nmdaSyn = Synapse('nmda', self.psd)
        synapses.append(nmdaSyn)
        return synapses
    def attach(self, parentSec, parentx, childx):
        """Attach a spine to a parentSec and store the parentSec into an attribute.
        Just an handy variation of the connect method"""
        self.neck.connect(parentSec, parentx, childx)
        self.parent = parentSec
        self.pos = parentx
    def calc_surface_area_spine(self):
        """Calculate the surface of the spine (lateral areas of the three
        cylinders plus the psd end cap)"""
        surface_lat_neck = self.calc_lateral_area_section(self.neck)
        surface_lat_head = self.calc_lateral_area_section(self.head)
        surface_lat_psd = self.calc_lateral_area_section(self.psd)
        # The top of the psd
        # NOTE(review): 2*pi*r^2 is the area of *both* end disks of a
        # cylinder; the comment says "top", which would be pi*r^2 --
        # confirm which was intended (left unchanged here).
        r_psd = self.psd.diam/2
        top_surface_psd = 2 * math.pi * r_psd * r_psd
        # Adding all together
        tot_surf = 0
        tot_surf += surface_lat_neck
        tot_surf += surface_lat_head
        tot_surf += surface_lat_psd
        tot_surf += top_surface_psd
        return tot_surf
    def calc_lateral_area_section(self, sec):
        """Calculate the lateral (side) surface area of cylindrical section
        `sec` as 2*pi*r*h."""
        r = sec.diam/2
        height = sec.L
        # BUGFIX: was 2*pi*r*r*h, which carries an extra factor of r; the
        # lateral surface of a cylinder is 2*pi*r*h.  (Also renamed the
        # local `h`, which shadowed the module-level hoc interpreter.)
        lateral_area = 2 * math.pi * r * height
        return lateral_area
    def set_ampa_equilibrium_baseline(self):
        # Record the E-Cell AMPAR phosphorylation baseline at equilibrium.
        self.ampa_equilibrium_conc = self.ecellMan.ampar_P['Value']
        logger.info( "Number of AMPAR @equilibrium: %s " %self.ampa_equilibrium_conc)
| mattions/TimeScales | neuronControl/spine.py | Python | bsd-3-clause | 8,023 | [
"Avogadro",
"NEURON"
] | 8cd7131ed2530f9551087154408ffc2740e47ebd27943e2f4b1afd4e33ab5430 |
from sympy.abc import x, y
from sympy import lambdify, sin, exp, pi
import numpy as np
from mayavi import mlab
import matplotlib.pyplot as plt
import invariants
## Start by setting up the function and transformed function
f = invariants.SymbolicImage(exp(-2*x**2 - 4*sin(0.5*x**2 + y)**2))
# (removed dead locals theta/c/s: a pi/4 rotation matrix was computed but
# never used -- the transform below uses the 3*pi/4 angle directly)
tform = invariants.E2Transform(1, 3*np.pi/4, 0, 0)
f2 = f.transform(tform)
## Plot the image and transformed image
X, Y = np.mgrid[-1:1:200j, -1:1:200j]
I1 = lambdify([x, y], f.f, "numpy")(X, Y)
I2 = lambdify([x, y], f2.f, "numpy")(X, Y)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(I1, vmin=0, vmax=1)
ax2.imshow(I2, vmin=0, vmax=1)
plt.show()
## Compute and overlay the E(2) signatures of both images (the transformed
## signature should coincide with the original if the invariants are correct)
S = invariants.E2_signature(f)
Sb = invariants.E2_signature(f2)
X, Y = np.mgrid[-1:1:200j, -1:1:200j]
Sn = [lambdify((x, y), S[i], "numpy")(X, Y) for i in range(3)]
Sbn = [lambdify((x, y), Sb[i], "numpy")(X, Y) for i in range(3)]
mlab.mesh(10*Sn[0], Sn[1], Sn[2], color=(0, 0, 1), opacity=0.5)
mlab.mesh(10*Sbn[0], Sbn[1], Sbn[2], color=(1, 0, 0), opacity=0.5)
mlab.show()
| rgbrown/invariants | code/test_symbolic.py | Python | mit | 1,073 | [
"Mayavi"
] | 36c8197b0e91adf386feff1489cb5e0d18a7ad1dbcb965d1da7621a791f78eae |
"""
===============================
Ordinary Least Squares with SGD
===============================
Simple Ordinary Least Squares example with stochastic
gradient descent, we draw the linear least
squares solution for a random set of points in the plane.
"""
print(__doc__)
import pylab as pl
from sklearn.linear_model import SGDRegressor
from sklearn.datasets.samples_generator import make_regression
# this is our test set, it's just a straight line with some
# gaussian noise
X, Y = make_regression(n_samples=100, n_features=1, n_informative=1,
random_state=0, noise=35)
# run the classifier
clf = SGDRegressor(alpha=0.1, n_iter=20)
clf.fit(X, Y)
# and plot the result
pl.scatter(X, Y, color='black')
pl.plot(X, clf.predict(X), color='blue', linewidth=3)
pl.show()
| florian-f/sklearn | examples/linear_model/plot_sgd_ols.py | Python | bsd-3-clause | 797 | [
"Gaussian"
] | c82a300e13b7916168dd332b2aa12350d16e03ccae33d145a24a87ac3b835369 |
import sys
import time
import os
import scipy
import numpy
import collections
import csv
import operator
import math
import itertools
import utils
mdir = "%s/work/partis/datascripts/meta/goo-dengue-10x" % os.getenv('HOME')
fabio_fname = '%s/fabio-pb-markers.tsv' % mdir
waickfname = '%s/waickman-markers.csv' % mdir
msigdbfname = '%s/msigdb-markers.csv' % mdir
barcodefname = 'barcodes.txt'
pcafname = 'pca.txt'
umapfname = 'umap.txt'
clusterfname = 'clusters.txt'
cell_type_fname = 'cell-types.csv'
cluster_vs_subtype_fname = 'clusters-vs-subtype.csv'
# (cell type, msigdb C7 gene-set base name) pairs; the direction suffix
# (_UP or _DN) still needs to be appended -- see msigdb_sets().
msdsets = [
    ('gc', 'GSE4142_GC_BCELL_VS_MEMORY_BCELL'), # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4517294/ and https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1413911/
    ('memory', 'GSE42724_MEMORY_BCELL_VS_PLASMABLAST'), # https://pubmed.ncbi.nlm.nih.gov/23613519/
    ('naive', 'GSE4142_NAIVE_BCELL_VS_PLASMA_CELL'),
    ('naive', 'GSE4142_NAIVE_VS_GC_BCELL'),
    ('naive', 'GSE4142_NAIVE_VS_MEMORY_BCELL'),
    ('naive', 'GSE42724_NAIVE_BCELL_VS_PLASMABLAST'),
    ('naive', 'GSE42724_NAIVE_VS_MEMORY_BCELL'),
    ('plasma', 'GSE4142_PLASMA_CELL_VS_GC_BCELL'),
    ('plasma', 'GSE4142_PLASMA_CELL_VS_MEMORY_BCELL'),
]
def msigdb_sets(updown):
    """Return the (cell type, set name) pairs from <msdsets> with the
    direction suffix ('UP' or 'DN') appended to each set name."""
    assert updown in ['UP', 'DN']
    suffixed = []
    for ctype, setname in msdsets:
        suffixed.append((ctype, '%s_%s' % (setname, updown)))
    return suffixed
# ----------------------------------------------------------------------------------------
def markfname(iclust):
    """Return the marker-gene csv filename for cluster <iclust>.
    NOTE R indexing, starts from 1."""
    return 'markers-cluster-{0:d}.csv'.format(iclust)
# ----------------------------------------------------------------------------------------
def install():
    """One-time setup: install the R/Bioconductor packages this script needs."""
    setup_cmds = [
        'install.packages("BiocManager", repos="http://cran.rstudio.com/")',
        'BiocManager::install(c("scRNAseq", "scater", "scran", "uwot", "DropletUtils", "GSEABase", "AUCell", "celldex", "SingleR"), dependencies=TRUE)']  # "TENxPBMCData"
    # maybe should add these, since the regular package installer tried to update them but couldn't?
    # 'beachmat', 'BiocNeighbors', 'BiocStyle', 'biomaRt', 'DelayedArray', 'DelayedMatrixStats', 'edgeR', 'gdsfmt', 'GenomeInfoDb', 'HDF5Array', 'IRanges', 'MatrixGenerics', 'preprocessCore', 'Rhdf5lib', 'S4Vectors', 'scuttle', 'sparseMatrixStats'
    utils.run_r(setup_cmds, 'auto')
# install()
# sys.exit()
# ----------------------------------------------------------------------------------------
def loadcmd(lib):
    """Return the R command that quietly loads library <lib>."""
    return 'library({0}, warn.conflicts=F, quietly=T)'.format(lib)
# ----------------------------------------------------------------------------------------
def rplotcmds(plotdir, plotname, pcmd, rowcol=None, hw=None, ftype='png'):
    """Build the R commands that render <pcmd> into <plotdir>/<plotname>.<ftype>.

    rowcol: optional (row, column) pair passed to an R layout() call
    hw: optional pair formatted as (width, height) into the device call
    """
    device_cmd = '%s("%s/%s.%s")' % (ftype, plotdir, plotname, ftype)
    if hw is not None:
        # splice the size arguments into the device-opening call
        device_cmd = device_cmd.rstrip(')') + ', width=%d, height=%d)' % tuple(hw)
    commands = [device_cmd, pcmd, 'dev.off()']
    if rowcol is not None:
        nrow, ncol = rowcol
        cells = ', '.join(str(i) for i in range(1, nrow * ncol + 1))
        commands.insert(1, 'layout(mat=matrix(c(%s), nrow=%d, ncol=%d, byrow=T))' % (cells, nrow, ncol))
    return commands
# ----------------------------------------------------------------------------------------
def dimredcmds(outdir, glist_name, max_pca_components=25, n_top_genes=100):
    """Build the R commands for feature selection, PCA/UMAP dimensionality
    reduction, graph-based clustering, and per-cluster marker-gene output.

    glist_name is the *R variable name* holding the gene list (it is spliced
    into the command strings); results are written under <outdir>.
    Doubled %% signs are literal percents destined for R's sprintf.
    """
    # feature selection
    rcmds = [
        'print(sprintf(" using %%d genes: %%s", length(%s), paste(%s, collapse=" ")))' % (glist_name, glist_name),
        'gene.bools <- rowData(sce)$Symbol %%in%% %s' % glist_name, # $ID
        # dimensionality reduction
        'set.seed(1)',
        'n.comp <- min(%d, as.integer(length(%s)/2))' % (max_pca_components, glist_name),
        'print(sprintf("running pca with %d components", n.comp))',
        'sce <- runPCA(sce, ncomponents=n.comp, subset_row=gene.bools)',
        'sce <- runUMAP(sce, dimred="PCA", external_neighbors=TRUE)', # uses pca results from previous step TODO test variety of N neighbors and min_dist values
        # clustering
        'g <- buildSNNGraph(sce, use.dimred="PCA")', # guild graph
        'colLabels(sce) <- factor(igraph::cluster_louvain(g)$membership)', # use graph to cluster, and add the resulting labels to <sce>
        'capture.output(attr(reducedDims(sce)$PCA, "rotation"), file="%s/%s")' % (outdir, pcafname), # pca to gene name rotation
        # (reducedDim(sce, "PCA")[,] # a table of the pca values for each cell
        'capture.output(reducedDim(sce, "UMAP")[,], file="%s/%s")' % (outdir, umapfname), # umap pair for each cell
        'capture.output(colLabels(sce), file="%s/%s")' % (outdir, clusterfname), # cluster label for each cell
    ]
    rcmds += rplotcmds(outdir, 'clusters', 'plotUMAP(sce, colour_by="label")')
    # find marker genes
    rcmds += [
        'markers <- findMarkers(sce)', # <markers>: list of data frames for each cluster NOTE this uses *all* the genes, and i can't figure out a way to tell it not to
        'print(sprintf(" top %d genes for each cluster (total size %%d)", length(sce$label)))' % n_top_genes,
        'for(ich in seq(length(markers))) {' # NOTE(review): no trailing comma here, so this literal concatenates with the next string onto one R line ("for(...) { print(...)") -- appears to still be valid R, but confirm it's intentional; loop looks at genes that distinguish cluster ich from all other clusters
        '  print(sprintf("  cluster %2d  size %4d  frac %.2f", ich, sum(sce$label==ich), sum(sce$label==ich) / length(sce$label)))',
        '  interesting <- markers[[ich]]',
        '  best.set <- interesting[interesting$Top <= %d,]' % n_top_genes, # takes all genes that were in the top N for any pairwise comparison
        '  write.csv(best.set, sprintf("%s/markers-cluster-%%d.csv", ich))' % outdir,
        '  logFCs <- getMarkerEffects(best.set)',
    ]
    # if make_plots: # these plots aren't really readable any more with n_top_genes more than 10 or so
    #     rcmds += [
    #         # rplotcmds(outdir, 'sprintf("%s/heatmap-%%d", ich)', # arg, this won't work this way
    #         '  png(sprintf("%s/heatmap-%%d.png", ich))' % outdir,
    #         '  pheatmap(logFCs, breaks=seq(-5, 5, length.out=101))',
    #         '  dev.off()',
    #     ]
    rcmds += [
        '}',
    ]
    return rcmds
# ----------------------------------------------------------------------------------------
def run_msigdbr(outdir): # download the sets and write to csvs
    # Download the msigdb C7 (immunologic signature) gene sets via the R msigdbr package,
    # keep only the sets returned by msigdb_sets('UP'), and write one combined csv
    # (columns: gene, type) to <outdir>/msigdb-markers.csv. Python 2 file (print statements).
    # NOTE still had to sort|uniq|sort -t, -k2 this by hand (after removing first column with just line numbers)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    rcmds = [
        loadcmd('msigdbr'),
        'all_gene_sets <- msigdbr(species="Homo sapiens", category="C7")',
        'alldf <- data.frame()',
    ]
    for ctype, gsname in msigdb_sets('UP'): # TODO should probably use the 'DN' ones in some way?
        print '  %8s %s' % (ctype, gsname)
        rcmds += [
            'glist <- all_gene_sets[all_gene_sets$gs_name=="%s",]$human_gene_symbol' % gsname, # gives list of gene names
            'df <- data.frame(glist, tag="%s")' % ctype,
            'names(df)[names(df) == "glist"] <- "gene"', # rename columns so the csv header is gene,type
            'names(df)[names(df) == "tag"] <- "type"',
            'alldf <- rbind(alldf, df)'
        ]
    rcmds += [
        'write.csv(alldf, "%s/msigdb-markers.csv")' % outdir,
    ]
    utils.run_r(rcmds, 'auto', dryrun=False)
# ----------------------------------------------------------------------------------------
def ctype_ann_cmds(outdir, clnames): # cell type annotation (although we do some with celldex at the start as well)
    # Return a list of R commands that annotate cell types with AUCell using a custom
    # reference table: <clnames> is the name of an R data frame (already defined in the R
    # session) with 'gene' and 'type' columns. Writes per-cell AUC scores + best label to
    # <cell_type_fname> and a cluster-vs-subtype contingency table to <cluster_vs_subtype_fname>.
    # NOTE(review): assumes <sce> and colLabels(sce) exist in the R session — confirm callers run clustering first.
    # using custom references
    rcmds = [
        'clabels <- %s[%s$type!="", ]' % (clnames, clnames), # remove rows/genes with empty 'type'
        'ctypes <- clabels$type[!duplicated(clabels$type)]', # just gets all values for the 'type' column
        'all.sets <- lapply(ctypes, function(x) { GeneSet(lapply(clabels, `[`, clabels$type==x)$gene, setName=x) })',
        'all.sets <- GeneSetCollection(all.sets)',
        'rankings <- AUCell_buildRankings(counts(sce), plotStats=FALSE, verbose=FALSE)',
        'cell.aucs <- AUCell_calcAUC(all.sets, rankings)',
        'results <- t(assay(cell.aucs))',
        'new.labels <- colnames(results)[max.col(results)]', # per cell: name of the set with the highest AUC
        'write.csv(cbind(barcode=colData(sce)$Barcode, results, new.labels), "%s/%s")' % (outdir, cell_type_fname),
        'tab <- table(new.labels, sce$label)', # only if we have clusters
        'write.csv(tab, "%s/%s")' % (outdir, cluster_vs_subtype_fname),
    ]
    rcmds += rplotcmds(outdir, 'auc-thresholds', 'AUCell_exploreThresholds(cell.aucs, plotHist=TRUE, assign=TRUE)', rowcol=(2, 2), hw=(1500, 1500)) # this is verbose as all hell
    return rcmds
# ----------------------------------------------------------------------------------------
# takes the "dot product" (if normalized, it's cos theta) of two groups of logfc expression values to see how similar they are (yeah this is probably kind of dumb, but it'll give an idea of how similar they are)
# NOTE(review): the 'normalize' denominator is the *sum* of the two self-dot-products (not the sqrt of
#   their product as for a true cosine), and will raise ZeroDivisionError if the two sets share no genes
#   with themselves (i.e. both are empty) — presumably never happens in practice, but worth confirming.
#   gene_contribs values are returned *un*-normalized even when <normalize> is set.
def gexdot(gvals1, gvals2=None, normalize=True, recursed=False, return_gene_contributions=False, lbstr='', debug=False): # they're both ordered dicts gene : logfc (sorted by decreasing logfc)
    if gvals2 is None:
        gvals2 = gvals1  # single-argument call: self dot product (used for normalization)
    dprod = 0.
    common_genes = set(gvals1) & set(gvals2)
    gene_contribs = {}
    for gene in common_genes: # loop over genes that they have in common
        gene_contribs[gene] = gvals1[gene] * gvals2[gene]
        dprod += gene_contribs[gene]
    if normalize:
        dprod /= gexdot(gvals1, normalize=False, recursed=True) + gexdot(gvals2, normalize=False, recursed=True)
    if debug and not recursed:  # <recursed> suppresses debug output from the normalization sub-calls
        if debug > 1:
            lm = max(len(g) for g in gvals1.keys() + gvals2.keys())
            def dstr(vl): return '  '.join('%s %-5.1f'%(utils.color('red' if g in common_genes else None, g, width=lm), v) for g, v in vl.items())
            print '    %s' % dstr(gvals1)
            print '    %s' % dstr(gvals2)
        if len(common_genes) == 0:
            pass # print '    none in common'
        else:
            print '    %s%5.2f  %2d / (%2d | %2d): %s' % (lbstr, dprod, len(common_genes), len(gvals1), len(gvals2), ' '.join(common_genes))
    if return_gene_contributions:
        return dprod, gene_contribs
    else:
        return dprod
# ----------------------------------------------------------------------------------------
# read reference marker-gene info from previous papers (fabio + adam waickman at the moment)
def read_ref_data():
    # returns two dicts, each mapping a cell type to an OrderedDict of gene : logfc,
    # sorted by decreasing logfc
    # fabio's file: tab-separated, with per-gene mean expression for plasmablast + naive
    fab_pairs = {'pb' : [], 'naive' : []}
    with open(fabio_fname) as ffile:
        for row in csv.DictReader(ffile, delimiter='\t'):
            plasma_avg = float(row['avg_plasma'])
            naive_avg = float(row['avg_naive'])
            if plasma_avg == 0 or naive_avg == 0:  # can't take a log fold change with a zero
                continue
            # NOTE these don't really match up with his KS statistic particularly well
            fab_pairs['pb'].append((row['gene'], math.log(plasma_avg / naive_avg, 2)))
            fab_pairs['naive'].append((row['gene'], math.log(naive_avg / plasma_avg, 2)))
    fabfo = {}
    for ctype, gene_pairs in fab_pairs.items():  # definitely not always sorted, so sort here
        fabfo[ctype] = collections.OrderedDict(sorted(gene_pairs, key=operator.itemgetter(1), reverse=True))
    # waickman's file: csv with explicit type/gene/logfc columns
    waick_pairs = {}
    with open(waickfname) as wfile:
        for row in csv.DictReader(wfile):
            waick_pairs.setdefault(row['type'], []).append((row['gene'], float(row['logfc'])))
    waickfo = {}
    for ctype, gene_pairs in waick_pairs.items():  # should already be sorted, but make sure
        waickfo[ctype] = collections.OrderedDict(sorted(gene_pairs, key=operator.itemgetter(1), reverse=True))
    return fabfo, waickfo
# ----------------------------------------------------------------------------------------
def read_gex(outdir, min_dprod=0.001, debug=True):
    # Parse the R output files written by run_gex() from <outdir> (barcodes, pca rotation,
    # umap coords, cluster labels, per-cluster marker csvs), then score each cluster against
    # the waickman reference gene sets with gexdot() and print a summary table.
    #   <min_dprod>: dot products smaller than this are skipped in the per-cluster summary
    # NOTE(review): currently only prints its summary; the return of the parsed values is
    #   commented out at the bottom.
    # barcodes
    barcode_vals = []
    with open('%s/%s' % (outdir, barcodefname)) as bfile:
        for il, line in enumerate(bfile):
            lstrs = line.strip().split()
            icount = int(lstrs.pop(0).strip('[]'))
            assert icount == len(barcode_vals) + 1 # <icount> is the R-style (1-based) index of the first element in this line
            barcode_vals += [s.strip('"') for s in lstrs]
    if debug:
        print '    read %d barcodes' % len(barcode_vals)
    # pca values
    rotation_vals = collections.OrderedDict() # relationship between pca and gene names (map from gene name to list of pca components)
    with open('%s/%s' % (outdir, pcafname)) as pfile:
        pca_comps = None # names for each pca component (like PC3)
        for il, line in enumerate(pfile):
            if il == 0:  # header line: sanity check that columns are PC1, PC2, ... in order
                pca_comps = line.strip().split()
                for ipc, pc in enumerate(pca_comps):
                    assert pc[:2] == 'PC'
                    assert int(pc[2:]) == ipc + 1
                continue
            lstrs = line.strip().split()
            gene = lstrs.pop(0)
            assert len(lstrs) == len(pca_comps)
            rotation_vals[gene] = [float(vstr) for vstr in lstrs]
    if debug:
        print '    %d pca components for %d genes: %s' % (len(pca_comps), len(rotation_vals), ' '.join(rotation_vals))
    # umap values
    umap_vals = [] # list of (x, y) umap values for each cell
    with open('%s/%s' % (outdir, umapfname)) as ufile:
        for il, line in enumerate(ufile):
            lstrs = line.strip().split()
            if il == 0:
                assert lstrs == ['[,%d]'%i for i in [1, 2]]
            else:
                icount = int(lstrs.pop(0).strip('[]').rstrip(','))
                assert icount == len(umap_vals) + 1
                umap_vals.append([float(v) for v in lstrs])
    if debug:
        print '    %d umap values' % len(umap_vals)
    assert len(umap_vals) == len(barcode_vals)
    # cluster assignments
    cluster_vals = []
    with open('%s/%s' % (outdir, clusterfname)) as cfile:
        for il, line in enumerate(cfile):
            lstrs = line.strip().split()
            if lstrs[0] != 'Levels:':
                icount = int(lstrs.pop(0).strip('[]'))
                assert icount == len(cluster_vals) + 1 # <icount> is the R-style (1-based) index of the first element in this line
                cluster_vals += [int(c) for c in lstrs]
            else: # last line lists the clusters (not sure why they're called "levels"
                cluster_ints = [int(c) for c in lstrs[1:]] # names of the clusters (1-based integer index)
                assert cluster_ints == list(range(min(cluster_ints), max(cluster_ints) + 1))
                assert set(cluster_ints) == set(cluster_vals)
    if debug:
        print '    %d values in %d clusters: %s' % (len(cluster_vals), len(cluster_ints), ' '.join(str(c) for c in cluster_ints))
    assert len(cluster_vals) == len(barcode_vals)
    # markers for each cluster
    pairwise_cmarkers = {'%d-%d'%(c1, c2) : [] for c1, c2 in itertools.permutations(cluster_ints, 2)} # reversing them (1-2 vs 2-1) the values are just the negative of each other if they're both there, but you don't get all the same genes
    summary_cmarkers = {'%d-summary'%c : [] for c in cluster_ints}
    for cname in cluster_ints:
        other_clusters = [c for c in cluster_ints if c != cname]
        with open('%s/%s' % (outdir, markfname(cname))) as cfile:
            reader = csv.DictReader(cfile)
            assert list(reader.fieldnames)[:5] == ['', 'Top', 'p.value', 'FDR', 'summary.logFC'] # summary.logFC is the log-fold change from the comparison with the lowest p-value (not necessarily the min/max log fold change)
            assert list(reader.fieldnames)[5:] == ['logFC.%d'%i for i in other_clusters] # should be a column for each pairwise comparison with another cluster
            for il, line in enumerate(reader):
                gene = line['']  # first (unnamed) column holds the gene name
                logfc_vals = {i : float(line['logFC.%d'%i]) for i in other_clusters}
                summary_cmarkers['%d-summary'%cname].append((gene, float(line['summary.logFC'])))
                for c2 in logfc_vals:
                    pairwise_cmarkers['%d-%d'%(cname, c2)].append((gene, logfc_vals[c2]))
    for ckey in pairwise_cmarkers:
        pairwise_cmarkers[ckey] = collections.OrderedDict(sorted(pairwise_cmarkers[ckey], key=operator.itemgetter(1), reverse=True))
    for ckey in summary_cmarkers:
        summary_cmarkers[ckey] = collections.OrderedDict(sorted(summary_cmarkers[ckey], key=operator.itemgetter(1), reverse=True))
    # reference marker genes
    fabfo, waickfo = read_ref_data()
    print '  interpretation: "this cluster is much more <type>-like than <clusters>, based on relative upregulation of <N genes>"'
    print '       type     any (N genes)    vs. single clusters      gene contributions (sum over clusters)'
    for cname in cluster_ints:
        print '  %s' % utils.color('green', 'cluster %d' % cname)
        for vtype in waickfo:
            clprods = []
            all_contribs = {}
            for ic2, c2 in enumerate([c for c in cluster_ints if c != cname]):
                dprod, gene_contribs = gexdot(waickfo[vtype], pairwise_cmarkers['%d-%d'%(cname, c2)], return_gene_contributions=True, lbstr='%8s %s '%((vtype+':') if ic2==0 else '', utils.color('blue', str(c2)))) #, debug=True)
                if dprod < min_dprod:  # skip negligible similarities
                    continue
                clprods.append({'c2' : c2, 'dprod' : dprod, 'gene_contribs' : gene_contribs})
                for tg, contr in gene_contribs.items():
                    if tg not in all_contribs:
                        all_contribs[tg] = 0.
                    all_contribs[tg] += gene_contribs[tg]
            clprods = sorted(clprods, key=lambda x: x['dprod'], reverse=True)
            anydprod, anygcontribs = gexdot(waickfo[vtype], summary_cmarkers['%d-summary'%cname], return_gene_contributions=True) # lbstr=XXX
            sumclprod = {'dprod' : anydprod, 'gene_contribs' : anygcontribs}
            if debug and len(clprods) > 0:
                def dcol(d):  # color code by dot product magnitude
                    if d['dprod'] > 0.1:
                        return 'red'
                    elif d['dprod'] > 0.01:
                        return 'yellow'
                    else:
                        return None
                def dpstr(d): return utils.color(dcol(d), '%.3f'%d['dprod'])
                def cstr(d): return utils.color('blue', '%d' % d['c2'])
                tmpstr = '  '.join('%s %s' % (cstr(d), dpstr(d)) for d in clprods)
                anystr = ''
                if sumclprod['dprod'] > min_dprod:
                    anystr = '%s (%2d)' % (dpstr(sumclprod), len(sumclprod['gene_contribs']))
                print '     %s %-s  %-s  %s' % (utils.color('purple', vtype, width=8),
                                                # utils.color('blue', ' '.join('%d'%d['c2'] for d in clprods), width=20, padside='right'),
                                                anystr + ' ' * (12 - utils.len_excluding_colors(anystr)),
                                                tmpstr + ' ' * (70 - utils.len_excluding_colors(tmpstr)),
                                                '  '.join('%s %.1f'%(g.lower(), c)for g, c in sorted(all_contribs.items(), key=operator.itemgetter(1), reverse=True)),
                )
    # return barcode_vals, rotation_vals, umap_vals, cluster_vals
# ----------------------------------------------------------------------------------------
def run_gex(feature_matrix_path, mname, outdir, make_plots=True):
    # Assemble and run the full R/bioconductor gene-expression pipeline: read 10x counts
    # from <feature_matrix_path>, do QC + normalization, select marker genes according to
    # <mname> (one of allowed_mnames below), run dimensionality reduction + clustering
    # (via dimredcmds), celldex reference labelling, and (for non-hvg methods) AUCell cell
    # type annotation. Output files land in <outdir>.
    allowed_mnames = ['hvg', 'fabio', 'waick', 'msigdb']
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    rcmds = [loadcmd(l) for l in ['DropletUtils', 'scater', 'scran', 'pheatmap', 'celldex', 'SingleR', 'GSEABase', 'AUCell']]
    rcmds += [
        'options(width=1000)',
        'sce <- read10xCounts("%s")' % feature_matrix_path,
        'rownames(sce) <- uniquifyFeatureNames(rowData(sce)$ID, rowData(sce)$Symbol)',
        # quality control
        'is.mito <- grepl("^MT-", rownames(sce))', # figure out which genes are mitochondrial
        'qcstats <- perCellQCMetrics(sce, subsets=list(Mito=is.mito))',
        'filtered <- quickPerCellQC(qcstats, percent_subsets="subsets_Mito_percent")', # identifies + removes outliers (in several qc metrics)
        'sce <- sce[, !filtered$discard]',
        'capture.output(colData(sce)$Barcode, file="%s/%s")' % (outdir, barcodefname),
        # normalization
        'sce <- logNormCounts(sce)',
        # # get reference labels from celldex (so we can remove HSCs) NOTE turning this off since it doesn't really change anything
        # 'ref <- celldex::BlueprintEncodeData()', # get reference labels from cache or download
        # 'pred <- SingleR(test=sce, ref=ref, labels=ref$label.main)', # assign labels to our cells (more SingleR detail here: https://ltla.github.io/SingleRBook)
        # 'table(pred$labels)',
        # 'sce <- sce[, pred$labels=="B-cells"]', # discard non-b-cells
        # 'pred <- pred[pred$labels=="B-cells", ]',
    ]
    if mname == 'hvg': # hvg (hypervariable genes): this is the dumb/default/no prior info choice, where you use the ~700 most variable genes in our sample
        rcmds += [
            'dec <- modelGeneVar(sce)',
            'hvg <- getTopHVGs(dec, prop=0.1)', # 0.1 gives ~700 most variable genes (if you combine these with the fabio/waick, these totally dominate everything, presumably because there's so many)
        ]
    elif mname == 'fabio': # he gaves us the 200 most discriminative genes between pb and naive; here i use only the ones up'd in pb
        rcmds += [
            'fabio.markers <- read.csv("%s", sep="\t", header=T)' % fabio_fname, # $name # genes from fabio (200 most up- or down-regulated in plasmablast as compared to naive B cells)
        ]
    elif mname == 'waick': # he gave us the 10 most upregulated genes in his samples for each of naive, memory, pb, and prepb, so 40 total
        rcmds += [
            'waick.markers <- read.csv("%s", header=T)' % waickfname, # 10 most up'd genes for naive, memory, pb, and prepb (40 total). Not sure if it's with respeect to each other, or other cells, or what
        ]
    elif mname == 'msigdb': # msigdb: I searched through the msigdb "C7" immune cell sets for anything with plasma{blast,cell} and picked the sets that seemed most relevant, ended up with ~1000 genes from these two papers (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4517294/ and https://pubmed.ncbi.nlm.nih.gov/23613519/)
        rcmds += [
            'msigdb.markers <- read.csv("%s", header=T)' % msigdbfname, # see <msdsets> above -- I just searched through the G7 sets for ones with plasma{blast,cell} and took the nearby ones
        ]
    else:
        raise Exception('mname must be among %s (but got %s)' % (' '.join(allowed_mnames), mname))
    # 'all_genes <- c(fabio.markers$gene, waick.markers$gene, hvg)', # don't do this, the hvgs overwhelm everything
    mname_markers = mname  # name of the R variable holding the marker gene list
    if mname != 'hvg':
        mname_markers += '.markers$gene'
    rcmds += dimredcmds(outdir, mname_markers)
    # reference labels from celldex
    rcmds += [
        'ref <- celldex::BlueprintEncodeData()', # get reference labels from cache or download
        'pred <- SingleR(test=sce, ref=ref, labels=ref$label.main)', # assign labels to our cells (more SingleR detail here: https://ltla.github.io/SingleRBook)
        'table(pred$labels)',
    ]
    rcmds += rplotcmds(outdir, 'celldex-label-heatmap', 'plotScoreHeatmap(pred)')
    # only if we have clusters:
    rcmds += ['tab <- table(Assigned=pred$pruned.labels, Cluster=colLabels(sce))',] # table (and then heatmap) comparing these new labels to our existing clusters
    rcmds += rplotcmds(outdir, 'celldex-label-vs-cluster-heatmap', 'pheatmap(log2(tab+10), color=colorRampPalette(c("white", "blue"))(101))') # this will crash if you've filtered to only B cells
    if mname != 'hvg': # doesn't @#%$!$ing work any more (failes with "unable to find an inherited method for function AUCell_buildRankings for signature DelayedMatrix")
        rcmds += ctype_ann_cmds(outdir, mname_markers.replace('$gene', ''))
    utils.run_r(rcmds, 'auto', logfname='%s/out'%outdir, dryrun=False)
| psathyrella/partis | python/gex.py | Python | gpl-3.0 | 24,326 | [
"BLAST"
] | 686560ac003050104cdca8f41719125978e61bdfb3b74ed6a8f40d1f8bfc59fa |
from __future__ import print_function, division
import unittest
class KnowValues(unittest.TestCase):
  def test_water_si_wgth(self):
    """Reconstruct the screened interaction si_c(w) for water from fitted spectral weights.

    Computes si_c on a complex frequency grid, detects maxima of the density of the
    imaginary part, fits Lorentzian spectral weights at those maxima (two methods below,
    the second overwrites the first), keeps only eigenvector components with eigenvalue
    > 0.01, then rebuilds si_c via sf2f_rf and dumps original vs reconstructed densities
    to text files for comparison.
    NOTE(review): original docstring mentioned SIESTA radial-orbital initialization, but the
    calculation here reads pre-computed 'water' data from this directory via gw(...) — confirm.
    NOTE(review): 'overlap', 'overlap_real', 'simps', 'array', 'conj' are imported but only
    used by the commented-out Method 3 (or not at all).
    """
    from pyscf.nao import gw as gw_c
    from pyscf.nao import mf as mf_c
    from pyscf.nao.m_x_zip import detect_maxima
    from pyscf.nao.m_lorentzian import overlap, lorentzian, overlap_imag, overlap_real
    from pyscf.nao.m_sf2f_rf import sf2f_rf
    import numpy as np
    import os
    from numpy import arange, einsum, array, linalg, savetxt, column_stack, conj
    from scipy.integrate import simps
    dname = os.path.dirname(os.path.abspath(__file__))
    mf = gw_c(label='water', cd=dname, verbosity=0, nocc=8, nvrt=6, rescf=False, tol_ia=1e-9)
    #gw.kernel_gw()
    weps = 0.3  # broadening (imaginary part) of the frequency grid
    wmax = 1.1*(mf.mo_energy[0,0,-1]-mf.mo_energy[0,0,0])  # slightly beyond the full MO energy span
    ww = arange(0.0, wmax, weps/3.0)+1j*weps
    si0 = mf.si_c(ww)
    hk_inv = linalg.inv(mf.hkernel_den)
    print(__name__, si0.shape, hk_inv.shape)
    # frequency-resolved "density" traces used to locate peaks and for the output files
    si0_dens = -einsum('wpq,pq->w', si0, hk_inv).imag
    si0_dens_re = einsum('wpq,pq->w', si0, hk_inv).real
    savetxt('w2scr_int.txt', column_stack((ww.real, si0_dens)))
    savetxt('w2scr_int_re.txt', column_stack((ww.real, si0_dens_re)))
    wwmx = list(detect_maxima(ww, si0_dens))  # peak positions -> Lorentzian centers
    print('nmax', len(wwmx))
    dww = ww[1].real-ww[0].real  # frequency grid spacing
    # Method 1
    sf_si0 = np.zeros((len(wwmx), mf.nprod, mf.nprod))
    for j,wmx in enumerate(wwmx): sf_si0[j] = -si0[np.argmin(abs(ww.real - wmx))].imag/np.pi
    # Method 2, using imaginary part (overwrites Method 1's result)
    sf_si0 = np.zeros((len(wwmx), mf.nprod, mf.nprod))
    for j,wmx in enumerate(wwmx):
      for i,fw in enumerate(ww.real):
        sf_si0[j] += si0[i].imag*dww*lorentzian(fw, wmx, weps).imag
    loi = overlap_imag(wwmx, weps)
    iloi = np.linalg.inv(loi)
    sf_si0 = einsum('fg,gab->fab', iloi,sf_si0)
    ## Method 3, using real part
    #re_si0 = np.zeros((len(wwmx), mf.nprod, mf.nprod))
    #for j,wmx in enumerate(wwmx):
      #for i,fw in enumerate(ww.real):
        #re_si0[j] += si0[i].real*dww*lorentzian(fw, wmx, weps).real
    #lor = overlap_real(wwmx, weps)
    #ilor = np.linalg.inv(lor)
    #sf_si0 = einsum('fg,gab->fab', ilor,re_si0)
    # keep only strongly-weighted (eigenvalue > 0.01) components of each spectral weight matrix
    ivec = 0
    for i,sf in enumerate(sf_si0):
      ee,xx = np.linalg.eigh(sf)
      sf_si0[i] = 0.0;
      for e,x in zip(ee,xx.T):
        if e>0.01:
          sf_si0[i] += np.outer(x*e, x)
          ivec += 1
    print('nvecs', ivec)
    si0_recon = sf2f_rf(ww.real, weps, wwmx, sf_si0)  # rebuild si_c from the fitted weights
    si0_dens_recon = -einsum('wpq,pq->w', si0_recon, hk_inv).imag
    si0_dens_recon_re = einsum('wpq,pq->w', si0_recon, hk_inv).real
    savetxt('w2scr_int_recon.txt', column_stack((ww.real, si0_dens_recon)))
    savetxt('w2scr_int_recon_re.txt', column_stack((ww.real, si0_dens_recon_re)))
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0093_water_si_wght.py | Python | apache-2.0 | 2,872 | [
"PySCF",
"SIESTA"
] | ae451062d0659bf8e33dc7168611cfe3acecad7592e1747de2b28360540d76e3 |
""" :mod: SRM2Storage
=================
.. module: python
:synopsis: SRM v2 interface to StorageElement
"""
# # imports
import os
import re
import time
import errno
from stat import S_ISREG, S_ISDIR, S_IMODE, ST_MODE, ST_SIZE
# # from DIRAC
from DIRAC import gLogger, gConfig
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.File import getSize
# # RCSID
__RCSID__ = "$Id$"
class SRM2Storage( StorageBase ):
""" .. class:: SRM2Storage
SRM v2 interface to StorageElement using lcg_util and gfal
"""
_INPUT_PROTOCOLS = ['file', 'srm']
_OUTPUT_PROTOCOLS = ['file', 'root', 'dcap', 'gsidcap', 'rfio', 'srm', 'gsiftp']
  def __init__( self, storageName, parameters ):
    """ c'tor
    Reads all SRM2 tuning parameters (timeouts, retries, chunk sizes, checksum type)
    from the CS under /Resources/StorageElements, and resolves the VO name from the
    current proxy. gfal/lcg_util are imported lazily (see __importExternals).
    :param self: self reference
    :param str storageName: SE name
    :param dict parameters: dictionary of protocol parameters
    """
    StorageBase.__init__( self, storageName, parameters )
    self.spaceToken = self.protocolParameters['SpaceToken']
    self.log = gLogger.getSubLogger( "SRM2Storage", True )
    self.isok = True
    # # placeholder for gfal reference
    self.gfal = None
    # # placeholder for lcg_util reference
    self.lcg_util = None
    # # save c'tor params
    self.pluginName = 'SRM2'
    # # stage limit - 12h
    self.stageTimeout = gConfig.getValue( '/Resources/StorageElements/StageTimeout', 12 * 60 * 60 )
    # # 1 file timeout
    self.fileTimeout = gConfig.getValue( '/Resources/StorageElements/FileTimeout', 30 )
    # # nb of surls per gfal call
    self.filesPerCall = gConfig.getValue( '/Resources/StorageElements/FilesPerCall', 20 )
    # # gfal timeout
    self.gfalTimeout = gConfig.getValue( "/Resources/StorageElements/GFAL_Timeout", 100 )
    # # gfal long timeout
    self.gfalLongTimeOut = gConfig.getValue( "/Resources/StorageElements/GFAL_LongTimeout", 1200 )
    # # gfal retry on errno.ECONN
    self.gfalRetry = gConfig.getValue( "/Resources/StorageElements/GFAL_Retry", 3 )
    # # should busy files be considered to exist
    self.busyFilesExist = gConfig.getValue( "/Resources/StorageElements/SRMBusyFilesExist", False )
    # # set checksum type, by default this is 0 (GFAL_CKSM_NONE)
    checksumType = gConfig.getValue( "/Resources/StorageElements/ChecksumType", '' )
    # enum gfal_cksm_type, all in lcg_util
    # GFAL_CKSM_NONE = 0,
    # GFAL_CKSM_CRC32,
    # GFAL_CKSM_ADLER32,
    # GFAL_CKSM_MD5,
    # GFAL_CKSM_SHA1
    # GFAL_CKSM_NULL = 0
    self.checksumTypes = { "CRC32" : 1, "ADLER32" : 2,
                           "MD5" : 3, "SHA1" : 4, "NONE" : 0, "NULL" : 0 }
    # unrecognised checksum names silently fall back to 0 (disabled), warned about below
    self.checksumType = self.checksumTypes.get( checksumType.upper(), 0 )
    if self.checksumType:
      # NOTE(review): this logs the numeric enum value, not the checksum name — probably meant checksumType
      gLogger.debug( "SRM2Storage: will use %s checksum check" % self.checksumType )
    elif checksumType:
      gLogger.warn( "SRM2Storage: unknown checksum, check disabled", checksumType )
    else:
      self.log.debug( "SRM2Storage: will use no checksum" )
    # setting some variables for use with lcg_utils
    self.nobdii = 1
    self.defaulttype = 2
    self.voName = None
    ret = getProxyInfo( disableVOMS = True )
    if ret['OK'] and 'group' in ret['Value']:
      self.voName = getVOForGroup( ret['Value']['group'] )
    # enable lcg-utils debugging for debug level DEBUG
    lcgdebuglevel = 0
    dlevel = self.log.getLevel()
    if dlevel == 'DEBUG':
      lcgdebuglevel = 999
    self.verbose = lcgdebuglevel
    self.conf_file = 'ignored'
    self.insecure = 0
    self.defaultLocalProtocols = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )
    self.MAX_SINGLE_STREAM_SIZE = 1024 * 1024 * 10 # 10 MB ???
    self.MIN_BANDWIDTH = 0.5 * ( 1024 * 1024 ) # 0.5 MB/s ???
  def __importExternals( self ):
    """ import lcg_util and gfalthr or gfal
    Lazy, idempotent import of the grid middleware python bindings: lcg_util is
    mandatory; for gfal the threaded binding (gfalthr) is preferred, falling back
    to plain gfal. References are cached on self.lcg_util / self.gfal.
    NOTE: this is Python 2 only code (comma-style except clauses).
    :param self: self reference
    :return: S_OK on success, S_ERROR(EIMPERR) if an import fails
    """
    if ( self.lcg_util ) and ( self.gfal ):
      return S_OK()  # both already imported, nothing to do
    # # get lcg_util
    try:
      import lcg_util
      self.log.debug( "Using lcg_util version %s from %s" % ( lcg_util.lcg_util_version(),
                                                              lcg_util.__file__ ) )
    except ImportError, error:
      gLogger.exception( "__importExternals: Failed to import lcg_util", "", error )
      return S_ERROR( DErrno.EIMPERR, error )
    # # and gfalthr
    try:
      import gfalthr as gfal
      self.log.debug( 'Using gfalthr version %s from %s' % ( gfal.gfal_version(),
                                                             gfal.__file__ ) )
    except ImportError, error:
      self.log.warn( "__importExternals: Failed to import gfalthr: %s." % error )
      # # so gfal maybe?
      try:
        import gfal
        self.log.debug( "Using gfal version %s from %s" % ( gfal.gfal_version(),
                                                            gfal.__file__ ) )
      except ImportError, error:
        gLogger.exception( "__importExternals: Failed to import gfal", "", error )
        return S_ERROR( DErrno.EIMPERR, error )
    self.lcg_util = lcg_util
    self.gfal = gfal
    return S_OK()
################################################################################
#
# The methods below are URL manipulation methods
#
################################################################################
def __convertRandomSRMOutputIntoAFullURL( self, srmPath ):
""" When calling gfal operation, srm sometimes returns as a surl just the physical path on the storage
without the host, port and else. Sometimes it is the full surl. Sometimes it doesn't have the WSUrl.
So we correct all this and make sure that we return to the caller a full surl.
/my/base/path/the/lfn.raw -> srm://host:port/srm/v2/server?SFN=/my/base/path/the/lfn.raw
"""
from DIRAC.Core.Utilities.Pfn import pfnunparse, pfnparse
# if self.isURL( srmPath )['Value']:
if ':' in srmPath:
dic = pfnparse( srmPath )['Value']
dic['WSUrl'] = self.protocolParameters['WSUrl']
srmPath = pfnunparse( dic )['Value']
return S_OK( srmPath )
urlDict = dict( self.protocolParameters )
urlDict['Path'] = ''
unp = pfnunparse( urlDict )['Value']
unp = os.path.join( unp, srmPath.lstrip( '/' ) )
return S_OK( unp )
#############################################################
#
# These are the methods for directory manipulation
#
######################################################################
#
# This has to be updated once the new gfal_makedir() becomes available
# TODO: isn't it there? when somebody made above comment?
#
def createDirectory( self, path ):
""" mkdir -p path on storage
:param self: self reference
:param str path:
"""
urls = checkArgumentFormat( path )
if not urls['OK']:
return urls
urls = urls['Value']
successful = {}
failed = {}
self.log.debug( "createDirectory: Attempting to create %s directories." % len( urls ) )
for url in urls:
strippedUrl = url.rstrip( '/' )
res = self.__makeDirs( strippedUrl )
if res['OK']:
self.log.debug( "createDirectory: Successfully created directory on storage: %s" % url )
successful[url] = True
else:
self.log.error( "createDirectory: Failed to create directory on storage.",
"\n%s: \n%s" % ( url, res['Message'] ) )
failed[url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __makeDir( self, path ):
""" mkdir path in a weird way
:param self: self reference
:param str path:
"""
srcFile = os.path.join( os.environ.get( 'TMPDIR', os.environ.get( 'TMP', '/tmp' ) ), 'dirac_directory' )
if not os.path.exists( srcFile ):
dfile = open( srcFile, 'w' )
dfile.write( " " )
dfile.close()
destFile = os.path.join( path, 'dirac_directory.%s' % time.time() )
res = self.__putFile( srcFile, destFile, 0, checkExists = False )
if res['OK']:
self.__executeOperation( destFile, 'removeFile' )
return res
def __makeDirs( self, path ):
""" black magic contained within...
:param self: self reference
:param str path: dir name
"""
res = self.__executeOperation( path, 'exists' )
if not res['OK']:
return res
if res['Value']:
return S_OK()
# directory doesn't exist, create it
dirName = os.path.dirname( path )
res = self.__executeOperation( dirName, 'exists' )
if not res['OK']:
return res
if not res['Value']:
res = self.__makeDirs( dirName )
if not res['OK']:
return res
return self.__makeDir( path )
################################################################################
#
# The methods below use the new generic methods for executing operations
#
################################################################################
def removeFile( self, path ):
""" rm path on storage
:param self: self reference
:param str path: file path
"""
log = self.log.getSubLogger( 'removeFile' )
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "removeFile: Performing the removal of %s file(s)" % len( urls ) )
resDict = self.__gfaldeletesurls_wrapper( urls )
if not resDict["OK"]:
self.log.error( "Failed removeFile", "%s" % resDict["Message"] )
return resDict
resDict = resDict['Value']
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "removeFile: Successfully removed file: %s" % pathSURL )
successful[pathSURL] = True
elif urlDict['status'] == 2:
# This is the case where the file doesn't exist.
self.log.debug( "removeFile: File did not exist, successfully removed: %s" % pathSURL )
successful[pathSURL] = True
else:
errStr = "removeFile: Failed to remove file."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def getTransportURL( self, path, protocols = False ):
    """ obtain the tURLs for the supplied path and protocols
        If the native protocol is among the requested ones, urls are returned as-is;
        otherwise the SRM service is contacted to translate surls into turls.
        NOTE: Python 2 only (uses basestring).
    :param self: self reference
    :param str path: path on storage (or list/dict of paths)
    :param mixed protocols: protocols to use (False -> configured defaults, str, or list)
    :return: S_OK( { 'Failed' : ..., 'Successful' : ... } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    # normalise <protocols> into a list
    if not protocols:
      protocols = self.__getProtocols()
      if not protocols['OK']:
        return protocols
      listProtocols = protocols['Value']
    elif isinstance( protocols, basestring ):
      listProtocols = [protocols]
    elif isinstance( protocols, list ):
      listProtocols = protocols
    else:
      return S_ERROR( errno.EPROTO, "getTransportURL: Must supply desired protocols to this plug-in." )
    if self.protocolParameters['Protocol'] in listProtocols:
      # native protocol requested: the surl itself already is a valid turl
      successful = {}
      failed = {}
      for url in urls:
        if self.isURL( url )['Value']:
          successful[url] = url
        else:
          failed[url] = 'getTransportURL: Failed to obtain turls.'
      return S_OK( {'Successful' : successful, 'Failed' : failed} )
    # NOTE(review): self.se is not set in this chunk — presumably assigned by the surrounding framework; confirm
    if not self.se.status()['Read']:
      return S_ERROR( "SRM2Storage.getTransportURL: Read access not currently permitted." )
    # Here we must go out to the SRM service
    self.log.debug( "getTransportURL: Obtaining tURLs for %s file(s)." % len( urls ) )
    resDict = self.__gfalturlsfromsurls_wrapper( urls, listProtocols )
    if not resDict["OK"]:
      self.log.error( "Failed getTransportURL", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict['Value']
    failed = resDict['Failed']
    allResults = resDict['AllResults']
    successful = {}
    for urlDict in allResults:
      if urlDict.get( 'surl' ):
        pathSURL = urlDict['surl']
        if urlDict['status'] == 0:
          # SRM status 0: turl successfully produced
          self.log.debug( "getTransportURL: Obtained tURL for file. %s" % pathSURL )
          successful[pathSURL] = urlDict['turl']
        elif urlDict['status'] == 2:
          # SRM status 2: no such file
          errMessage = "File does not exist"
          self.log.error( errMessage, pathSURL )
          failed[pathSURL] = errMessage
        else:
          errStr = "getTransportURL: Failed to obtain turls."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def prestageFile( self, path, lifetime = 86400 ):
    """ Issue prestage request for file
        Successful entries map surl -> SRM request id (usable with prestageFileStatus).
    :param self: self reference
    :param str path: PFN path (or list/dict of paths)
    :param int lifetime: prestage lifetime in seconds (default 24h)
    :return: S_OK( { 'Failed' : ..., 'Successful' : ... } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "prestageFile: Attempting to issue stage requests for %s file(s)." % len( urls ) )
    resDict = self.__gfal_prestage_wrapper( urls, lifetime )
    if not resDict["OK"]:
      self.log.error( "Failed prestageFile", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    allResults = resDict['AllResults']
    successful = {}
    for urlDict in allResults:
      if urlDict.get( 'surl' ):
        pathSURL = urlDict['surl']
        if urlDict['status'] == 0:
          # request accepted
          self.log.debug( "prestageFile: Issued stage request for file %s." % pathSURL )
          successful[pathSURL] = urlDict['SRMReqID']
        elif urlDict['status'] == 1:
          # file already online
          self.log.debug( "prestageFile: File found to be already staged.", pathSURL )
          successful[pathSURL] = urlDict['SRMReqID']
        # It can be 11 or 22 depending on the srm-ifce version...
        elif urlDict['status'] in ( 11, 22 ):
          self.log.debug( "prestageFile: Stage request for file %s queued.", pathSURL )
          successful[pathSURL] = urlDict['SRMReqID']
        elif urlDict['status'] == 2:
          errMessage = "prestageFile: File does not exist."
          self.log.error( errMessage, pathSURL )
          failed[pathSURL] = errMessage
        else:
          errStr = "prestageFile: Failed issue stage request."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def prestageFileStatus( self, path ):
    """ Monitor prestage request for files
        Successful entries map surl -> bool (True once the file is staged/online).
    :param self: self reference
    :param str path: PFN path (or list/dict of paths)
    :return: S_OK( { 'Failed' : ..., 'Successful' : ... } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "prestageFileStatus: Attempting to get status "
                    "of stage requests for %s file(s)." % len( urls ) )
    resDict = self.__gfal_prestagestatus_wrapper( urls )
    if not resDict["OK"]:
      self.log.error( "Failed prestageFileStatus", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    allResults = resDict['AllResults']
    successful = {}
    for urlDict in allResults:
      if urlDict.get( 'surl' ):
        pathSURL = urlDict['surl']
        if urlDict['status'] == 1:
          # staged and online
          self.log.debug( "SRM2Storage.prestageFileStatus: File found to be staged %s." % pathSURL )
          successful[pathSURL] = True
        elif urlDict['status'] == 0:
          # request known but file not yet online
          self.log.debug( "SRM2Storage.prestageFileStatus: File not staged %s." % pathSURL )
          successful[pathSURL] = False
        elif urlDict['status'] == 2:
          errMessage = "SRM2Storage.prestageFileStatus: File does not exist."
          self.log.error( errMessage, pathSURL )
          failed[pathSURL] = errMessage
        else:
          errStr = "SRM2Storage.prestageFileStatus: Failed get prestage status."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def getFileMetadata( self, path ):
    """ Get metadata associated to the file.

    :param self: self reference
    :param mixed path: file SURL or list/dict of SURLs
    :return: S_OK( { 'Failed' : {}, 'Successful' : {} } ) with the parsed stat
             dictionary per successful SURL, S_ERROR on a malformed gfal reply
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    failed = {}
    self.log.debug( "getFileMetadata: Obtaining metadata for %s file(s)." % len( urls ) )
    # gfal ls with depth 0: stat the entries themselves, do not list contents
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "Failed getFileMetadata:", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed.update( resDict['Failed'] )
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if urlDict.get( 'surl' ):
        # Get back the input value for that surl
        path = urlDict['surl']
        if urlDict['status'] == 0:
          # stat succeeded: make sure the entry really is a plain file
          statDict = self.__parse_file_metadata( urlDict )
          if statDict['File']:
            successful[path] = statDict
          else:
            errStr = "getFileMetadata: Supplied path is not a file."
            self.log.error( errStr, path )
            failed[path] = errStr
        elif urlDict['status'] == 2:
          # status 2 is treated as "file does not exist" throughout this class
          errMessage = "getFileMetadata: File does not exist."
          self.log.error( errMessage, path )
          failed[path] = errMessage
        else:
          errStr = "SRM2Storage.getFileMetadata: Failed to get file metadata."
          errMessage = "%s: %s" % ( path, urlDict['ErrorMessage'] )
          self.log.error( errStr, errMessage )
          failed[path] = "%s %s" % ( errStr, urlDict['ErrorMessage'] )
      else:
        # A reply without a surl means the gfal answer is unusable - bail out
        errStr = "getFileMetadata: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errno.ENOMSG, errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def isFile( self, path ):
"""Check if the given path exists and it is a file
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "isFile: Checking whether %s path(s) are file(s)." % len( urls ) )
resDict = self.__gfal_ls_wrapper( urls, 0 )
if not resDict["OK"]:
self.log.error( "Failed isFile:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
listOfResults = resDict['AllResults']
successful = {}
for urlDict in listOfResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
statDict = self.__parse_file_metadata( urlDict )
if statDict['File']:
successful[pathSURL] = True
else:
self.log.debug( "isFile: Path is not a file: %s" % pathSURL )
successful[pathSURL] = False
elif urlDict['status'] == 2:
errMessage = "isFile: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "isFile: Failed to get file metadata."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
else:
errStr = "isFile: Returned element does not contain surl."
self.log.fatal( errStr, self.name )
return S_ERROR( errno.ENOMSG, errStr )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def pinFile( self, path, lifetime = 86400 ):
""" Pin a file with a given lifetime
:param self: self reference
:param str path: PFN path
:param int lifetime: pin lifetime in seconds (default 24h)
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "pinFile: Attempting to pin %s file(s)." % len( urls ) )
resDict = self.__gfal_pin_wrapper( urls, lifetime )
if not resDict["OK"]:
self.log.error( "Failed pinFile:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "pinFile: Issued pin request for file %s." % pathSURL )
successful[pathSURL] = urlDict['SRMReqID']
elif urlDict['status'] == 2:
errMessage = "pinFile: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "pinFile: Failed issue pin request."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def releaseFile( self, path ):
""" Release a pinned file
:param self: self reference
:param str path: PFN path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "releaseFile: Attempting to release %s file(s)." % len( urls ) )
resDict = self.__gfal_release_wrapper( urls )
if not resDict["OK"]:
self.log.error( "Failed releaseFile:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "Failed releaseFile:", "Issued release request for file %s." % pathSURL )
successful[pathSURL] = urlDict['SRMReqID']
elif urlDict['status'] == 2:
errMessage = "releaseFile: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "releaseFile: Failed issue release request."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def exists( self, path ):
    """ Check if the given path exists.

    :param self: self reference
    :param mixed path: SURL or list/dict of SURLs
    :return: S_OK( { 'Failed' : {}, 'Successful' : {} } ), Successful maps each
             SURL to True/False
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.exists: Checking the existance of %s path(s)" % len( urls ) )
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "Failed exists:", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if urlDict.get( 'surl' ):
        pathSURL = urlDict['surl']
        if urlDict['status'] == 0:
          self.log.debug( "SRM2Storage.exists: Path exists: %s" % pathSURL )
          successful[pathSURL] = True
        elif urlDict['status'] in ( 16, 22 ) and self.busyFilesExist:
          # Statuses 16/22 are treated as "file busy" (e.g. being staged out);
          # when the SE is configured to count busy files as existing, say True
          self.log.debug( "SRM2Storage.exists: Path exists, file busy (e.g., stage-out): %s" % pathSURL )
          successful[pathSURL] = True
        elif urlDict['status'] == 2:
          self.log.debug( "SRM2Storage.exists: Path does not exist: %s" % pathSURL )
          successful[pathSURL] = False
        else:
          errStr = "SRM2Storage.exists: Failed to get path metadata."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
      else:
        # Malformed gfal reply: cannot attribute the result to any input path
        errStr = "SRM2Storage.exists: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errno.ENOMSG, errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def getFileSize( self, path ):
    """ Get the physical size of the given file.

    :param self: self reference
    :param mixed path: file SURL or list/dict of SURLs
    :return: S_OK( { 'Failed' : {}, 'Successful' : {} } ), Successful maps each
             SURL to its size in bytes
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.getFileSize: Obtaining the size of %s file(s)." % len( urls ) )
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "Failed getFileSize:", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if urlDict.get( 'surl' ):
        pathSURL = urlDict['surl']
        if urlDict['status'] == 0:
          # stat succeeded: a size is only meaningful for plain files
          statDict = self.__parse_file_metadata( urlDict )
          if statDict['File']:
            successful[pathSURL] = statDict['Size']
          else:
            errStr = "SRM2Storage.getFileSize: Supplied path is not a file."
            self.log.verbose( errStr, pathSURL )
            failed[pathSURL] = errStr
        elif urlDict['status'] == 2:
          errMessage = "SRM2Storage.getFileSize: File does not exist."
          self.log.verbose( errMessage, pathSURL )
          failed[pathSURL] = errMessage
        else:
          errStr = "SRM2Storage.getFileSize: Failed to get file metadata."
          errMessage = urlDict['ErrorMessage']
          self.log.verbose( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
      else:
        # Malformed gfal reply: abort the whole call
        errStr = "SRM2Storage.getFileSize: Returned element does not contain surl."
        self.log.error( errStr, self.name )
        return S_ERROR( errno.ENOMSG, errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def putFile( self, path, sourceSize = 0 ):
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for dest_url, src_file in urls.items():
# Create destination directory
res = self.__executeOperation( os.path.dirname( dest_url ), 'createDirectory' )
if not res['OK']:
failed[dest_url] = res['Message']
else:
res = self.__putFile( src_file, dest_url, sourceSize )
if res['OK']:
successful[dest_url] = res['Value']
else:
failed[dest_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def __putFile( self, src_file, dest_url, sourceSize, checkExists = True ):
    """ put :src_file: to :dest_url:

    :param self: self reference
    :param str src_file: file path in local fs, or an srm: url for replication
    :param str dest_url: destination url on storage
    :param int sourceSize: :src_file: size in B (mandatory for srm: sources)
    :param bool checkExists: when True, remove a pre-existing destination first
    :return: S_OK( destination size in B ) or S_ERROR
    """
    if checkExists:
      # Pre-transfer check
      res = self.__executeOperation( dest_url, 'exists' )
      if not res['OK']:
        self.log.debug( "__putFile: Failed to find pre-existance of destination file." )
        return res
      if res['Value']:
        # Destination already present: remove it so the transfer starts clean
        res = self.__executeOperation( dest_url, 'removeFile' )
        if not res['OK']:
          self.log.debug( "__putFile: Failed to remove remote file %s." % dest_url )
        else:
          self.log.debug( "__putFile: Removed remote file %s." % dest_url )
    dsttype = self.defaulttype
    src_spacetokendesc = ''
    dest_spacetokendesc = self.spaceToken
    if re.search( 'srm:', src_file ):
      # Source is itself an SRM url: third-party replication
      src_url = src_file
      srctype = 2
      if not sourceSize:
        return S_ERROR( errno.EINVAL, "__putFile: For file replication the source file size must be provided." )
    else:
      # Source is a local file: check it and measure its size ourselves
      if not os.path.exists( src_file ):
        errStr = "__putFile: The source local file does not exist."
        self.log.error( errStr, src_file )
        return S_ERROR( errno.ENOENT, errStr )
      sourceSize = getSize( src_file )
      if sourceSize == -1:
        errStr = "__putFile: Failed to get file size."
        self.log.error( errStr, src_file )
        return S_ERROR( DErrno.EFILESIZE, errStr )
      src_url = 'file:%s' % src_file
      srctype = 0
    if sourceSize == 0:
      errStr = "__putFile: Source file is zero size."
      self.log.error( errStr, src_file )
      return S_ERROR( DErrno.EFILESIZE, errStr )
    # Timeout scales with the size at the minimal expected bandwidth, plus slack
    timeout = int( sourceSize / self.MIN_BANDWIDTH + 300 )
    if sourceSize > self.MAX_SINGLE_STREAM_SIZE:
      nbstreams = 4
    else:
      nbstreams = 1
    self.log.info( "__putFile: Executing transfer of %s to %s using %s streams" % ( src_url, dest_url, nbstreams ) )
    # Run lcg_cp in a separate process with its own, slightly larger, timeout
    res = pythonCall( ( timeout + 10 ), self.__lcg_cp_wrapper, src_url, dest_url,
                      srctype, dsttype, nbstreams, timeout, src_spacetokendesc, dest_spacetokendesc )
    if not res['OK']:
      # Remove the failed replica, just in case
      result = self.__executeOperation( dest_url, 'removeFile' )
      if result['OK']:
        self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
      else:
        self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
      return res
    res = res['Value']
    if not res['OK']: # pylint: disable=invalid-sequence-index
      # Remove the failed replica, just in case
      result = self.__executeOperation( dest_url, 'removeFile' )
      if result['OK']:
        self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
      else:
        self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
      return res
    errCode, errStr = res['Value']
    if errCode == 0:
      self.log.info( '__putFile: Successfully put file to storage.' )
      # # checksum check? return!
      if self.checksumType:
        return S_OK( sourceSize )
      # # else compare sizes
      res = self.__executeOperation( dest_url, 'getFileSize' )
      if res['OK']:
        destinationSize = res['Value']
        if sourceSize == destinationSize :
          self.log.debug( "__putFile: Post transfer check successful." )
          return S_OK( destinationSize )
      errorMessage = "__putFile: Source and destination file sizes do not match."
      errObj = S_ERROR( DErrno.EFILESIZE, errorMessage )
      self.log.error( errorMessage, src_url )
    else:
      errorMessage = "__putFile: Failed to put file to storage."
      errObj = S_ERROR( errCode, errorMessage )
      if errCode > 0:
        # positive codes look like errno values: append the system error text
        errStr = "%s %s" % ( errStr, os.strerror( errCode ) )
      self.log.error( errorMessage, errStr )
    # Transfer or size check failed: clean up the remnant before reporting
    res = self.__executeOperation( dest_url, 'removeFile' )
    if res['OK']:
      self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
    else:
      self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
    return errObj
def __lcg_cp_wrapper( self, src_url, dest_url, srctype, dsttype, nbstreams,
timeout, src_spacetokendesc, dest_spacetokendesc ):
""" lcg_util.lcg_cp wrapper
:param self: self reference
:param str src_url: source SURL
:param str dest_url: destination SURL
:param srctype: source SE type
:param dsttype: destination SE type
:param int nbstreams: nb of streams used for trasnfer
:param int timeout: timeout in seconds
:param str src_spacetoken: source space token
:param str dest_spacetoken: destination space token
"""
try:
errCode, errStr = self.lcg_util.lcg_cp4( src_url,
dest_url,
self.defaulttype,
srctype,
dsttype,
self.nobdii,
self.voName,
nbstreams,
self.conf_file,
self.insecure,
self.verbose,
timeout,
src_spacetokendesc,
dest_spacetokendesc,
self.checksumType )
if not isinstance( errCode, int ):
self.log.error( "__lcg_cp_wrapper: Returned errCode was not an integer",
"%s %s" % ( errCode, type( errCode ) ) )
if isinstance( errCode, list ):
msg = []
for err in errCode:
msg.append( '%s of type %s' % ( err, type( err ) ) )
self.log.error( "__lcg_cp_wrapper: Returned errCode was List:\n" , "\n".join( msg ) )
return S_ERROR( DErrno.EGFAL, "__lcg_cp_wrapper: Returned errCode was not an integer %s" % msg )
if not isinstance( errStr, basestring ):
self.log.error( "__lcg_cp_wrapper: Returned errStr was not a string",
"%s %s" % ( errCode, type( errStr ) ) )
return S_ERROR( DErrno.EGFAL, "__lcg_cp_wrapper: Returned errStr was not a string" )
return S_OK( ( errCode, errStr ) )
except Exception, error:
self.log.exception( "__lcg_cp_wrapper", "", error )
return S_ERROR( DErrno.EGFAL, "__lcg_cp_wrapper:Exception while attempting file upload %s" % error )
def getFile( self, path, localPath = False ):
""" make a local copy of a storage :path:
:param self: self reference
:param str path: path on storage
:param mixed localPath: if not specified, os.getcwd()
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for src_url in urls:
fileName = os.path.basename( src_url )
if localPath:
dest_file = "%s/%s" % ( localPath, fileName )
else:
dest_file = "%s/%s" % ( os.getcwd(), fileName )
res = self.__getFile( src_url, dest_file )
if res['OK']:
successful[src_url] = res['Value']
else:
failed[src_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def __getFile( self, src_url, dest_file ):
    """ do a real copy of storage file :src_url: to local fs under :dest_file:

    :param self: self reference
    :param str src_url: SE url to cp
    :param str dest_file: local fs path
    :return: S_OK( local size in B ) on a verified transfer, S_ERROR otherwise
    """
    if not os.path.exists( os.path.dirname( dest_file ) ):
      os.makedirs( os.path.dirname( dest_file ) )
    if os.path.exists( dest_file ):
      self.log.debug( "__getFile: Local file already exists %s. Removing..." % dest_file )
      os.remove( dest_file )
    srctype = self.defaulttype
    src_spacetokendesc = self.spaceToken
    dsttype = 0
    dest_spacetokendesc = ''
    dest_url = 'file:%s' % dest_file
    # The remote size drives both the timeout and the post-transfer check
    res = self.__executeOperation( src_url, 'getFileSize' )
    if not res['OK']:
      return res
    remoteSize = res['Value']
    timeout = int( remoteSize / self.MIN_BANDWIDTH * 4 + 300 )
    nbstreams = 1
    self.log.info( "__getFile: Using %d streams" % nbstreams )
    self.log.info( "__getFile: Executing transfer of %s to %s" % ( src_url, dest_url ) )
    # Run lcg_cp in a separate process with a slightly larger overall timeout
    res = pythonCall( ( timeout + 10 ), self.__lcg_cp_wrapper, src_url, dest_url, srctype, dsttype,
                      nbstreams, timeout, src_spacetokendesc, dest_spacetokendesc )
    if not res['OK']:
      return res
    res = res['Value']
    if not res['OK']: # pylint:disable=invalid-sequence-index
      return res
    errCode, errStr = res['Value']
    if errCode == 0:
      self.log.debug( '__getFile: Got a file from storage.' )
      localSize = getSize( dest_file )
      if localSize == remoteSize:
        self.log.debug( "__getFile: Post transfer check successful." )
        return S_OK( localSize )
      errorMessage = "__getFile: Source and destination file sizes do not match."
      self.log.error( errorMessage, src_url )
    else:
      errorMessage = "__getFile: Failed to get file from storage."
      if errCode > 0:
        # positive codes look like errno values: append the system error text
        errStr = "%s %s" % ( errStr, os.strerror( errCode ) )
      self.log.error( errorMessage, errStr )
    # Failed or inconsistent transfer: remove the partial local copy
    if os.path.exists( dest_file ):
      self.log.debug( "__getFile: Removing local file %s." % dest_file )
      os.remove( dest_file )
    return S_ERROR( errorMessage )
def __executeOperation( self, url, method ):
""" executes the requested :method: with the supplied url
:param self: self reference
:param str url: SE url
:param str method: fcn name
"""
fcn = None
if hasattr( self, method ) and callable( getattr( self, method ) ):
fcn = getattr( self, method )
if not fcn:
return S_ERROR( DErrno.ENOMETH, "Unable to invoke %s, it isn't a member funtion of SRM2Storage" % method )
res = fcn( url )
if not res['OK']:
return res
elif url not in res['Value']['Successful']:
if url not in res['Value']['Failed']:
if res['Value']['Failed'].values():
return S_ERROR( res['Value']['Failed'].values()[0] )
elif res['Value']['Successful'].values():
return S_OK( res['Value']['Successful'].values()[0] )
else:
self.log.error( 'Wrong Return structure', str( res['Value'] ) )
return S_ERROR( 'Wrong Return structure' )
return S_ERROR( res['Value']['Failed'][url] )
return S_OK( res['Value']['Successful'][url] )
############################################################################################
#
# Directory based methods
#
  def isDirectory( self, path ):
    """ isdir on storage path

    :param self: self reference
    :param str path: SE path
    :return: S_OK( { 'Failed' : {}, 'Successful' : {} } ), Successful maps each
             SURL to True/False
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.isDirectory: Checking whether %s path(s) are directory(ies)" % len( urls ) )
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "Failed isDirectory:", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if urlDict.get( 'surl' ):
        dirSURL = urlDict['surl']
        if urlDict['status'] == 0:
          statDict = self.__parse_file_metadata( urlDict )
          if statDict['Directory']:
            successful[dirSURL] = True
          else:
            self.log.debug( "SRM2Storage.isDirectory: Path is not a directory: %s" % dirSURL )
            successful[dirSURL] = False
        elif urlDict['status'] == 2:
          self.log.debug( "SRM2Storage.isDirectory: Supplied path does not exist: %s" % dirSURL )
          # NOTE(review): unlike most methods of this class, the Failed values
          # here are S_ERROR structures rather than plain strings
          failed[dirSURL] = S_ERROR( errno.ENOENT, '%s path does not exist' % dirSURL )
        else:
          errStr = "SRM2Storage.isDirectory: Failed to get file metadata."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( dirSURL, errMessage ) )
          failed[dirSURL] = S_ERROR( DErrno.EGFAL, "Failed to get file metadata %s" % errMessage )
      else:
        # Malformed gfal reply: abort the whole call
        errStr = "SRM2Storage.isDirectory: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errno.ENOMSG, errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def getDirectoryMetadata( self, path ):
    """ get the metadata for the directory :path:

    :param self: self reference
    :param str path: SE path
    :return: S_OK( { 'Failed' : {}, 'Successful' : {} } ) with the parsed stat
             dictionary (plus 'Exists'/'Type' markers) per successful directory
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "getDirectoryMetadata: Attempting to obtain metadata for %s directories." % len( urls ) )
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "Failed getDirectoryMetadata:", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if "surl" in urlDict and urlDict["surl"]:
        pathSURL = urlDict['surl']
        if urlDict['status'] == 0:
          statDict = self.__parse_file_metadata( urlDict )
          if statDict['Directory']:
            # Enrich the stat dictionary with explicit existence/type markers
            statDict['Exists'] = True
            statDict['Type'] = 'Directory'
            successful[pathSURL] = statDict
          else:
            errStr = "SRM2Storage.getDirectoryMetadata: Supplied path is not a directory."
            self.log.error( errStr, pathSURL )
            failed[pathSURL] = errStr
        elif urlDict['status'] == 2:
          errMessage = "SRM2Storage.getDirectoryMetadata: Directory does not exist."
          self.log.error( errMessage, pathSURL )
          failed[pathSURL] = S_ERROR( errno.ENOENT, 'SRM2Storage.getDirectoryMetadata: %s does not exist' % pathSURL )
        else:
          errStr = "SRM2Storage.getDirectoryMetadata: Failed to get directory metadata."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = S_ERROR( DErrno.EGFAL, "Failed to get file metadata %s" % errMessage )
      else:
        # Malformed gfal reply: abort the whole call
        errStr = "SRM2Storage.getDirectoryMetadata: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errno.ENOMSG, errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getDirectorySize( self, path ):
""" Get the size of the directory on the storage
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "SRM2Storage.getDirectorySize: Attempting to get size of %s directories." % len( urls ) )
res = self.listDirectory( urls )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for directory, dirDict in res['Value']['Successful'].items():
directorySize = 0
directoryFiles = 0
filesDict = dirDict['Files']
for fileDict in filesDict.itervalues():
directorySize += fileDict['Size']
directoryFiles += 1
self.log.debug( "SRM2Storage.getDirectorySize: Successfully obtained size of %s." % directory )
subDirectories = len( dirDict['SubDirs'] )
successful[directory] = { 'Files' : directoryFiles, 'Size' : directorySize, 'SubDirs' : subDirectories }
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def listDirectory( self, path, internalCall = False ):
    """ List the contents of the directory on the storage.

    :param internalCall: if this method is called from within
        that class, we should return index on SURL, not LFNs.
        Do not set it to True for a normal call, unless you really
        know what you are doing !!
    :return: S_OK( { 'Failed' : {}, 'Successful' : {} } ), Successful maps each
             directory to { 'SubDirs' : {}, 'Files' : {} } stat dictionaries
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.listDirectory: Attempting to list %s directories." % len( urls ) )
    # The gfal method returns an url, while we want to return an LFN to the user
    urlStart = self.getURLBase( withWSUrl = True )['Value']
    # Only genuine directories are passed on to the gfal listing
    res = self.isDirectory( urls )
    if not res['OK']:
      return res
    failed = res['Value']['Failed']
    directories = {}
    for url, isDirectory in res['Value']['Successful'].items():
      if isDirectory:
        directories[url] = False
      else:
        # NOTE(review): the path exists but is not a directory; the error
        # message is nevertheless phrased as non-existence
        errStr = "SRM2Storage.listDirectory: Directory does not exist."
        self.log.error( errStr, url )
        failed[url] = errStr
    resDict = self.__gfal_lsdir_wrapper( directories )
    if not resDict["OK"]:
      self.log.error( "Failed listDirectory:", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    # resDict = self.__gfalls_wrapper(directories,1)['Value']
    failed.update( resDict['Failed'] )
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if "surl" in urlDict and urlDict["surl"]:
        pathSURL = urlDict['surl']
        if urlDict['status'] == 0:
          successful[pathSURL] = {}
          self.log.debug( "SRM2Storage.listDirectory: Successfully listed directory %s" % pathSURL )
          subPathDirs = {}
          subPathFiles = {}
          if "subpaths" in urlDict:
            subPaths = urlDict['subpaths']
            # Parse the subpaths for the directory
            for subPathDict in subPaths:
              subPathSURL = subPathDict['surl']
              if subPathDict['status'] == 22:
                # status-22 entries are only logged, not included in the listing
                self.log.error( "File found with status 22", subPathDict )
              elif subPathDict['status'] == 0:
                statDict = self.__parse_file_metadata( subPathDict )
                # Replace the URL with an LFN in normal cases, but return the SURL if it is an internal call
                subPathLFN = subPathSURL if internalCall else subPathSURL.replace( urlStart, '' )
                if statDict['File']:
                  subPathFiles[subPathLFN] = statDict
                elif statDict['Directory']:
                  subPathDirs[subPathLFN] = statDict
          # Keep the infomation about this path's subpaths
          successful[pathSURL]['SubDirs'] = subPathDirs
          successful[pathSURL]['Files'] = subPathFiles
        else:
          errStr = "SRM2Storage.listDirectory: Failed to list directory."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
      else:
        # Malformed gfal reply: abort the whole call
        errStr = "SRM2Storage.listDirectory: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errno.ENOMSG, errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def putDirectory( self, path ):
""" cp -R local SE
puts a local directory to the physical storage together with all its files and subdirectories
:param self: self reference
:param str path: local fs path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
self.log.debug( "SRM2Storage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
for destDir, sourceDir in urls.items():
res = self.__putDir( sourceDir, destDir )
if res['OK']:
if res['Value']['AllPut']:
self.log.debug( "SRM2Storage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
successful[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "SRM2Storage.putDirectory: Failed to put entire directory to remote storage.", destDir )
failed[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "SRM2Storage.putDirectory: Completely failed to put directory to remote storage.", destDir )
failed[destDir] = { "Files" : 0, "Size" : 0 }
return S_OK( { "Failed" : failed, "Successful" : successful } )
def __putDir( self, src_directory, dest_directory ):
""" Black magic contained within...
"""
filesPut = 0
sizePut = 0
# Check the local directory exists
if not os.path.isdir( src_directory ):
errStr = "SRM2Storage.__putDir: The supplied directory does not exist."
self.log.error( errStr, src_directory )
return S_ERROR( errno.ENOENT, errStr )
# Get the local directory contents
contents = os.listdir( src_directory )
allSuccessful = True
directoryFiles = {}
for fileName in contents:
localPath = '%s/%s' % ( src_directory, fileName )
remotePath = '%s/%s' % ( dest_directory, fileName )
if not os.path.isdir( localPath ):
directoryFiles[remotePath] = localPath
else:
res = self.__putDir( localPath, remotePath )
if not res['OK']:
errStr = "SRM2Storage.__putDir: Failed to put directory to storage."
self.log.error( errStr, res['Message'] )
else:
if not res['Value']['AllPut']:
pathSuccessful = False
filesPut += res['Value']['Files']
sizePut += res['Value']['Size']
if directoryFiles:
res = self.putFile( directoryFiles )
if not res['OK']:
self.log.error( "SRM2Storage.__putDir: Failed to put files to storage.", res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesPut += 1
sizePut += fileSize
if res['Value']['Failed']:
allSuccessful = False
return S_OK( { 'AllPut' : allSuccessful, 'Files' : filesPut, 'Size' : sizePut } )
def getDirectory( self, path, localPath = False ):
""" Get a local copy in the current directory of a physical file specified by its path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
self.log.debug( "SRM2Storage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
for src_dir in urls:
dirName = os.path.basename( src_dir )
if localPath:
dest_dir = "%s/%s" % ( localPath, dirName )
else:
dest_dir = "%s/%s" % ( os.getcwd(), dirName )
res = self.__getDir( src_dir, dest_dir )
if res['OK']:
if res['Value']['AllGot']:
self.log.debug( "SRM2Storage.getDirectory: Successfully got local copy of %s" % src_dir )
successful[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "SRM2Storage.getDirectory: Failed to get entire directory.", src_dir )
failed[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "SRM2Storage.getDirectory: Completely failed to get local copy of directory.", src_dir )
failed[src_dir] = {'Files':0, 'Size':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __getDir( self, srcDirectory, destDirectory ):
""" Black magic contained within...
"""
filesGot = 0
sizeGot = 0
# Check the remote directory exists
res = self.__executeOperation( srcDirectory, 'isDirectory' )
if not res['OK']:
self.log.error( "SRM2Storage.__getDir: Failed to find the supplied source directory.", srcDirectory )
return res
if not res['Value']:
errStr = "SRM2Storage.__getDir: The supplied source path is not a directory."
self.log.error( errStr, srcDirectory )
return S_ERROR( errno.ENOTDIR, errStr )
# Check the local directory exists and create it if not
if not os.path.exists( destDirectory ):
os.makedirs( destDirectory )
# Get the remote directory contents
res = self.__getDirectoryContents( srcDirectory )
if not res['OK']:
errStr = "SRM2Storage.__getDir: Failed to list the source directory."
self.log.error( errStr, srcDirectory )
filesToGet = res['Value']['Files']
subDirs = res['Value']['SubDirs']
allSuccessful = True
res = self.getFile( filesToGet.keys(), destDirectory )
if not res['OK']:
self.log.error( "SRM2Storage.__getDir: Failed to get files from storage.", res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesGot += 1
sizeGot += fileSize
if res['Value']['Failed']:
allSuccessful = False
for subDir in subDirs:
subDirName = os.path.basename( subDir )
localPath = '%s/%s' % ( destDirectory, subDirName )
res = self.__getDir( subDir, localPath )
if res['OK']:
if not res['Value']['AllGot']:
allSuccessful = True
filesGot += res['Value']['Files']
sizeGot += res['Value']['Size']
return S_OK( { 'AllGot' : allSuccessful, 'Files' : filesGot, 'Size' : sizeGot } )
def removeDirectory( self, path, recursive = False ):
""" Remove a directory
"""
if recursive:
return self.__removeDirectoryRecursive( path )
else:
return self.__removeDirectory( path )
  def __removeDirectory( self, directory ):
    """ This function removes the directory on the storage (non-recursive).

    :param self: self reference
    :param mixed directory: directory SURL or list/dict of SURLs
    :return: S_OK( { 'Failed' : {}, 'Successful' : {} } ), Successful maps each
             SURL to True
    """
    res = checkArgumentFormat( directory )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.__removeDirectory: Attempting to remove %s directories." % len( urls ) )
    resDict = self.__gfal_removedir_wrapper( urls )
    if not resDict["OK"]:
      self.log.error( "Failed __removeDirectory:", "%s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    allResults = resDict['AllResults']
    successful = {}
    for urlDict in allResults:
      if "surl" in urlDict:
        pathSURL = urlDict['surl']
        if urlDict['status'] == 0:
          self.log.debug( "__removeDirectory: Successfully removed directory: %s" % pathSURL )
          successful[pathSURL] = True
        elif urlDict['status'] == 2:
          # This is the case where the file doesn't exist.
          # Removing a non-existing directory is treated as a success
          self.log.debug( "__removeDirectory: Directory did not exist, sucessfully removed: %s" % pathSURL )
          successful[pathSURL] = True
        else:
          errStr = "removeDirectory: Failed to remove directory."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def __removeDirectoryRecursive( self, directory ):
    """ Recursively removes the directory and sub dirs. Repeatedly calls itself to delete recursively.

    :param self: self reference
    :param directory: directory URL(s) in any format accepted by checkArgumentFormat
    :return: S_OK( { 'Failed' : { dir : counts }, 'Successful' : { dir : counts } } ) where
             counts is { 'FilesRemoved' : int, 'SizeRemoved' : int }
    """
    res = checkArgumentFormat( directory )
    if not res['OK']:
      return res
    urls = res['Value']
    successful = {}
    failed = {}
    self.log.debug( "SRM2Storage.__removeDirectory: Attempting to recursively remove %s directories." % len( urls ) )
    for directory in urls:
      self.log.debug( "SRM2Storage.removeDirectory: Attempting to remove %s" % directory )
      res = self.__getDirectoryContents( directory )
      # running removal counters for this directory
      resDict = {'FilesRemoved':0, 'SizeRemoved':0}
      if not res['OK']:
        # could not even list the directory: report it failed with zero counts
        failed[directory] = resDict
      else:
        filesToRemove = res['Value']['Files']
        subDirs = res['Value']['SubDirs']
        # Remove all the files in the directory
        res = self.__removeDirectoryFiles( filesToRemove )
        resDict['FilesRemoved'] += res['FilesRemoved']
        resDict['SizeRemoved'] += res['SizeRemoved']
        allFilesRemoved = res['AllRemoved']
        # Remove all the sub-directories
        res = self.__removeSubDirectories( subDirs )
        resDict['FilesRemoved'] += res['FilesRemoved']
        resDict['SizeRemoved'] += res['SizeRemoved']
        allSubDirsRemoved = res['AllRemoved']
        # If all the files and sub-directories are removed then remove the directory
        allRemoved = False
        if allFilesRemoved and allSubDirsRemoved:
          self.log.debug( "SRM2Storage.removeDirectory: Successfully removed all files and sub-directories." )
          res = self.__removeDirectory( directory )
          if res['OK']:
            if directory in res['Value']['Successful']:
              self.log.debug( "SRM2Storage.removeDirectory: Successfully removed the directory %s." % directory )
              allRemoved = True
        # Report the result
        if allRemoved:
          successful[directory] = resDict
        else:
          failed[directory] = resDict
    return S_OK ( { 'Failed' : failed, 'Successful' : successful } )
def __getDirectoryContents( self, directory ):
""" ls of storage element :directory:
:param self: self reference
:param str directory: SE path
"""
directory = directory.rstrip( '/' )
errMessage = "SRM2Storage.__getDirectoryContents: Failed to list directory."
res = self.listDirectory( directory, internalCall = True )
if not res['OK']:
self.log.error( errMessage, res['Message'] )
return res
if directory in res['Value']['Failed']:
self.log.error( errMessage, res['Value']['Failed'][directory] )
return S_ERROR( errMessage )
surlsDict = res['Value']['Successful'][directory]['Files']
subDirsDict = res['Value']['Successful'][directory]['SubDirs']
filesToRemove = dict( [ ( url, surlsDict[url]['Size'] ) for url in surlsDict ] )
return S_OK ( { 'Files' : filesToRemove, 'SubDirs' : subDirsDict.keys() } )
def __removeDirectoryFiles( self, filesToRemove ):
""" rm files from SE
:param self: self reference
:param dict filesToRemove: dict with surls as keys
"""
resDict = { 'FilesRemoved' : 0, 'SizeRemoved' : 0, 'AllRemoved' : True }
if len( filesToRemove ) > 0:
res = self.removeFile( filesToRemove.keys() )
if res['OK']:
for removedSurl in res['Value']['Successful']:
resDict['FilesRemoved'] += 1
resDict['SizeRemoved'] += filesToRemove[removedSurl]
if res['Value']['Failed']:
resDict['AllRemoved'] = False
self.log.debug( "SRM2Storage.__removeDirectoryFiles:",
"Removed %s files of size %s bytes." % ( resDict['FilesRemoved'], resDict['SizeRemoved'] ) )
return resDict
def __removeSubDirectories( self, subDirectories ):
""" rm -rf sub-directories
:param self: self reference
:param dict subDirectories: dict with surls as keys
"""
resDict = { 'FilesRemoved' : 0, 'SizeRemoved' : 0, 'AllRemoved' : True }
if len( subDirectories ) > 0:
res = self.__removeDirectoryRecursive( subDirectories )
if res['OK']:
for removedSubDir, removedDict in res['Value']['Successful'].items():
resDict['FilesRemoved'] += removedDict['FilesRemoved']
resDict['SizeRemoved'] += removedDict['SizeRemoved']
self.log.debug( "SRM2Storage.__removeSubDirectories:",
"Removed %s files of size %s bytes from %s." % ( removedDict['FilesRemoved'],
removedDict['SizeRemoved'],
removedSubDir ) )
for removedSubDir, removedDict in res['Value']['Failed'].items():
resDict['FilesRemoved'] += removedDict['FilesRemoved']
resDict['SizeRemoved'] += removedDict['SizeRemoved']
self.log.debug( "SRM2Storage.__removeSubDirectories:",
"Removed %s files of size %s bytes from %s." % ( removedDict['FilesRemoved'],
removedDict['SizeRemoved'],
removedSubDir ) )
if len( res['Value']['Failed'] ) != 0:
resDict['AllRemoved'] = False
return resDict
@staticmethod
def __parse_stat( stat ):
""" get size, ftype and mode from stat struct
:param stat: stat struct
"""
statDict = { 'File' : False, 'Directory' : False }
if S_ISREG( stat[ST_MODE] ):
statDict['File'] = True
statDict['Size'] = stat[ST_SIZE]
if S_ISDIR( stat[ST_MODE] ):
statDict['Directory'] = True
statDict['Mode'] = S_IMODE( stat[ST_MODE] )
return statDict
def __parse_file_metadata( self, urlDict ):
""" parse and save bits and pieces of metadata info
:param self: self reference
:param urlDict: gfal call results
"""
statDict = self.__parse_stat( urlDict['stat'] )
if statDict['File']:
statDict.setdefault( "Checksum", "" )
if "checksum" in urlDict and ( urlDict['checksum'] != '0x' ):
statDict["Checksum"] = urlDict["checksum"]
if 'locality' in urlDict:
urlLocality = urlDict['locality']
if re.search( 'ONLINE', urlLocality ):
statDict['Cached'] = 1
else:
statDict['Cached'] = 0
if re.search( 'NEARLINE', urlLocality ):
statDict['Migrated'] = 1
else:
statDict['Migrated'] = 0
statDict['Lost'] = 0
if re.search( 'LOST', urlLocality ):
statDict['Lost'] = 1
statDict['Unavailable'] = 0
if re.search( 'UNAVAILABLE', urlLocality ):
statDict['Unavailable'] = 1
statDict['Accessible'] = not statDict['Lost'] and statDict['Cached'] and not statDict['Unavailable']
else:
statDict['Cached'] = 0
statDict['Migrated'] = 0
statDict['Lost'] = 0
statDict['Unavailable'] = 1
statDict['Accessible'] = False
return self._addCommonMetadata( statDict )
  def __getProtocols( self ):
    """ returns list of protocols to use at a given site

    :warn: priority is given to a protocols list defined in the CS

    :param self: self reference
    :return: S_OK( list of protocol names ) or S_ERROR when none are configured
    """
    sections = gConfig.getSections( '/Resources/StorageElements/%s/' % ( self.name ) )
    if not sections['OK']:
      return sections
    protocolsList = []
    for section in sections['Value']:
      # only consider CS sections describing this very plugin
      path = '/Resources/StorageElements/%s/%s/PluginName' % ( self.name, section )
      if gConfig.getValue( path, '' ) == self.pluginName:
        protPath = '/Resources/StorageElements/%s/%s/ProtocolsList' % ( self.name, section )
        siteProtocols = gConfig.getValue( protPath, [] )
        if siteProtocols:
          self.log.debug( 'Found SE protocols list to override defaults:', ', '.join( siteProtocols, ) )
          protocolsList = siteProtocols
    if not protocolsList:
      # no SE-specific list in the CS: fall back to the global defaults
      self.log.debug( "SRM2Storage.getTransportURL: No protocols provided, using defaults." )
      protocolsList = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )
    if not protocolsList:
      return S_ERROR( DErrno.ECONF, "SRM2Storage.getTransportURL: No local protocols defined and no defaults found" )
    return S_OK( protocolsList )
#######################################################################
#
# These methods wrap the gfal functionality with the accounting. All these are based on __gfal_operation_wrapper()
#
#######################################################################
def __gfal_lsdir_wrapper( self, urls ):
""" This is a hack because the structures returned by the different SEs are different
"""
step = 200
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_lslevels'] = 1
gfalDict['srmv2_lscount'] = step
failed = {}
successful = []
for url in urls:
allResults = []
gfalDict['surls'] = [url]
gfalDict['nbfiles'] = 1
gfalDict['timeout'] = self.gfalLongTimeOut
allObtained = False
iteration = 0
while not allObtained:
gfalDict['srmv2_lsoffset'] = iteration * step
iteration += 1
res = self.__gfal_operation_wrapper( 'gfal_ls', gfalDict )
# gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
if re.search( r'\[SE\]\[Ls\]\[SRM_FAILURE\]', res['Message'] ):
allObtained = True
else:
failed[url] = res['Message']
else:
results = res['Value']
tempStep = step
if len( results ) == 1:
for result in results:
if 'subpaths' in result:
results = result['subpaths']
tempStep = step - 1
elif re.search( re.escape( result['surl'] ), url ):
results = []
allResults.extend( results )
if len( results ) < tempStep:
allObtained = True
for urlDict in allResults:
if 'surl' in urlDict:
urlDict['surl'] = self.__convertRandomSRMOutputIntoAFullURL( urlDict['surl'] )['Value']
successful.append( { 'surl' : url, 'status' : 0, 'subpaths' : allResults } )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : successful, "Failed" : failed } )
def __gfal_ls_wrapper( self, urls, depth ):
""" gfal_ls wrapper
:param self: self reference
:param list urls: urls to check
:param int depth: srmv2_lslevel (0 or 1)
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_lslevels'] = depth
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_ls', gfalDict )
# gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_prestage_wrapper( self, urls, lifetime ):
""" gfal_prestage wrapper
:param self: self refefence
:param list urls: urls to prestage
:param int lifetime: prestage lifetime
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
gfalDict['srmv2_desiredpintime'] = lifetime
gfalDict['protocols'] = self.defaultLocalProtocols
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.stageTimeout
res = self.__gfal_operation_wrapper( 'gfal_prestage',
gfalDict,
timeout_sendreceive = self.fileTimeout * len( urls ) )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfalturlsfromsurls_wrapper( self, urls, listProtocols ):
""" This is a function that can be reused everywhere to perform the gfal_turlsfromsurls
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['protocols'] = listProtocols
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_turlsfromsurls', gfalDict )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfaldeletesurls_wrapper( self, urls ):
""" This is a function that can be reused everywhere to perform the gfal_deletesurls
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_deletesurls', gfalDict )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_removedir_wrapper( self, urls ):
""" This is a function that can be reused everywhere to perform the gfal_removedir
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_removedir', gfalDict )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_pin_wrapper( self, urls, lifetime ):
""" gfal_pin wrapper
:param self: self reference
:param dict urls: dict { url : srmRequestID }
:param int lifetime: pin lifetime in seconds
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 0
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
gfalDict['srmv2_desiredpintime'] = lifetime
allResults = []
failed = {}
srmRequestFiles = {}
for url, srmRequestID in urls.items():
if srmRequestID not in srmRequestFiles:
srmRequestFiles[srmRequestID] = []
srmRequestFiles[srmRequestID].append( url )
for srmRequestID, urls in srmRequestFiles.items():
listOfLists = breakListIntoChunks( urls, self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_pin', gfalDict, srmRequestID = srmRequestID )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_prestagestatus_wrapper( self, urls ):
""" gfal_prestagestatus wrapper
:param self: self reference
:param dict urls: dict { srmRequestID : [ url, url ] }
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 0
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
allResults = []
failed = {}
srmRequestFiles = {}
for url, srmRequestID in urls.items():
if srmRequestID not in srmRequestFiles:
srmRequestFiles[srmRequestID] = []
srmRequestFiles[srmRequestID].append( url )
for srmRequestID, urls in srmRequestFiles.items():
listOfLists = breakListIntoChunks( urls, self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_prestagestatus', gfalDict, srmRequestID = srmRequestID )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_release_wrapper( self, urls ):
""" gfal_release wrapper
:param self: self reference
:param dict urls: dict { url : srmRequestID }
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 0
allResults = []
failed = {}
srmRequestFiles = {}
for url, srmRequestID in urls.items():
if srmRequestID not in srmRequestFiles:
srmRequestFiles[srmRequestID] = []
srmRequestFiles[srmRequestID].append( url )
for srmRequestID, urls in srmRequestFiles.items():
listOfLists = breakListIntoChunks( urls, self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_release', gfalDict, srmRequestID = srmRequestID )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
  def __gfal_operation_wrapper( self, operation, gfalDict, srmRequestID = None, timeout_sendreceive = None ):
    """ gfal fcn call wrapper

    Runs __gfal_wrapper inside a pythonCall with a generous outer timeout, then
    normalises the surls found in the results.

    :param self: self reference
    :param str operation: gfal fcn name
    :param dict gfalDict: gfal dict passed to create gfal object
    :param srmRequestID: srmRequestID
    :param int timeout_sendreceive: gfal sendreceive timeout in seconds
    """
    res = self.__importExternals()
    if not res['OK']:
      return res
    # # timeout for one gfal_exec call
    timeout = gfalDict['timeout'] if not timeout_sendreceive else timeout_sendreceive
    # # pythonCall timeout ( const + timeout * ( 2 ** retry )
    pyTimeout = 300 + ( timeout * ( 2 ** self.gfalRetry ) )
    res = pythonCall( pyTimeout, self.__gfal_wrapper, operation, gfalDict, srmRequestID, timeout_sendreceive )
    if not res['OK']:
      return res
    res = res['Value']
    if res['OK']: # pylint: disable=invalid-sequence-index
      # rewrite every returned surl into a full SRM URL
      for urlDict in res['Value']: # pylint: disable=invalid-sequence-index
        if 'surl' in urlDict:
          urlDict['surl'] = self.__convertRandomSRMOutputIntoAFullURL( urlDict['surl'] )['Value']
    return res
  def __gfal_wrapper( self, operation, gfalDict, srmRequestID = None, timeout_sendreceive = None ):
    """ execute gfal :operation:

    1. create gfalObject from gfalDict
    2. set srmRequestID
    3. call __gfal_exec
    4. get gfal ids
    5. get gfal results
    6. destroy gfal object

    :param self: self reference
    :param str operation: fcn to call
    :param dict gfalDict: gfal config dict
    :param srmRequestID: srm request id
    :param int timeout_sendrecieve: timeout for gfal send request and recieve results in seconds
    """
    gfalObject = self.__create_gfal_object( gfalDict )
    if not gfalObject["OK"]:
      return gfalObject
    gfalObject = gfalObject['Value']
    if srmRequestID:
      res = self.__gfal_set_ids( gfalObject, srmRequestID )
      if not res['OK']:
        return res
    res = self.__gfal_exec( gfalObject, operation, timeout_sendreceive )
    if not res['OK']:
      return res
    gfalObject = res['Value']
    # the request token may be (re)assigned by the call itself;
    # fall back to the caller-provided one if it cannot be retrieved
    res = self.__gfal_get_ids( gfalObject )
    if not res['OK']:
      newSRMRequestID = srmRequestID
    else:
      newSRMRequestID = res['Value']
    res = self.__get_results( gfalObject )
    if not res['OK']:
      return res
    resultList = []
    pfnRes = res['Value']
    # tag every result with the request token it belongs to
    for myDict in pfnRes:
      myDict['SRMReqID'] = newSRMRequestID
      resultList.append( myDict )
    self.__destroy_gfal_object( gfalObject )
    return S_OK( resultList )
#######################################################################
#
# The following methods provide the interaction with gfal functionality
#
#######################################################################
def __create_gfal_object( self, gfalDict ):
""" create gfal object by calling gfal.gfal_init
:param self: self reference
:param dict gfalDict: gfal params dict
"""
self.log.debug( "SRM2Storage.__create_gfal_object: Performing gfal_init." )
errCode, gfalObject, errMessage = self.gfal.gfal_init( gfalDict )
if not errCode == 0:
errStr = "SRM2Storage.__create_gfal_object: Failed to perform gfal_init."
if not errMessage:
errMessage = os.strerror( self.gfal.gfal_get_errno() )
self.log.error( errStr, errMessage )
return S_ERROR( self.gfal.gfal_get_errno(), errMessage )
else:
self.log.debug( "SRM2Storage.__create_gfal_object: Successfully performed gfal_init." )
return S_OK( gfalObject )
def __gfal_set_ids( self, gfalObject, srmRequestID ):
""" set :srmRequestID:
:param self: self reference
:param gfalObject: gfal object
:param str srmRequestID: srm request id
"""
self.log.debug( "SRM2Storage.__gfal_set_ids: Performing gfal_set_ids." )
errCode, gfalObject, errMessage = self.gfal.gfal_set_ids( gfalObject, None, 0, str( srmRequestID ) )
if not errCode == 0:
errStr = "SRM2Storage.__gfal_set_ids: Failed to perform gfal_set_ids."
if not errMessage:
errMessage = os.strerror( errCode )
self.log.error( errStr, errMessage )
return S_ERROR( errCode, errMessage )
else:
self.log.debug( "SRM2Storage.__gfal_set_ids: Successfully performed gfal_set_ids." )
return S_OK( gfalObject )
  def __gfal_exec( self, gfalObject, method, timeout_sendreceive = None ):
    """ Invoke gfal :method: on :gfalObject:, retrying with a doubled timeout on ECOMM.

    In gfal, for every method (synchronous or asynchronous), you can define a sendreceive timeout and a connect timeout.
    The connect timeout sets the maximum amount of time a client accepts to wait before establishing a successful TCP
    connection to SRM (default 60 seconds).
    The sendreceive timeout, allows a client to set the maximum time the send
    of a request to SRM can take (normally all send operations return immediately unless there is no free TCP buffer)
    and the maximum time to receive a reply (a token for example). Default 0, i.e. no timeout.
    The srm timeout for asynchronous requests default to 3600 seconds

    gfal_set_timeout_connect (int value)

    gfal_set_timeout_sendreceive (int value)

    gfal_set_timeout_bdii (int value)

    gfal_set_timeout_srm (int value)
    """
    self.log.debug( "SRM2Storage.__gfal_exec(%s): Starting" % method )
    fcn = None
    # resolve the gfal function by name on the gfal binding module
    if hasattr( self.gfal, method ) and callable( getattr( self.gfal, method ) ):
      fcn = getattr( self.gfal, method )
    if not fcn:
      return S_ERROR( DErrno.ENOMETH, "%s is not a member function of gfal" % method )
      # return S_ERROR( "Unable to invoke %s for gfal, it isn't a member function" % method )
    # # retry
    retry = self.gfalRetry if self.gfalRetry else 1
    # # initial timeout
    timeout = timeout_sendreceive if timeout_sendreceive else self.gfalTimeout
    # # errCode, errMessage, errNo
    errCode, errMessage, errNo = 0, "", 0
    for _i in range( retry ):
      self.gfal.gfal_set_timeout_sendreceive( timeout )
      errCode, gfalObject, errMessage = fcn( gfalObject )
      if not errCode:
        # success: stop retrying
        break
      errNo = self.gfal.gfal_get_errno()
      if errCode == -1 and errNo == errno.ECOMM:
        # communication timeout: double the timeout and try again
        timeout *= 2
        self.log.debug( "SRM2Storage.__gfal_exec(%s): got ECOMM, extending timeout to %s s" % ( method, timeout ) )
    if errCode:
      errStr = "SRM2Storage.__gfal_exec(%s): Execution failed." % method
      if not errMessage:
        errMessage = os.strerror( errNo ) if errNo else "UNKNOWN ERROR"
      self.log.error( errStr, errMessage )
      return S_ERROR( errCode, errMessage )
    self.log.debug( "SRM2Storage.__gfal_exec(%s): Successfully invoked." % method )
    return S_OK( gfalObject )
def __get_results( self, gfalObject ):
""" retrive gfal results
:param self: self reference
:param gfalObject: gfal object
"""
self.log.debug( "SRM2Storage.__get_results: Performing gfal_get_results" )
numberOfResults, gfalObject, listOfResults = self.gfal.gfal_get_results( gfalObject )
if numberOfResults <= 0:
errObj = S_ERROR( DErrno.EGFAL, "SRM2Storage.__get_results: Did not obtain results with gfal_get_results." )
self.log.error( errObj )
return errObj
else:
self.log.debug( "SRM2Storage.__get_results: Retrieved %s results from gfal_get_results." % numberOfResults )
for result in listOfResults:
if result['status'] != 0:
if result['explanation']:
errMessage = result['explanation']
elif result['status'] > 0:
errMessage = os.strerror( result['status'] )
result['ErrorMessage'] = errMessage
return S_OK( listOfResults )
def __gfal_get_ids( self, gfalObject ):
""" get srmRequestToken
:param self: self reference
:param gfalObject: gfalObject
"""
self.log.debug( "SRM2Storage.__gfal_get_ids: Performing gfal_get_ids." )
numberOfResults, gfalObject, _srm1RequestID, _srm1FileIDs, srmRequestToken = self.gfal.gfal_get_ids( gfalObject )
if numberOfResults <= 0:
errObj = S_ERROR( DErrno.EGFAL, "__gfal_get_ids could not obtain request ID" )
self.log.error( errObj )
return errObj
else:
self.log.debug( "SRM2Storage.__get_gfal_ids: Retrieved SRM request ID %s." % srmRequestToken )
return S_OK( srmRequestToken )
  def __destroy_gfal_object( self, gfalObject ):
    """ del gfal object by calling gfal.gfal_internal_free

    :param self: self reference
    :param gfalObject: gfalObject to be freed
    :return: S_OK() always
    """
    self.log.debug( "SRM2Storage.__destroy_gfal_object: Performing gfal_internal_free." )
    # release the memory held by the underlying gfal C structure
    self.gfal.gfal_internal_free( gfalObject )
    return S_OK()
| Andrew-McNab-UK/DIRAC | Resources/Storage/SRM2Storage.py | Python | gpl-3.0 | 81,950 | [
"DIRAC"
] | 564624ee1a9699ecb9bcdf9b12e42c40ad1932ca3e155257b77136438bbfe480 |
#! /usr/bin/env python
"""Generate C code from an ASDL description."""
# TO DO
# handle fields that have a type but no name
import os, sys, traceback
import asdl
# Formatting parameters for the generated C source.
TABSIZE = 8  # spaces per indentation level
MAX_COL = 80  # maximum column before emitted lines are reflowed
def get_c_type(name):
    """Return a string for the C name of the type.

    The default asdl types (identifier, string, int, bool) map to
    themselves; every other type name gets a "_ty" suffix.
    """
    # XXX ack! need to figure out where Id is useful and where string
    if isinstance(name, asdl.Id):
        name = name.value
    if name in asdl.builtin_types:
        return name
    return "%s_ty" % name
def reflow_lines(s, depth):
    """Reflow the line s indented depth tabs.

    Return a sequence of lines where no line extends beyond MAX_COL
    when properly indented.  The first line is properly indented based
    exclusively on depth * TABSIZE.  All following lines -- these are
    the reflowed lines generated by this function -- start at the same
    column as the first character beyond the opening { in the first
    line.
    """
    size = MAX_COL - depth * TABSIZE
    if len(s) < size:
        return [s]

    lines = []
    cur = s
    padding = ""
    while len(cur) > size:
        i = cur.rfind(' ', 0, size)
        # XXX this should be fixed for real
        if i == -1 and 'GeneratorExp' in cur:
            i = size + 3
        # Bug fix: use repr() here -- the original used the Python-2-only
        # backquote syntax (`s`), which is a syntax error under Python 3.
        assert i != -1, "Impossible line %d to reflow: %s" % (size, repr(s))
        lines.append(padding + cur[:i])
        if len(lines) == 1:
            # find new size based on brace
            j = cur.find('{', 0, i)
            if j >= 0:
                j += 2 # account for the brace and the space after it
                size -= j
                padding = " " * j
            else:
                j = cur.find('(', 0, i)
                if j >= 0:
                    j += 1 # account for the paren (no space after it)
                    size -= j
                    padding = " " * j
        cur = cur[i+1:]
    else:
        # while/else: always runs (no break above) and flushes the remainder
        lines.append(padding + cur)
    return lines
def is_simple(sum):
    """Return True if a sum is a simple.

    A sum is simple if none of its constructors carries fields, e.g.
    unaryop = Invert | Not | UAdd | USub
    """
    return not any(t.fields for t in sum.types)
class EmitVisitor(asdl.VisitorBase):
    """Base visitor that writes indented (and optionally reflowed) lines."""

    def __init__(self, file):
        self.file = file
        super(EmitVisitor, self).__init__()

    def emit(self, s, depth, reflow=1):
        """Write s at the given indent depth; reflow long lines unless
        reflow is false."""
        lines = reflow_lines(s, depth) if reflow else [s]
        indent = " " * TABSIZE * depth
        for line in lines:
            self.file.write(indent + line + "\n")
class TypeDefVisitor(EmitVisitor):
    """Emit the C typedefs for every definition in the module."""

    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)

    def visitType(self, type, depth=0):
        self.visit(type.value, type.name, depth)

    def visitSum(self, sum, name, depth):
        # simple sums become C enums, the rest become pointer typedefs
        if is_simple(sum):
            self.simple_sum(sum, name, depth)
        else:
            self.sum_with_constructors(sum, name, depth)

    def simple_sum(self, sum, name, depth):
        enum = ["%s=%d" % (type.name, i + 1)
                for i, type in enumerate(sum.types)]
        ctype = get_c_type(name)
        s = "typedef enum _%s { %s } %s;" % (name, ", ".join(enum), ctype)
        self.emit(s, depth)
        self.emit("", depth)

    def sum_with_constructors(self, sum, name, depth):
        self.emit("typedef struct _%s *%s;" % (name, get_c_type(name)), depth)
        self.emit("", depth)

    def visitProduct(self, product, name, depth):
        self.emit("typedef struct _%s *%s;" % (name, get_c_type(name)), depth)
        self.emit("", depth)
class StructVisitor(EmitVisitor):
    """Visitor to generate typdefs for AST."""

    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)

    def visitType(self, type, depth=0):
        self.visit(type.value, type.name, depth)

    def visitSum(self, sum, name, depth):
        # simple sums were already emitted as enums by TypeDefVisitor
        if not is_simple(sum):
            self.sum_with_constructors(sum, name, depth)

    def sum_with_constructors(self, sum, name, depth):
        # This local emit() pulls the CALLER's locals (e.g. `name`) via the
        # frame object, so "%(name)s" templates work without passing a dict.
        def emit(s, depth=depth):
            self.emit(s % sys._getframe(1).f_locals, depth)
        enum = []
        for i in range(len(sum.types)):
            type = sum.types[i]
            enum.append("%s_kind=%d" % (type.name, i + 1))
        emit("enum _%(name)s_kind {" + ", ".join(enum) + "};")

        emit("struct _%(name)s {")
        emit("enum _%(name)s_kind kind;", depth + 1)
        emit("union {", depth + 1)
        for t in sum.types:
            self.visit(t, depth + 2)
        emit("} v;", depth + 1)
        for field in sum.attributes:
            # rudimentary attribute handling
            type = str(field.type)
            assert type in asdl.builtin_types, type
            emit("%s %s;" % (type, field.name), depth + 1);
        emit("};")
        emit("")

    def visitConstructor(self, cons, depth):
        if cons.fields:
            self.emit("struct {", depth)
            for f in cons.fields:
                self.visit(f, depth + 1)
            self.emit("} %s;" % cons.name, depth)
            self.emit("", depth)
        else:
            # XXX not sure what I want here, nothing is probably fine
            pass

    def visitField(self, field, depth):
        # XXX need to lookup field.type, because it might be something
        # like a builtin...
        ctype = get_c_type(field.type)
        name = field.name
        if field.seq:
            # sequences of comparison operators use a dedicated int sequence
            if field.type.value in ('cmpop',):
                self.emit("asdl_int_seq *%(name)s;" % locals(), depth)
            else:
                self.emit("asdl_seq *%(name)s;" % locals(), depth)
        else:
            self.emit("%(ctype)s %(name)s;" % locals(), depth)

    def visitProduct(self, product, name, depth):
        self.emit("struct _%(name)s {" % locals(), depth)
        for f in product.fields:
            self.visit(f, depth + 1)
        self.emit("};", depth)
        self.emit("", depth)
class PrototypeVisitor(EmitVisitor):
    """Generate function prototypes for the .h file"""

    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)

    def visitType(self, type):
        self.visit(type.value, type.name)

    def visitSum(self, sum, name):
        # simple sums are plain enums: no constructor functions needed
        if is_simple(sum):
            pass # XXX
        else:
            for t in sum.types:
                self.visit(t, name, sum.attributes)

    def get_args(self, fields):
        """Return a list of C argument info, one for each field.

        Argument info is 3-tuple of a C type, variable name, and flag
        that is true if type can be NULL.
        """
        args = []
        unnamed = {}
        for f in fields:
            if f.name is None:
                # unnamed fields are named after their type; a counter
                # disambiguates repeats ("name1", "name2", ...)
                name = f.type
                c = unnamed[name] = unnamed.get(name, 0) + 1
                if c > 1:
                    name = "name%d" % (c - 1)
            else:
                name = f.name
            # XXX should extend get_c_type() to handle this
            if f.seq:
                if f.type.value in ('cmpop',):
                    ctype = "asdl_int_seq *"
                else:
                    ctype = "asdl_seq *"
            else:
                ctype = get_c_type(f.type)
            args.append((ctype, name, f.opt or f.seq))
        return args

    def visitConstructor(self, cons, type, attrs):
        args = self.get_args(cons.fields)
        attrs = self.get_args(attrs)
        ctype = get_c_type(type)
        self.emit_function(cons.name, ctype, args, attrs)

    def emit_function(self, name, ctype, args, attrs, union=1):
        # emit a macro "#define Name(a0, ...) _Py_Name(a0, ...)" plus the
        # _Py_-prefixed prototype; the trailing macro arg is the arena
        args = args + attrs
        if args:
            argstr = ", ".join(["%s %s" % (atype, aname)
                                for atype, aname, opt in args])
            argstr += ", PyArena *arena"
        else:
            argstr = "PyArena *arena"
        margs = "a0"
        for i in range(1, len(args)+1):
            margs += ", a%d" % i
        self.emit("#define %s(%s) _Py_%s(%s)" % (name, margs, name, margs), 0,
                reflow = 0)
        self.emit("%s _Py_%s(%s);" % (ctype, name, argstr), 0)

    def visitProduct(self, prod, name):
        self.emit_function(name, get_c_type(name),
                           self.get_args(prod.fields), [], union=0)
class FunctionVisitor(PrototypeVisitor):
    """Visitor to generate constructor functions for AST."""

    def emit_function(self, name, ctype, args, attrs, union=1):
        # emit the full C constructor body (argument checks, arena
        # allocation, field assignments) instead of just the prototype
        def emit(s, depth=0, reflow=1):
            self.emit(s, depth, reflow)
        argstr = ", ".join(["%s %s" % (atype, aname)
                            for atype, aname, opt in args + attrs])
        if argstr:
            argstr += ", PyArena *arena"
        else:
            argstr = "PyArena *arena"
        self.emit("%s" % ctype, 0)
        emit("%s(%s)" % (name, argstr))
        emit("{")
        emit("%s p;" % ctype, 1)
        for argtype, argname, opt in args:
            # XXX hack alert: false is allowed for a bool
            if not opt and not (argtype == "bool" or argtype == "int"):
                # required pointer arguments get a NULL check raising ValueError
                emit("if (!%s) {" % argname, 1)
                emit("PyErr_SetString(PyExc_ValueError,", 2)
                msg = "field %s is required for %s" % (argname, name)
                emit('                "%s");' % msg,
                     2, reflow=0)
                emit('return NULL;', 2)
                emit('}', 1)

        emit("p = (%s)PyArena_Malloc(arena, sizeof(*p));" % ctype, 1);
        emit("if (!p) {", 1)
        emit("PyErr_NoMemory();", 2)
        emit("return NULL;", 2)
        emit("}", 1)
        if union:
            self.emit_body_union(name, args, attrs)
        else:
            self.emit_body_struct(name, args, attrs)
        emit("return p;", 1)
        emit("}")
        emit("")

    def emit_body_union(self, name, args, attrs):
        # sum constructors set the kind tag and fill the union member
        def emit(s, depth=0, reflow=1):
            self.emit(s, depth, reflow)
        emit("p->kind = %s_kind;" % name, 1)
        for argtype, argname, opt in args:
            emit("p->v.%s.%s = %s;" % (name, argname, argname), 1)
        for argtype, argname, opt in attrs:
            emit("p->%s = %s;" % (argname, argname), 1)

    def emit_body_struct(self, name, args, attrs):
        # product constructors assign struct members directly
        def emit(s, depth=0, reflow=1):
            self.emit(s, depth, reflow)
        for argtype, argname, opt in args:
            emit("p->%s = %s;" % (argname, argname), 1)
        assert not attrs
class PickleVisitor(EmitVisitor):
    """Walks every definition in the module; all per-node hooks are no-ops
    here, so subclasses only override the ones they care about."""

    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)

    def visitType(self, type):
        self.visit(type.value, type.name)

    def visitSum(self, sum, name):
        pass

    # NOTE(review): the parameter is named 'sum' but receives a Product node
    def visitProduct(self, sum, name):
        pass

    def visitConstructor(self, cons, name):
        pass

    def visitField(self, sum):
        pass
class MarshalPrototypeVisitor(PickleVisitor):
    # Emit forward declarations for the static marshal_write_* helpers,
    # one per ASDL type.

    def prototype(self, sum, name):
        ctype = get_c_type(name)
        self.emit("static int marshal_write_%s(PyObject **, int *, %s);"
                  % (name, ctype), 0)

    # Sums and products get identical prototypes.
    visitProduct = visitSum = prototype
class PyTypesDeclareVisitor(PickleVisitor):
    # Emit the forward declarations used by the generated C file: one
    # PyTypeObject* and one ast2obj_* converter per ASDL type, plus the
    # field/attribute name tables consulted by make_type()/add_attributes().

    def visitProduct(self, prod, name):
        self.emit("static PyTypeObject *%s_type;" % name, 0)
        self.emit("static PyObject* ast2obj_%s(void*);" % name, 0)
        if prod.fields:
            self.emit("static char *%s_fields[]={" % name,0)
            for f in prod.fields:
                self.emit('"%s",' % f.name, 1)
            self.emit("};", 0)

    def visitSum(self, sum, name):
        self.emit("static PyTypeObject *%s_type;" % name, 0)
        if sum.attributes:
            self.emit("static char *%s_attributes[] = {" % name, 0)
            for a in sum.attributes:
                self.emit('"%s",' % a.name, 1)
            self.emit("};", 0)
        ptype = "void*"
        if is_simple(sum):
            # Simple sums (all constructors field-less) are represented by
            # per-constructor singleton objects, and their converter takes
            # the C enum value directly instead of a node pointer.
            ptype = get_c_type(name)
            tnames = []
            for t in sum.types:
                tnames.append(str(t.name)+"_singleton")
            tnames = ", *".join(tnames)
            self.emit("static PyObject *%s;" % tnames, 0)
        self.emit("static PyObject* ast2obj_%s(%s);" % (name, ptype), 0)
        for t in sum.types:
            self.visitConstructor(t, name)

    def visitConstructor(self, cons, name):
        self.emit("static PyTypeObject *%s_type;" % cons.name, 0)
        if cons.fields:
            self.emit("static char *%s_fields[]={" % cons.name, 0)
            for t in cons.fields:
                self.emit('"%s",' % t.name, 1)
            self.emit("};",0)
class PyTypesVisitor(PickleVisitor):
    # Emit the static C helpers shared by all generated ast2obj_* code and
    # the init_types() function that builds the Python type objects.

    def visitModule(self, mod):
        # NOTE(review): in the emitted helpers below, ast2obj_int takes a
        # `bool` parameter, so integer fields routed through it would be
        # truncated to 0/1 — confirm against upstream, which later widened
        # this parameter.
        self.emit("""
static PyTypeObject* make_type(char *type, PyTypeObject* base, char**fields, int num_fields)
{
PyObject *fnames, *result;
int i;
if (num_fields) {
fnames = PyTuple_New(num_fields);
if (!fnames) return NULL;
} else {
fnames = Py_None;
Py_INCREF(Py_None);
}
for(i=0; i < num_fields; i++) {
PyObject *field = PyString_FromString(fields[i]);
if (!field) {
Py_DECREF(fnames);
return NULL;
}
PyTuple_SET_ITEM(fnames, i, field);
}
result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){sOss}",
type, base, "_fields", fnames, "__module__", "_ast");
Py_DECREF(fnames);
return (PyTypeObject*)result;
}
static int add_attributes(PyTypeObject* type, char**attrs, int num_fields)
{
int i, result;
PyObject *s, *l = PyList_New(num_fields);
if (!l) return 0;
for(i = 0; i < num_fields; i++) {
s = PyString_FromString(attrs[i]);
if (!s) {
Py_DECREF(l);
return 0;
}
PyList_SET_ITEM(l, i, s);
}
result = PyObject_SetAttrString((PyObject*)type, "_attributes", l) >= 0;
Py_DECREF(l);
return result;
}
static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*))
{
int i, n = asdl_seq_LEN(seq);
PyObject *result = PyList_New(n);
PyObject *value;
if (!result)
return NULL;
for (i = 0; i < n; i++) {
value = func(asdl_seq_GET(seq, i));
if (!value) {
Py_DECREF(result);
return NULL;
}
PyList_SET_ITEM(result, i, value);
}
return result;
}
static PyObject* ast2obj_object(void *o)
{
if (!o)
o = Py_None;
Py_INCREF((PyObject*)o);
return (PyObject*)o;
}
#define ast2obj_identifier ast2obj_object
#define ast2obj_string ast2obj_object
static PyObject* ast2obj_bool(bool b)
{
return PyBool_FromLong(b);
}
static PyObject* ast2obj_int(bool b)
{
return PyInt_FromLong(b);
}
""", 0, reflow=False)
        # init_types() is idempotent: the `initialized` static guards
        # against repeated calls (e.g. from PyAST_mod2obj).
        self.emit("static int init_types(void)",0)
        self.emit("{", 0)
        self.emit("static int initialized;", 1)
        self.emit("if (initialized) return 1;", 1)
        self.emit('AST_type = make_type("AST", &PyBaseObject_Type, NULL, 0);', 1)
        for dfn in mod.dfns:
            self.visit(dfn)
        self.emit("initialized = 1;", 1)
        self.emit("return 1;", 1);
        self.emit("}", 0)

    def visitProduct(self, prod, name):
        if prod.fields:
            fields = name.value+"_fields"
        else:
            fields = "NULL"
        self.emit('%s_type = make_type("%s", AST_type, %s, %d);' %
                  (name, name, fields, len(prod.fields)), 1)
        self.emit("if (!%s_type) return 0;" % name, 1)

    def visitSum(self, sum, name):
        # The sum itself becomes an abstract base type; each constructor
        # becomes a subtype of it (see visitConstructor).
        self.emit('%s_type = make_type("%s", AST_type, NULL, 0);' % (name, name), 1)
        self.emit("if (!%s_type) return 0;" % name, 1)
        if sum.attributes:
            self.emit("if (!add_attributes(%s_type, %s_attributes, %d)) return 0;" %
                      (name, name, len(sum.attributes)), 1)
        else:
            self.emit("if (!add_attributes(%s_type, NULL, 0)) return 0;" % name, 1)
        simple = is_simple(sum)
        for t in sum.types:
            self.visitConstructor(t, name, simple)

    def visitConstructor(self, cons, name, simple):
        if cons.fields:
            fields = cons.name.value+"_fields"
        else:
            fields = "NULL"
        self.emit('%s_type = make_type("%s", %s_type, %s, %d);' %
                  (cons.name, cons.name, name, fields, len(cons.fields)), 1)
        self.emit("if (!%s_type) return 0;" % cons.name, 1)
        if simple:
            # Field-less constructors of a simple sum are singletons.
            self.emit("%s_singleton = PyType_GenericNew(%s_type, NULL, NULL);" %
                      (cons.name, cons.name), 1)
            self.emit("if (!%s_singleton) return 0;" % cons.name, 1)
class ASTModuleVisitor(PickleVisitor):
    # Emit init_ast(): create the _ast extension module and publish every
    # generated type object in its namespace.

    def visitModule(self, mod):
        self.emit("PyMODINIT_FUNC", 0)
        self.emit("init_ast(void)", 0)
        self.emit("{", 0)
        self.emit("PyObject *m, *d;", 1)
        self.emit("if (!init_types()) return;", 1)
        self.emit('m = Py_InitModule3("_ast", NULL, NULL);', 1)
        self.emit("if (!m) return;", 1)
        self.emit("d = PyModule_GetDict(m);", 1)
        self.emit('if (PyDict_SetItemString(d, "AST", (PyObject*)AST_type) < 0) return;', 1)
        self.emit('if (PyModule_AddIntConstant(m, "PyCF_ONLY_AST", PyCF_ONLY_AST) < 0)', 1)
        self.emit("return;", 2)
        # Value of version: "$Revision: 53490 $"
        # The slice extracts just the revision number from that string.
        self.emit('if (PyModule_AddStringConstant(m, "__version__", "%s") < 0)' % mod.version.value[12:-3], 1)
        self.emit("return;", 2)
        for dfn in mod.dfns:
            self.visit(dfn)
        self.emit("}", 0)

    def visitProduct(self, prod, name):
        self.addObj(name)

    def visitSum(self, sum, name):
        self.addObj(name)
        for t in sum.types:
            self.visitConstructor(t, name)

    def visitConstructor(self, cons, name):
        self.addObj(cons.name)

    def addObj(self, name):
        # Publish one generated type object under its ASDL name.
        self.emit('if (PyDict_SetItemString(d, "%s", (PyObject*)%s_type) < 0) return;' % (name, name), 1)
# Sequences of these ASDL types have specialized C representations, so the
# generic asdl_seq handling can be skipped for them during specialization.
_SPECIALIZED_SEQUENCES = ('stmt', 'expr')
def find_sequence(fields, doing_specialization):
    """Return True if any field uses a sequence needing generic handling.

    When *doing_specialization* is true, sequence fields whose element type
    has a specialized C sequence representation do not count.
    """
    return any(
        f.seq and (not doing_specialization
                   or str(f.type) not in _SPECIALIZED_SEQUENCES)
        for f in fields
    )
def has_sequence(types, doing_specialization):
    """Return True if any constructor in *types* has a sequence field
    that needs generic handling (see find_sequence)."""
    return any(find_sequence(t.fields, doing_specialization) for t in types)
class StaticVisitor(PickleVisitor):
    # Subclasses override CODE with a fixed block of C source to emit.
    CODE = '''Very simple, always emit this static code. Overide CODE'''

    def visit(self, object):
        # Ignore the tree entirely; just dump the fixed CODE text verbatim.
        self.emit(self.CODE, 0, reflow=False)
class ObjVisitor(PickleVisitor):
    # Emit the ast2obj_* functions that convert C AST nodes into Python
    # objects of the generated _ast types.

    def func_begin(self, name):
        # Common prologue: cast the void* argument and map NULL to None.
        ctype = get_c_type(name)
        self.emit("PyObject*", 0)
        self.emit("ast2obj_%s(void* _o)" % (name), 0)
        self.emit("{", 0)
        self.emit("%s o = (%s)_o;" % (ctype, ctype), 1)
        self.emit("PyObject *result = NULL, *value = NULL;", 1)
        self.emit('if (!o) {', 1)
        self.emit("Py_INCREF(Py_None);", 2)
        self.emit('return Py_None;', 2)
        self.emit("}", 1)
        self.emit('', 0)

    def func_end(self):
        # Common epilogue: success return plus the shared error-cleanup
        # label targeted by the emitted "goto failed" statements.
        self.emit("return result;", 1)
        self.emit("failed:", 0)
        self.emit("Py_XDECREF(value);", 1)
        self.emit("Py_XDECREF(result);", 1)
        self.emit("return NULL;", 1)
        self.emit("}", 0)
        self.emit("", 0)

    def visitSum(self, sum, name):
        if is_simple(sum):
            self.simpleSum(sum, name)
            return
        self.func_begin(name)
        # Dispatch on the kind tag, then attach the sum-level attributes
        # (e.g. lineno) that every constructor shares.
        self.emit("switch (o->kind) {", 1)
        for i in range(len(sum.types)):
            t = sum.types[i]
            self.visitConstructor(t, i + 1, name)
        self.emit("}", 1)
        for a in sum.attributes:
            self.emit("value = ast2obj_%s(o->%s);" % (a.type, a.name), 1)
            self.emit("if (!value) goto failed;", 1)
            self.emit('if (PyObject_SetAttrString(result, "%s", value) < 0)' % a.name, 1)
            self.emit('goto failed;', 2)
            self.emit('Py_DECREF(value);', 1)
        self.func_end()

    def simpleSum(self, sum, name):
        # Field-less sums convert by returning the pre-built singleton for
        # the enum value, so no allocation is needed.
        self.emit("PyObject* ast2obj_%s(%s_ty o)" % (name, name), 0)
        self.emit("{", 0)
        self.emit("switch(o) {", 1)
        for t in sum.types:
            self.emit("case %s:" % t.name, 2)
            self.emit("Py_INCREF(%s_singleton);" % t.name, 3)
            self.emit("return %s_singleton;" % t.name, 3)
        self.emit("}", 1)
        self.emit("return NULL; /* cannot happen */", 1)
        self.emit("}", 0)

    def visitProduct(self, prod, name):
        self.func_begin(name)
        self.emit("result = PyType_GenericNew(%s_type, NULL, NULL);" % name, 1);
        self.emit("if (!result) return NULL;", 1)
        for field in prod.fields:
            self.visitField(field, name, 1, True)
        self.func_end()

    def visitConstructor(self, cons, enum, name):
        # One switch case per constructor of a non-simple sum.
        self.emit("case %s_kind:" % cons.name, 1)
        self.emit("result = PyType_GenericNew(%s_type, NULL, NULL);" % cons.name, 2);
        self.emit("if (!result) goto failed;", 2)
        for f in cons.fields:
            self.visitField(f, cons.name, 2, False)
        self.emit("break;", 2)

    def visitField(self, field, name, depth, product):
        # Convert one field to a Python object and set it as an attribute
        # on `result`.  Products store fields directly on the node, sums
        # under the per-constructor union member.
        def emit(s, d):
            self.emit(s, depth + d)
        if product:
            value = "o->%s" % field.name
        else:
            value = "o->v.%s.%s" % (name, field.name)
        self.set(field, value, depth)
        emit("if (!value) goto failed;", 0)
        emit('if (PyObject_SetAttrString(result, "%s", value) == -1)' % field.name, 0)
        emit("goto failed;", 1)
        emit("Py_DECREF(value);", 0)

    def emitSeq(self, field, value, depth, emit):
        # NOTE(review): no caller in this chunk, and the self.set() call
        # below passes four positional arguments while set() accepts three
        # (field, value, depth) — invoking this would raise a TypeError.
        # Looks like dead code; confirm before relying on it.
        emit("seq = %s;" % value, 0)
        emit("n = asdl_seq_LEN(seq);", 0)
        emit("value = PyList_New(n);", 0)
        emit("if (!value) goto failed;", 0)
        emit("for (i = 0; i < n; i++) {", 0)
        self.set("value", field, "asdl_seq_GET(seq, i)", depth + 1)
        emit("if (!value1) goto failed;", 1)
        emit("PyList_SET_ITEM(value, i, value1);", 1)
        emit("value1 = NULL;", 1)
        emit("}", 0)

    def set(self, field, value, depth):
        # Emit the C statement(s) that compute `value` for one field.
        if field.seq:
            # XXX should really check for is_simple, but that requires a symbol table
            if field.type.value == "cmpop":
                # While the sequence elements are stored as void*,
                # ast2obj_cmpop expects an enum
                self.emit("{", depth)
                self.emit("int i, n = asdl_seq_LEN(%s);" % value, depth+1)
                self.emit("value = PyList_New(n);", depth+1)
                self.emit("if (!value) goto failed;", depth+1)
                self.emit("for(i = 0; i < n; i++)", depth+1)
                # This cannot fail, so no need for error handling
                self.emit("PyList_SET_ITEM(value, i, ast2obj_cmpop((cmpop_ty)asdl_seq_GET(%s, i)));" % value,
                          depth+2, reflow=False)
                self.emit("}", depth)
            else:
                self.emit("value = ast2obj_list(%s, ast2obj_%s);" % (value, field.type), depth)
        else:
            # NOTE(review): ctype is computed but never used here.
            ctype = get_c_type(field.type)
            self.emit("value = ast2obj_%s(%s);" % (field.type, value), depth, reflow=False)
class PartingShots(StaticVisitor):
    # Static epilogue: the public entry point that converts a mod_ty into
    # its Python object representation, initializing the types on demand.
    CODE = """
PyObject* PyAST_mod2obj(mod_ty t)
{
init_types();
return ast2obj_mod(t);
}
"""
class ChainOfVisitors:
    """Run several visitor passes over the same object, in order."""

    def __init__(self, *visitors):
        # The chain preserves the order in which the passes were supplied.
        self.visitors = visitors

    def visit(self, object):
        # Each pass visits the whole object, then emits a blank line so
        # the generated sections stay visually separated.
        for visitor in self.visitors:
            visitor.visit(object)
            visitor.emit("", 0)
def main(srcfile):
    # Parse the ASDL description in srcfile and generate <name>-ast.h
    # (when INC_DIR is set) and/or <name>-ast.c (when SRC_DIR is set).
    # Python 2 only: uses `print >> f` statement syntax.
    argv0 = sys.argv[0]
    components = argv0.split(os.sep)
    # Keep only the last two path components for the banner comment.
    argv0 = os.sep.join(components[-2:])
    auto_gen_msg = '/* File automatically generated by %s */\n' % argv0
    mod = asdl.parse(srcfile)
    if not asdl.check(mod):
        sys.exit(1)
    if INC_DIR:
        p = "%s/%s-ast.h" % (INC_DIR, mod.name)
        f = open(p, "wb")
        print >> f, auto_gen_msg
        print >> f, '#include "asdl.h"\n'
        # Header: typedefs, struct layouts, constructor prototypes.
        c = ChainOfVisitors(TypeDefVisitor(f),
                            StructVisitor(f),
                            PrototypeVisitor(f),
                            )
        c.visit(mod)
        print >>f, "PyObject* PyAST_mod2obj(mod_ty t);"
        f.close()
    if SRC_DIR:
        p = os.path.join(SRC_DIR, str(mod.name) + "-ast.c")
        f = open(p, "wb")
        print >> f, auto_gen_msg
        print >> f, '#include "Python.h"'
        print >> f, '#include "%s-ast.h"' % mod.name
        print >> f
        print >>f, "static PyTypeObject* AST_type;"
        # C source: declarations, type setup, constructors, converters,
        # module init, and the static epilogue — in dependency order.
        v = ChainOfVisitors(
            PyTypesDeclareVisitor(f),
            PyTypesVisitor(f),
            FunctionVisitor(f),
            ObjVisitor(f),
            ASTModuleVisitor(f),
            PartingShots(f),
            )
        v.visit(mod)
        f.close()
if __name__ == "__main__":
    import sys
    import getopt

    # -h <dir> selects header output, -c <dir> selects C source output;
    # exactly one of the two must be given, plus one input .asdl file.
    # Python 2 only: uses the `print` statement.
    INC_DIR = ''
    SRC_DIR = ''
    opts, args = getopt.getopt(sys.argv[1:], "h:c:")
    if len(opts) != 1:
        print "Must specify exactly one output file"
        sys.exit(1)
    for o, v in opts:
        if o == '-h':
            INC_DIR = v
        if o == '-c':
            SRC_DIR = v
    if len(args) != 1:
        print "Must specify single input file"
        sys.exit(1)
    main(args[0])
| TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/Python-2.5.2/Parser/asdl_c.py | Python | mit | 25,788 | [
"VisIt"
] | 75f77e7378cd62a1f1d07dbb33b8c1f9b5c35b0ffbf7a18ca28bfe1f7a9d4612 |
# -*- coding: utf-8 -*-
"""
NeuroTools.signals.spikes
==================
A collection of functions to create, manipulate and play with spike signals.
Classes
-------
SpikeTrain - object representing a spike train, for one cell. Useful for plots,
calculations such as ISI, CV, mean rate(), ...
SpikeList - object representing the activity of a population of neurons. Functions as a
dictionary of SpikeTrain objects, with methods to compute firing rate,
ISI, CV, cross-correlations, and so on.
Functions
---------
load_spikelist - load a SpikeList object from a file. Expects a particular format.
Can also load data in a different format, but then you have
to write your own File object that will know how to read the data (see io.py)
load - a generic loader for all the previous load methods.
See also NeuroTools.signals.analogs
"""
import os, re, numpy
import scipy.signal
import logging
from NeuroTools import check_dependency, check_numpy_version
from NeuroTools import analysis
from NeuroTools.io import *
from NeuroTools.plotting import get_display, set_axis_limits, set_labels, SimpleMultiplot, progress_bar
from .pairs import *
from .intervals import *
from NeuroTools import check_dependency
# Optional-plotting setup: probe for matplotlib/pylab and remember error
# messages to show if a plotting method is called without them installed.
HAVE_MATPLOTLIB = check_dependency('matplotlib')
if HAVE_MATPLOTLIB:
    import matplotlib
    # Non-interactive Agg backend so plotting works without a display.
    matplotlib.use('Agg')

HAVE_PYLAB = check_dependency('pylab')
if HAVE_PYLAB:
    import pylab
else:
    PYLAB_ERROR = "The pylab package was not detected"
if not HAVE_MATPLOTLIB:
    MATPLOTLIB_ERROR = "The matplotlib package was not detected"

# Version-dependent numpy behaviour used by time_axis()/time_histogram().
newnum = check_numpy_version()

# check whether numpy.histogram supports the deprecated "new" keyword.
hist_new = True
try:
    numpy.histogram(numpy.array([1,2,3]), new=True)
except TypeError:
    hist_new = False
class SpikeTrain(object):
"""
SpikeTrain(spikes_times, t_start=None, t_stop=None)
This class defines a spike train as a list of times events.
Event times are given in a list (sparse representation) in milliseconds.
Inputs:
spike_times - a list/numpy array of spike times (in milliseconds)
t_start - beginning of the SpikeTrain (if not, this is infered)
t_stop - end of the SpikeTrain (if not, this is infered)
Examples:
>> s1 = SpikeTrain([0.0, 0.1, 0.2, 0.5])
>> s1.isi()
array([ 0.1, 0.1, 0.3])
>> s1.mean_rate()
8.0
>> s1.cv_isi()
0.565685424949
"""
#######################################################################
## Constructor and key methods to manipulate the SpikeTrain objects ##
#######################################################################
def __init__(self, spike_times, t_start=None, t_stop=None):
    #TODO: add information about sampling rate at time of creation
    """
    Constructor of the SpikeTrain object

    Inputs:
        spike_times - a list/numpy array of spike times (in milliseconds)
        t_start     - beginning of the SpikeTrain; if None it is inferred
                      from the data (see below)
        t_stop      - end of the SpikeTrain; if None it is inferred

    Raises:
        ValueError - if any time is negative or outside [t_start, t_stop]
        Exception  - if t_start >= t_stop

    See also
        SpikeTrain
    """
    self.t_start = t_start
    self.t_stop = t_stop
    # Spike times are stored as a float32 array (a copy of the input).
    self.spike_times = numpy.array(spike_times, numpy.float32)

    # If t_start is not None, we resize the spike_train keeping only
    # the spikes with t >= t_start
    if self.t_start is not None:
        self.spike_times = numpy.extract((self.spike_times >= self.t_start), self.spike_times)

    # If t_stop is not None, we resize the spike_train keeping only
    # the spikes with t <= t_stop
    if self.t_stop is not None:
        self.spike_times = numpy.extract((self.spike_times <= self.t_stop), self.spike_times)

    # We sort the spike_times. May be slower, but is necessary by the way for quite a
    # lot of methods...
    self.spike_times = numpy.sort(self.spike_times, kind="quicksort")
    # Here we deal with the t_start and t_stop values if the SpikeTrain
    # is empty, with only one element or several elements, if we
    # need to guess t_start and t_stop
    # no element : t_start = 0, t_stop = 0.1
    # 1 element  : t_start = time, t_stop = time + 0.1
    # several    : t_start = min(time), t_stop = max(time)
    size = len(self.spike_times)
    if size == 0:
        if self.t_start is None:
            self.t_start = 0
        if self.t_stop is None:
            self.t_stop = 0.1
    elif size == 1:  # spike list may be empty
        if self.t_start is None:
            self.t_start = self.spike_times[0]
        if self.t_stop is None:
            self.t_stop = self.spike_times[0] + 0.1
    elif size > 1:
        if self.t_start is None:
            self.t_start = numpy.min(self.spike_times)
        if numpy.any(self.spike_times < self.t_start):
            raise ValueError("Spike times must not be less than t_start")
        if self.t_stop is None:
            self.t_stop = numpy.max(self.spike_times)
        if numpy.any(self.spike_times > self.t_stop):
            raise ValueError("Spike times must not be greater than t_stop")

    # Final sanity checks on the (possibly inferred) time window.
    if self.t_start >= self.t_stop:
        raise Exception("Incompatible time interval : t_start = %s, t_stop = %s" % (self.t_start, self.t_stop))
    if self.t_start < 0:
        raise ValueError("t_start must not be negative")
    if numpy.any(self.spike_times < 0):
        raise ValueError("Spike times must not be negative")
def __str__(self):
    # Delegate to numpy's array formatting of the spike times.
    return str(self.spike_times)
def __del__(self):
    # Drop the (potentially large) spike-time array explicitly on deletion.
    del self.spike_times
def __len__(self):
    # Number of spikes in the train.
    return len(self.spike_times)
def __getslice__(self, i, j):
    """
    Return a sublist of the spike_times vector of the SpikeTrain

    NOTE(review): __getslice__ is a Python 2-only protocol (removed in
    Python 3, where __getitem__ receives a slice object instead).
    """
    return self.spike_times[i:j]
def time_parameters(self):
    """
    Return the time parameters of the SpikeTrain as a (t_start, t_stop)
    tuple, in ms.
    """
    return (self.t_start, self.t_stop)
def is_equal(self, spktrain):
    """Return True if *spktrain* has the same time window and the same
    spike times as this train.

    Inputs:
        spktrain - A SpikeTrain object

    See also:
        time_parameters()
    """
    same_window = self.time_parameters() == spktrain.time_parameters()
    same_spikes = numpy.all(self.spike_times == spktrain.spike_times)
    return same_spikes and same_window
def copy(self):
    """
    Return a copy of the SpikeTrain object

    The constructor re-builds a fresh float32 array from spike_times, so
    the copy does not share data with this train.
    """
    return SpikeTrain(self.spike_times, self.t_start, self.t_stop)
def duration(self):
    """
    Return the duration of the SpikeTrain (t_stop - t_start), in ms.
    """
    return self.t_stop - self.t_start
def merge(self, spiketrain, relative=False):
    """Absorb the spikes of *spiketrain* into this train (in place).

    The spike_times array stays sorted, and the time window is widened to
    cover both trains.

    Inputs:
        spiketrain - the SpikeTrain whose spikes should be added
        relative   - if True, relative_times() is called on both trains
                     before merging
    """
    if relative:
        self.relative_times()
        spiketrain.relative_times()
    # Merge-insert: searchsorted gives the positions that keep the
    # combined array sorted.
    positions = self.spike_times.searchsorted(spiketrain.spike_times)
    self.spike_times = numpy.insert(self.spike_times, positions,
                                    spiketrain.spike_times)
    # Widen the window to the union of both trains.
    self.t_start = min(self.t_start, spiketrain.t_start)
    self.t_stop = max(self.t_stop, spiketrain.t_stop)
def format(self, relative=False, quantized=False):
    """Return a transformed copy of the spike times.

    Inputs:
        relative  - if True, each spike time after the first becomes the
                    gap to its predecessor
        quantized - a positive divisor; spike times are divided by it and
                    rounded to integers

    Examples:
        >> st.spikes_times=[0, 2.1, 3.1, 4.4]
        >> st.format(relative=True)
        [0, 2.1, 1, 1.3]
        >> st.format(quantized=2)
        [0, 1, 2, 2]
    """
    times = self.spike_times.copy()
    if relative and len(times) > 0:
        # First entry is kept, the rest become successive differences.
        times = numpy.concatenate(([times[0]], numpy.diff(times)))
    if quantized:
        assert quantized > 0, "quantized must either be False or a positive number"
        times = (times / quantized).round().astype('int')
    return times
def jitter(self, jitter):
    """Return a new SpikeTrain whose spikes are jittered by zero-mean
    normal deviates of standard deviation *jitter* (ms).

    Examples:
        >> st_jittered = st.jitter(2.0)
    """
    noise = jitter * (numpy.random.normal(loc=0.0, scale=1.0,
                                          size=self.spike_times.shape[0]))
    return SpikeTrain(self.spike_times + noise,
                      t_start=self.t_start, t_stop=self.t_stop)
#######################################################################
## Analysis methods that can be applied to a SpikeTrain object ##
#######################################################################
def isi(self):
    """Return an array of the inter-spike intervals of the SpikeTrain.

    Examples:
        >> st.spikes_times=[0, 2.1, 3.1, 4.4]
        >> st.isi()
        [2.1, 1., 1.3]

    See also
        cv_isi
    """
    times = self.spike_times
    # Difference between each spike and its predecessor.
    return times[1:] - times[:-1]
def mean_rate(self, t_start=None, t_stop=None):
    """Return the mean firing rate between t_start and t_stop, in Hz.

    Inputs:
        t_start - in ms; defaults to the train's own t_start
        t_stop  - in ms; defaults to the train's own t_stop
        (given bounds are clipped to the train's window)

    Examples:
        >> spk.mean_rate()
        34.2
    """
    if t_start is None and t_stop is None:
        # Whole train: every spike counts.
        t_start, t_stop = self.t_start, self.t_stop
        n_spikes = len(self.spike_times)
    else:
        t_start = self.t_start if t_start is None else max(self.t_start, t_start)
        t_stop = self.t_stop if t_stop is None else min(self.t_stop, t_stop)
        in_window = (self.spike_times >= t_start) & (self.spike_times <= t_stop)
        n_spikes = len(numpy.where(in_window)[0])
    # Times are in ms, hence the factor 1000 to get spikes per second.
    return 1000. * n_spikes / (t_stop - t_start)
def cv_isi(self):
    """Return the coefficient of variation of the inter-spike intervals.

    CV is the ratio between the standard deviation and the mean of the
    ISI distribution: 0 for perfectly regular (clock-like) trains, 1 for
    Poisson-like firing.

    http://en.wikipedia.org/wiki/Coefficient_of_variation

    Returns numpy.nan (and logs a debug message) when there are not
    enough spikes to form any interval.

    See also
        isi, cv_kl
    """
    intervals = self.isi()
    if len(intervals) == 0:
        logging.debug("Warning, a CV can't be computed because there are not enough spikes")
        return numpy.nan
    return numpy.std(intervals) / numpy.mean(intervals)
def cv_kl(self, bins=100):
    """
    Provides a measure for the coefficient of variation to describe the
    regularity in spiking networks. It is based on the Kullback-Leibler
    divergence and decribes the difference between a given
    interspike-interval-distribution and an exponential one (representing
    poissonian spike trains) with equal mean.
    It yields 1 for poissonian spike trains and 0 for regular ones.

    Reference:
        http://invibe.net/LaurentPerrinet/Publications/Voges08fens

    Inputs:
        bins - the number of bins used to gather the ISI

    Examples:
        >> spklist.cv_kl(100)
        0.98

    See also:
        cv_isi
    """
    # Work in seconds so the rate term below (mean_rate is in Hz) matches.
    isi = self.isi() / 1000.
    if len(isi) < 2:
        logging.debug("Warning, a CV can't be computed because there are not enough spikes")
        return numpy.nan
    else:
        # NOTE(review): `normed=` was removed from numpy.histogram in
        # modern NumPy (replaced by `density=`), so these branches fail
        # there — confirm the supported NumPy versions.  hist_new/newnum
        # are module-level feature flags probed at import time.
        if not hist_new:
            proba_isi, xaxis = numpy.histogram(isi, bins=bins, normed=True)
            xaxis = xaxis[:-1]
        else:
            if newnum:
                proba_isi, xaxis = numpy.histogram(isi, bins=bins, normed=True, new=True)
                xaxis = xaxis[:-1]
            else:
                proba_isi, xaxis = numpy.histogram(isi, bins=bins, normed=True)
        # Renormalise to a discrete probability distribution over bins.
        proba_isi /= numpy.sum(proba_isi)
        bin_size = xaxis[1]-xaxis[0]
        # differential entropy: http://en.wikipedia.org/wiki/Differential_entropy
        KL = - numpy.sum(proba_isi * numpy.log(proba_isi+1e-16)) + numpy.log(bin_size)
        KL -= -numpy.log(self.mean_rate()) + 1.
        CVkl = numpy.exp(-KL)
        return CVkl
def fano_factor_isi(self):
    """Return the Fano factor of this train's inter-spike intervals.

    The Fano factor is the variance of the ISI divided by its mean.
    http://en.wikipedia.org/wiki/Fano_factor

    Raises an Exception when the train has no intervals.

    See also
        isi, cv_isi
    """
    intervals = self.isi()
    if len(intervals) == 0:
        raise Exception("No spikes in the SpikeTrain !")
    return numpy.var(intervals) / numpy.mean(intervals)
def time_axis(self, time_bin=10):
    """
    Return a time axis between t_start and t_stop according to a time_bin

    Inputs:
        time_bin - the bin width

    Examples:
        >> st = SpikeTrain(range(100),0.1,0,100)
        >> st.time_axis(10)
        [ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]

    See also
        time_histogram
    """
    # `newnum` is the module-level numpy-version flag; newer numpy's
    # histogram expects the closing bin edge to be included.
    if newnum:
        axis = numpy.arange(self.t_start, self.t_stop+time_bin, time_bin)
    else:
        axis = numpy.arange(self.t_start, self.t_stop, time_bin)
    return axis
def raster_plot(self, t_start=None, t_stop=None, interval=None, display=True, kwargs={}):
    """
    Generate a raster plot with the SpikeTrain in a subwindow of interest,
    defined by t_start and t_stop.

    Inputs:
        t_start  - in ms. If not defined, the one of the SpikeTrain object is used
        t_stop   - in ms. If not defined, the one of the SpikeTrain object is used
        interval - an Interval to slice with; built from t_start/t_stop if None
        display  - if True, a new figure is created. Could also be a subplot
        kwargs   - dictionary contening extra parameters that will be sent to the plot
                   function
                   (NOTE(review): mutable default dict — only read and
                   expanded via **kwargs here, so it is not mutated)

    Examples:
        >> z = subplot(221)
        >> st.raster_plot(display=z, kwargs={'color':'r'})

    See also
        SpikeList.raster_plot
    """
    if t_start is None: t_start = self.t_start
    if t_stop is None: t_stop = self.t_stop
    if interval is None:
        interval = Interval(t_start, t_stop)
    spikes = interval.slice_times(self.spike_times)
    subplot = get_display(display)
    if not subplot or not HAVE_PYLAB:
        # pylab was not importable at module load time.
        print(PYLAB_ERROR)
    else:
        if len(spikes) > 0:
            subplot.plot(spikes, numpy.ones(len(spikes)), ',', **kwargs)
        xlabel = "Time (ms)"
        ylabel = "Neurons #"
        set_labels(subplot, xlabel, ylabel)
        pylab.draw()
def time_offset(self, offset):
    """Shift the whole train by *offset* ms, in place.

    t_start, t_stop and every spike time are all moved by the same
    amount.

    Examples:
        >> spktrain = SpikeTrain(arange(0,100,10))
        >> spktrain.time_offset(50)
        >> spklist.spike_times
        [ 50., 60., 70., 80., 90., 100., 110.,
        120., 130., 140.]
    """
    # The three updates are independent of one another.
    self.spike_times += offset
    self.t_start += offset
    self.t_stop += offset
def time_slice(self, t_start, t_stop):
    """Return a new SpikeTrain restricted to [t_start, t_stop].

    t_start and t_stop may also be equal-length sequences of window
    starts/stops; spikes falling inside any window are kept and the new
    train spans from the first start to the last stop.

    Examples:
        >> spk = spktrain.time_slice(0,100)
        >> spk.t_start
        0
        >> spk.t_stop
        100
        >>> spk = spktrain.time_slice([20,70], [40,90])
        >>> spk.t_start
        20
        >>> spk.t_stop
        90
    """
    if hasattr(t_start, '__len__'):
        if len(t_start) != len(t_stop):
            raise ValueError("t_start has %d values and t_stop %d. They must be of the same length." % (len(t_start), len(t_stop)))
        # Union of the per-window membership masks.
        mask = False
        for lo, hi in zip(t_start, t_stop):
            mask = mask | ((self.spike_times >= lo) & (self.spike_times <= hi))
        t_start, t_stop = t_start[0], t_stop[-1]
    else:
        mask = (self.spike_times >= t_start) & (self.spike_times <= t_stop)
    return SpikeTrain(numpy.extract(mask, self.spike_times), t_start, t_stop)
def interval_slice(self, interval):
    """Return a new SpikeTrain keeping only the spikes inside *interval*.

    The new t_start and t_stop are the extrema of the Interval.

    Inputs:
        interval - The interval from which spikes should be extracted
    """
    kept = interval.slice_times(self.spike_times)
    start, stop = interval.time_parameters()
    return SpikeTrain(kept, start, stop)
def time_histogram(self, time_bin=10, normalized=True, binary=False):
    """
    Bin the spikes with the specified bin width. The first and last bins
    are calculated from `self.t_start` and `self.t_stop`.

    Inputs:
        time_bin   - the bin width for gathering spikes_times
        normalized - if True, the bin values are scaled to represent firing rates
                     in spikes/second, otherwise otherwise it's the number of spikes
                     per bin.
        binary     - if True, a binary matrix of 0/1 is returned

    Examples:
        >> st=SpikeTrain(range(0,100,5),0.1,0,100)
        >> st.time_histogram(10)
        [200, 200, 200, 200, 200, 200, 200, 200, 200, 200]
        >> st.time_histogram(10, normalized=False)
        [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]

    See also
        time_axis
    """
    bins = self.time_axis(time_bin)
    # newnum/hist_new are module-level flags probed at import time that
    # select the numpy.histogram calling convention for this numpy version.
    if newnum:
        if not hist_new:
            hist, edges = numpy.histogram(self.spike_times, bins)
        else:
            hist, edges = numpy.histogram(self.spike_times, bins, new=newnum)
    else:
        hist, edges = numpy.histogram(self.spike_times, bins)
    hist = hist.astype(float)
    if normalized:  # what about normalization if time_bin is a sequence?
        # ms -> s conversion turns counts per bin into spikes/second.
        hist *= 1000.0/float(time_bin)
    if binary:
        hist = hist.astype(bool).astype(int)
    return hist
def instantaneous_rate(self, resolution, kernel, norm, m_idx=None,
                       t_start=None, t_stop=None, acausal=True, trim=False):
    """
    Estimate instantaneous firing rate by kernel convolution.

    Inputs:
        resolution  - time stamp resolution of the spike times (ms). the
                      same resolution will be assumed for the kernel
        kernel      - kernel function used to convolve with
        norm        - normalization factor associated with kernel function
                      (see analysis.make_kernel for details)
        t_start     - start time of the interval used to compute the firing
                      rate
        t_stop      - end time of the interval used to compute the firing
                      rate (included)
        acausal     - if True, acausal filtering is used, i.e., the gravity
                      center of the filter function is aligned with the
                      spike to convolve
        m_idx       - index of the value in the kernel function vector that
                      corresponds to its gravity center. this parameter is
                      not mandatory for symmetrical kernels but it is
                      required when assymmetrical kernels are to be aligned
                      at their gravity center with the event times
        trim        - if True, only the 'valid' region of the convolved
                      signal are returned, i.e., the points where there
                      isn't complete overlap between kernel and spike train
                      are discarded
                      NOTE: if True and an assymetrical kernel is provided
                      the output will not be aligned with [t_start, t_stop]

    Returns (t_axis, r): the time axis and the estimated rate.

    See also:
        analysis.make_kernel
    """
    if t_start is None:
        t_start = self.t_start
    if t_stop is None:
        t_stop = self.t_stop
    if m_idx is None:
        # NOTE(review): plain `/` — float division under Python 3, which
        # would break the integer slicing below; Python 2 semantics assumed.
        m_idx = kernel.size / 2
    # Binary indicator vector of spike occurrences on the resolution grid.
    time_vector = numpy.zeros(int((t_stop - t_start)/resolution + 1))
    spikes_slice = self.spike_times[(self.spike_times >= t_start) & (
        self.spike_times <= t_stop)]
    for spike in spikes_slice:
        index = int((spike - t_start) / resolution)
        time_vector[index] = 1
    # Full convolution; the branches below cut it back to the requested
    # alignment/region.
    r = norm * scipy.signal.fftconvolve(time_vector, kernel, 'full')
    if acausal is True:
        if trim is False:
            r = r[m_idx:-(kernel.size - m_idx)]
            t_axis = numpy.linspace(t_start, t_stop, r.size)
            return t_axis, r
        elif trim is True:
            r = r[2 * m_idx:-2*(kernel.size - m_idx)]
            t_start = t_start + m_idx * resolution
            t_stop = t_stop - ((kernel.size) - m_idx) * resolution
            t_axis = numpy.linspace(t_start, t_stop, r.size)
            return t_axis, r
    if acausal is False:
        if trim is False:
            r = r[m_idx:-(kernel.size - m_idx)]
            # Causal filtering: shift the axis by the kernel center.
            t_axis = (numpy.linspace(t_start, t_stop, r.size) +
                      m_idx * resolution)
            return t_axis, r
        elif trim is True:
            r = r[2 * m_idx:-2*(kernel.size - m_idx)]
            t_start = t_start + m_idx * resolution
            t_stop = t_stop - ((kernel.size) - m_idx) * resolution
            t_axis = (numpy.linspace(t_start, t_stop, r.size) +
                      m_idx * resolution)
            return t_axis, r
def relative_times(self):
    """Shift the train so that it starts at time zero, in place.

    t_start is subtracted from spike_times and t_stop, and t_start is
    reset to 0.0.  A train already starting at 0 is left untouched.
    """
    if self.t_start == 0:
        return
    self.spike_times -= self.t_start
    self.t_stop -= self.t_start
    self.t_start = 0.0
def distance_victorpurpura(self, spktrain, cost=0.5):
    """Victor-Purpura distance between this spike train and another.

    The distance is the minimal total cost of transforming one train into
    the other using three operations: insert a spike (cost 1), delete a
    spike (cost 1), or move a spike by dt ms (cost*|dt|).

    See J. D. Victor and K. P. Purpura,
        Nature and precision of temporal coding in visual cortex: a
        metric-space analysis.,
        J Neurophysiol, 76(2):1310-1326, 1996

    Inputs:
        spktrain - the other SpikeTrain
        cost     - The cost parameter. See the paper for more information

    Returns the distance (0 for identical trains when cost > 0).
    """
    nspk_1 = len(self)
    nspk_2 = len(spktrain)
    # Degenerate limits of the metric: with cost 0 only the spike counts
    # matter; with a huge cost moving a spike is always more expensive
    # than deleting and re-inserting it.
    if cost == 0:
        return abs(nspk_1-nspk_2)
    elif cost > 1e9:
        return nspk_1+nspk_2
    # Dynamic programming over the edit lattice (quadratic in spike count).
    scr = numpy.zeros((nspk_1+1, nspk_2+1))
    scr[:,0] = numpy.arange(0, nspk_1+1)
    scr[0,:] = numpy.arange(0, nspk_2+1)
    if nspk_1 > 0 and nspk_2 > 0:
        # `range` (not the Python-2-only `xrange`) keeps this method
        # working under Python 3 as well; under Python 2 it is
        # behaviourally equivalent here.
        for i in range(1, nspk_1+1):
            for j in range(1, nspk_2+1):
                # Cheapest of: delete, insert, or shift the spike pair.
                scr[i,j] = min(scr[i-1,j]+1, scr[i,j-1]+1)
                scr[i,j] = min(scr[i,j], scr[i-1,j-1]+cost*abs(self.spike_times[i-1]-spktrain.spike_times[j-1]))
    return scr[nspk_1, nspk_2]
def distance_kreuz(self, spktrain, dt=0.1):
    """Kreuz/Politi distance between this spike train and another.

    See Kreuz, T.; Haas, J.S.; Morelli, A.; Abarbanel, H.D.I. & Politi, A.
        Measuring spike train synchrony.
        J Neurosci Methods, 165:151-161, 2007

    Inputs:
        spktrain - the other SpikeTrain
        dt       - the bin width used to discretize the spike times

    Returns the mean absolute normalised ISI ratio (0.0 for identical
    trains).

    Examples:
        >> spktrain.KreuzDistance(spktrain2)

    See also
        VictorPurpuraDistance
    """
    # numpy.zeros requires an integer shape: older NumPy silently
    # truncated the float here, modern NumPy raises TypeError, so
    # truncate explicitly.
    N = int((self.t_stop-self.t_start)/dt)
    vec_1 = numpy.zeros(N, numpy.float32)
    vec_2 = numpy.zeros(N, numpy.float32)
    result = numpy.zeros(N, float)
    # Fill vec_1/vec_2 with the (discretised) current inter-spike interval
    # at every time bin.
    idx_spikes = numpy.array(self.spike_times/dt, int)
    previous_spike = 0
    if len(idx_spikes) > 0:
        for spike in idx_spikes[1:]:
            vec_1[previous_spike:spike] = (spike-previous_spike)
            previous_spike = spike
    idx_spikes = numpy.array(spktrain.spike_times/dt, int)
    previous_spike = 0
    if len(idx_spikes) > 0:
        for spike in idx_spikes[1:]:
            vec_2[previous_spike:spike] = (spike-previous_spike)
            previous_spike = spike
    # Signed, normalised ratio of the two local ISIs; 0 where they agree.
    idx = numpy.where(vec_1 < vec_2)[0]
    result[idx] = vec_1[idx]/vec_2[idx] - 1
    idx = numpy.where(vec_1 > vec_2)[0]
    result[idx] = -vec_2[idx]/vec_1[idx] + 1
    return numpy.sum(numpy.abs(result))/len(result)
def psth(self, events, time_bin=2, t_min=50, t_max=50, display=False, kwargs={}, average=True):
    """
    Return the psth of the spike times contained in the SpikeTrain according
    to selected events, on a time window [event - t_min, event + t_max].

    Inputs:
        events   - a SpikeTrain object (its spikes are the events) or a list
                   of times
        time_bin - the time bin (in ms) used to gather the spikes for the psth
        t_min    - time (>= 0) before each event, in ms (default 50)
        t_max    - time (>= 0) after each event, in ms (default 50)
        display  - if True, a new figure is created. Could also be a subplot.
        kwargs   - dictionary containing extra parameters that will be sent
                   to the plot function
        average  - if True, divide the summed histograms by the event count

    Examples:
        >> spk.psth(spktrain, t_min = 50, t_max = 150)
        >> spk.psth(range(0,1000,10), display=True)

    See also
        SpikeTrain.spike_histogram
    """
    if isinstance(events, SpikeTrain):
        events = events.spike_times
    assert (t_min >= 0) and (t_max >= 0), "t_min and t_max should be greater than 0"
    assert len(events) > 0, "events should not be empty and should contained at least one element"
    spk_hist = self.time_histogram(time_bin)
    subplot = get_display(display)
    count = 0
    # Window extents expressed in bins.
    t_min_l = numpy.floor(t_min / time_bin)
    t_max_l = numpy.floor(t_max / time_bin)
    t_start = numpy.floor(self.t_start / time_bin)
    t_stop = numpy.floor(self.t_stop / time_bin)
    # NB: the dead pre-allocated zeros array that was immediately replaced
    # by this list has been removed.
    result = []
    for ev in events:
        ev = numpy.floor(ev / time_bin)
        # Keep only events whose full window lies inside [t_start, t_stop].
        if ((ev - t_min_l) > t_start) and (ev + t_max_l) < t_stop:
            count += 1
            result += [spk_hist[(ev - t_min_l):ev + t_max_l]]
    result = numpy.array(result)
    # Guard: with no usable event, dividing by count would be meaningless
    # (and previously produced warnings/NaNs).
    if average and count > 0:
        result /= count
    if not subplot or not HAVE_PYLAB:
        return result
    else:
        xlabel = "Time (ms)"
        ylabel = "PSTH"
        time = numpy.linspace(-t_min, t_max, (t_min + t_max) / time_bin)
        set_labels(subplot, xlabel, ylabel)
        subplot.plot(time, result, c='k', **kwargs)
        xmin, xmax, ymin, ymax = subplot.axis()
        # Vertical marker at the event time.
        subplot.plot([0, 0], [ymin, ymax], c='r')
        set_axis_limits(subplot, -t_min, t_max, ymin, ymax)
        pylab.draw()
    return result
####################################################################
### TOO SPECIFIC METHOD ?
### Better documentation
####################################################################
def tuning_curve(self, var_array, normalized=False, method='sum'):
    """
    Calculate a firing-rate tuning curve with respect to some variable.

    Assumes that some variable, such as stimulus orientation, varies
    throughout the recording. The values taken by this variable should be
    supplied in a numpy array `var_array`. The spike train is binned
    according to the number of values in `var_array`, i.e. with bin width
    (`self.t_stop` - `self.t_start`)/len(var_array), so that each bin is
    associated with one value of the variable.

    Inputs:
        var_array  - sequence of variable values, one per time bin
        normalized - if False, responses are spike counts; if True, they
                     are firing rates
        method     - 'sum'  : summed response per variable value
                     'mean' : mean response per variable value
                     'max'  : maximum response per variable value

    Returns a dict mapping each distinct value in `var_array` to its
    aggregated response.

    Raises ValueError for an unknown `method`.
    """
    binwidth = (self.t_stop - self.t_start) / len(var_array)
    time_histogram = self.time_histogram(binwidth, normalized=normalized)
    assert len(time_histogram) == len(var_array)
    tuning_curve = {}
    counts = {}
    for k, x in zip(var_array, time_histogram):
        # 'in' instead of the deprecated dict.has_key().
        if k not in tuning_curve:
            tuning_curve[k] = 0
            counts[k] = 0
        if method in ('sum', 'mean'):
            tuning_curve[k] += x
            counts[k] += 1
        elif method == 'max':
            tuning_curve[k] = max(x, tuning_curve[k])
        else:
            # Previously a bare, message-less Exception.
            raise ValueError("method must be 'sum', 'mean' or 'max', got %r" % (method,))
    if method == 'mean':
        for k in tuning_curve.keys():
            tuning_curve[k] = tuning_curve[k] / float(counts[k])
    return tuning_curve
####################################################################
### TOO SPECIFIC METHOD ?
### Better documentation
####################################################################
def frequency_spectrum(self, time_bin):
    """
    Return the frequency spectrum of the time histogram together with the
    corresponding frequency axis (in Hz).
    """
    from NeuroTools import analysis
    # Spike counts per bin (un-normalized), then its simple spectrum.
    counts = self.time_histogram(time_bin, normalized=False)
    spectrum = analysis.simple_frequency_spectrum(counts)
    # Frequency resolution: one bin per 1000/duration Hz.
    bin_width_hz = 1000.0 / self.duration()
    frequencies = numpy.arange(len(spectrum)) * bin_width_hz
    return spectrum, frequencies
####################################################################
### TOO SPECIFIC METHOD ?
### Better documentation
####################################################################
def f1f0_ratio(self, time_bin, f_stim):
    """
    Return the F1/F0 amplitude ratio, where f_stim is the input stimulus
    frequency (Hz): F0 is the DC component of the spectrum, F1 the
    component at f_stim.
    """
    spectrum = self.frequency_spectrum(time_bin)[0]
    f0_amplitude = spectrum[0]
    bin_width_hz = 1000.0 / self.duration()
    try:
        f1_amplitude = spectrum[int(round(f_stim / bin_width_hz))]
    except IndexError:
        errmsg = "time_bin (%f) too large to estimate F1/F0 ratio for an input frequency of %f" % (time_bin, f_stim)
        errmsg += "\nFrequency_spectrum: %s" % spectrum
        raise Exception(errmsg)
    return f1_amplitude / f0_amplitude
class SpikeList(object):
"""
SpikeList(spikes, id_list, t_start=None, t_stop=None, dims=None)
Return a SpikeList object which will be a list of SpikeTrain objects.
Inputs:
spikes - a list of (id,time) tuples (id being in id_list)
id_list - the list of the ids of all recorded cells (needed for silent cells)
t_start - begining of the SpikeList, in ms. If None, will be infered from the data
t_stop - end of the SpikeList, in ms. If None, will be infered from the data
dims - dimensions of the recorded population, if not 1D population
t_start and t_stop are shared for all SpikeTrains object within the SpikeList
Examples:
>> sl = SpikeList([(0, 0.1), (1, 0.1), (0, 0.2)], range(2))
>> type( sl[0] )
<type SpikeTrain>
See also
load_spikelist
"""
#######################################################################
## Constructor and key methods to manipulate the SpikeList objects ##
#######################################################################
def __init__(self, spikes, id_list, t_start=None, t_stop=None, dims=None):
    """
    Constructor of the SpikeList object.

    Inputs:
        spikes  - list of (id, time) tuples, or an equivalent Nx2 numpy array
        id_list - ids of all recorded cells (silent cells get empty trains)
        t_start - begin of the SpikeList, in ms; inferred from data if None
        t_stop  - end of the SpikeList, in ms; inferred from data if None
        dims    - dimensions of the recorded population, if not 1D

    See also
        SpikeList, load_spikelist
    """
    self.t_start = t_start
    self.t_stop = t_stop
    self.dimensions = dims
    self.spiketrains = {}
    id_list = numpy.sort(id_list)
    # Implementation based on plain numpy arrays: faster for large spike
    # files, though not memory-optimal since no generator is used.
    if not isinstance(spikes, numpy.ndarray):  # convert to an array first
        spikes = numpy.array(spikes, numpy.float32)
    N = len(spikes)
    if N > 0:
        # Sort by cell id so each cell's spikes are contiguous.
        idx = numpy.argsort(spikes[:,0])
        spikes = spikes[idx]
        logging.debug("sorted spikes[:10,:] = %s" % str(spikes[:10,:]))
        # break_points marks where the id column changes: one run per cell.
        break_points = numpy.where(numpy.diff(spikes[:, 0]) > 0)[0] + 1
        break_points = numpy.concatenate(([0], break_points))
        break_points = numpy.concatenate((break_points, [N]))
        for idx in xrange(len(break_points)-1):
            id = spikes[break_points[idx], 0]
            if id in id_list:
                self.spiketrains[id] = SpikeTrain(spikes[break_points[idx]:break_points[idx+1], 1], self.t_start, self.t_stop)
    # Add empty trains for ids that produced no spikes.
    self.complete(id_list)
    if len(self) > 0 and (self.t_start is None or self.t_stop is None):
        self.__calc_startstop()
    del spikes
def __del__(self):
    """Explicitly drop every stored SpikeTrain when the list is destroyed."""
    for key in self.id_list:
        del self.spiketrains[key]
@property
def id_list(self):
    """
    Return a numpy integer array of all the cell ids contained in the
    SpikeList object.

    Examples
        >> spklist.id_list
        [0,1,2,3,....,9999]
    """
    # Wrap keys() in list(): numpy.array() does not accept a Py3 dict view
    # with an explicit dtype; identical behavior under Py2.
    return numpy.array(list(self.spiketrains.keys()), int)
def copy(self):
    """Return a shallow copy of this SpikeList (same trains, new container)."""
    duplicate = SpikeList([], [], self.t_start, self.t_stop, self.dimensions)
    for cell_id in self.id_list:
        duplicate.append(cell_id, self.spiketrains[cell_id])
    return duplicate
def __calc_startstop(self):
    """
    Infer missing t_start / t_stop from the data and propagate them.

    t_start and t_stop are shared by all neurons: the minimum t_start and
    maximum t_stop over all trains are taken, then written back onto every
    SpikeTrain so the whole list is consistent.

    Raises Exception when the SpikeList contains no trains.
    """
    if len(self) > 0:
        if self.t_start is None:
            # Earliest start over all trains becomes the shared t_start.
            start_times = numpy.array([self.spiketrains[idx].t_start for idx in self.id_list], numpy.float32)
            self.t_start = numpy.min(start_times)
            logging.debug("Warning, t_start is infered from the data : %f" %self.t_start)
            for id in self.spiketrains.keys():
                self.spiketrains[id].t_start = self.t_start
        if self.t_stop is None:
            # Latest stop over all trains becomes the shared t_stop.
            stop_times = numpy.array([self.spiketrains[idx].t_stop for idx in self.id_list], numpy.float32)
            self.t_stop = numpy.max(stop_times)
            logging.debug("Warning, t_stop is infered from the data : %f" %self.t_stop)
            for id in self.spiketrains.keys():
                self.spiketrains[id].t_stop = self.t_stop
    else:
        raise Exception("No SpikeTrains")
def __getitem__(self, id):
    """Return the SpikeTrain stored under the given cell id."""
    if id not in self.id_list:
        raise Exception("id %d is not present in the SpikeList. See id_list" %id)
    return self.spiketrains[id]
def __getslice__(self, i, j):
    """
    Return a new SpikeList containing all cells whose id satisfies
    i <= id < j. (Invoked by Python 2 slice syntax, e.g. spklist[10:20].)
    """
    # Bug fix: numpy.where() returns *positions* within id_list, not the ids
    # themselves; passing positions to id_slice selected the wrong cells.
    # Boolean masking yields the actual ids in range.
    ids = self.id_list[(self.id_list >= i) & (self.id_list < j)]
    return self.id_slice(ids)
#def __setslice__(self, i, j):
def __setitem__(self, id, spktrain):
    """Store spktrain under key id, then recompute the shared t_start/t_stop."""
    assert isinstance(spktrain, SpikeTrain), "A SpikeList object can only contain SpikeTrain objects"
    self.spiketrains[id] = spktrain
    self.__calc_startstop()
def __iter__(self):
    """Iterate over the stored SpikeTrain objects (values, not ids)."""
    return self.spiketrains.itervalues()
def __len__(self):
    """Return the number of SpikeTrains stored (one per cell id)."""
    return len(self.spiketrains)
def __sub_id_list(self, sub_list=None):
    """
    Internal helper returning a sublist of the SpikeList ids.

    Inputs:
        sub_list - None: all ids are returned;
                   int N: N ids picked at random;
                   otherwise: an explicit sequence of ids, returned as-is.

    Examples:
        >> self.__sub_id_list(50)
    """
    # 'is None' instead of '== None': when sub_list is a numpy array the
    # equality would be elementwise and its truth value would raise.
    if sub_list is None:
        return self.id_list
    if isinstance(sub_list, int):
        return numpy.random.permutation(self.id_list)[0:sub_list]
    return sub_list
def __select_with_pairs__(self, nb_pairs, pairs_generator):
    """
    Internal helper slicing two SpikeLists according to a list of pairs.

    Inputs:
        nb_pairs        - number of cell pairs desired
        pairs_generator - a pairs generator (RandomPairs, AutoPairs,
                          CustomPairs)

    Returns (spk1_slice, spk2_slice, pairs).
    """
    pairs = pairs_generator.get_pairs(nb_pairs)
    first_slice = pairs_generator.spk1.id_slice(pairs[:, 0])
    second_slice = pairs_generator.spk2.id_slice(pairs[:, 1])
    return first_slice, second_slice, pairs
def append(self, id, spktrain):
    """
    Add a SpikeTrain object to the SpikeList under a new id.

    Inputs:
        id       - the id of the new cell (must not already be present)
        spktrain - the SpikeTrain representing the new cell; it is sliced
                   to the SpikeList's (t_start, t_stop) window

    Examples
        >> st=SpikeTrain(range(0,100,5),0.1,0,100)
        >> spklist.append(999, st)

    See also
        concatenate, __setitem__
    """
    assert isinstance(spktrain, SpikeTrain), "A SpikeList object can only contain SpikeTrain objects"
    # Guard clause: refuse duplicates instead of silently overwriting.
    if id in self.id_list:
        raise Exception("id %d already present in SpikeList. Use __setitem__ (spk[id]=...) instead()" %id)
    self.spiketrains[id] = spktrain.time_slice(self.t_start, self.t_stop)
def time_parameters(self):
    """Return the (t_start, t_stop) tuple shared by every train in the list."""
    bounds = (self.t_start, self.t_stop)
    return bounds
def jitter(self, jitter):
    """
    Return a new SpikeList whose spike times are jittered by a normal
    distribution.

    Inputs:
        jitter - sigma of the normal distribution

    Examples:
        >> st_jittered = st.jitter(2.0)
    """
    jittered = SpikeList([], [], self.t_start, self.t_stop, self.dimensions)
    for cell_id in self.id_list:
        jittered.append(cell_id, self.spiketrains[cell_id].jitter(jitter))
    return jittered
def time_axis(self, time_bin):
    """
    Return a time axis between t_start and t_stop with step time_bin.

    Inputs:
        time_bin - the bin width

    See also
        spike_histogram
    """
    # 'newnum' is a module-level flag: newer numpy expects bin *edges*, so
    # one extra point is generated.
    if newnum:
        return numpy.arange(self.t_start, self.t_stop + time_bin, time_bin)
    return numpy.arange(self.t_start, self.t_stop, time_bin)
def concatenate(self, spklists):
    """
    Concatenate other SpikeLists into the current one.

    Inputs:
        spklists - a single SpikeList or a list of SpikeLists

    All lists must share this list's (t_start, t_stop) window, and no cell
    id may appear in more than one list.

    See also
        append, merge, __setitem__
    """
    if isinstance(spklists, SpikeList):
        spklists = [spklists]
    # Validate the time windows first so nothing is half-appended on error.
    for other in spklists:
        if other.time_parameters() != self.time_parameters():
            raise Exception("Spike Lists should have similar time_axis")
    for other in spklists:
        for cell_id in other.id_list:
            self.append(cell_id, other.spiketrains[cell_id])
def merge(self, spikelist, relative=False):
    """
    For each cell id in spikelist that matches an id in this SpikeList,
    merge the two SpikeTrains and store the result here. Trains with ids
    not yet present are appended.

    Inputs:
        spikelist - the SpikeList to merge into the current one
        relative  - if True, spike times are expressed relative to the
                    previous one

    Examples:
        >> spklist.merge(spklist2)

    See also:
        concatenate, append, __setitem__
    """
    for cell_id, train in spikelist.spiketrains.items():
        if cell_id in self.id_list:
            self.spiketrains[cell_id].merge(train, relative)
        else:
            if relative:
                train.relative_times()
            self.append(cell_id, train)
    self.__calc_startstop()
def complete(self, id_list):
    """
    Add an empty SpikeTrain for every id in id_list that is not already
    present in the SpikeList (i.e. register silent cells).

    Inputs:
        id_list - the id list that should be completed

    Examples:
        >> spklist.id_list
        [0,2,5]
        >> spklist.complete(arange(5))
        >> spklist.id_list
        [0,1,2,3,4]
    """
    wanted = set(id_list)
    missing_ids = wanted.difference(set(self.id_list))
    for cell_id in missing_ids:
        self.append(cell_id, SpikeTrain([], self.t_start, self.t_stop))
def id_slice(self, id_list):
    """
    Return a new SpikeList obtained by selecting particular ids.

    Inputs:
        id_list - an integer (then N random cells are selected) or a
                  sublist of the current ids

    The new SpikeList inherits the time parameters (t_start, t_stop).

    Examples:
        >> new_spklist = spklist.id_slice(5)

    See also
        time_slice, interval_slice
    """
    sliced = SpikeList([], [], self.t_start, self.t_stop, self.dimensions)
    for cell_id in self.__sub_id_list(id_list):
        try:
            sliced.append(cell_id, self.spiketrains[cell_id])
        except Exception:
            # Unknown or duplicate id: skip it, but leave a debug trace.
            logging.debug("id %d is not in the source SpikeList or already in the new one" %cell_id)
    return sliced
def time_slice(self, t_start, t_stop):
    """
    Return a new SpikeList obtained by slicing between t_start and t_stop.

    Inputs:
        t_start - beginning of the new SpikeList, in ms
        t_stop  - end of the new SpikeList, in ms

    See also
        id_slice, interval_slice
    """
    sliced = SpikeList([], [], t_start, t_stop, self.dimensions)
    for cell_id in self.id_list:
        sliced.append(cell_id, self.spiketrains[cell_id].time_slice(t_start, t_stop))
    sliced.__calc_startstop()
    return sliced
def interval_slice(self, interval):
    """
    Return a new SpikeList obtained by slicing with an Interval. The new
    t_start and t_stop are the extrema of the Interval.

    Inputs:
        interval - the Interval to slice with

    See also
        id_slice, time_slice
    """
    t_start, t_stop = interval.time_parameters()
    sliced = SpikeList([], [], t_start, t_stop, self.dimensions)
    for cell_id in self.id_list:
        sliced.append(cell_id, self.spiketrains[cell_id].interval_slice(interval))
    return sliced
def time_offset(self, offset):
    """
    Shift the whole SpikeList in time: t_start, t_stop and every
    SpikeTrain are moved by offset.

    Inputs:
        offset - the time offset, in ms

    Examples:
        >> spklist.t_start
        1000
        >> spklist.time_offset(50)
        >> spklist.t_start
        1050
    """
    self.t_start += offset
    self.t_stop += offset
    for cell_id in self.id_list:
        self.spiketrains[cell_id].time_offset(offset)
def id_offset(self, offset):
    """
    Shift every cell id in the SpikeList by offset.

    Inputs:
        offset - the id offset

    Examples:
        >> spklist.id_list
        [0,1,2,3,4]
        >> spklist.id_offset(10)
        >> spklist.id_list
        [10,11,12,13,14]
    """
    sorted_ids = numpy.sort(self.id_list)
    if offset > 0:
        # Walk from the largest id downwards so new keys never collide
        # with ids that are still to be moved.
        for cell_id in sorted_ids[::-1]:
            self.spiketrains[cell_id + offset] = self.spiketrains.pop(cell_id)
    if offset < 0:
        # Symmetric case: walk from the smallest id upwards.
        for cell_id in sorted_ids:
            self.spiketrains[cell_id + offset] = self.spiketrains.pop(cell_id)
def first_spike_time(self):
    """
    Return the time of the earliest real spike in the SpikeList.

    Raises Exception if no train contains any spike.
    """
    earliest = self.t_stop
    found_any = False
    for cell_id in self.id_list:
        train = self.spiketrains[cell_id]
        if len(train) > 0:
            found_any = True
            if train.spike_times[0] < earliest:
                earliest = train.spike_times[0]
    if not found_any:
        raise Exception("No spikes can be found in the SpikeList object !")
    return earliest
def last_spike_time(self):
    """
    Return the time of the latest real spike in the SpikeList.

    Raises Exception if no train contains any spike.
    """
    latest = self.t_start
    found_any = False
    for cell_id in self.id_list:
        train = self.spiketrains[cell_id]
        if len(train) > 0:
            found_any = True
            if train.spike_times[-1] > latest:
                latest = train.spike_times[-1]
    if not found_any:
        raise Exception("No spikes can be found in the SpikeList object !")
    return latest
def select_ids(self, criteria):
    """
    Return the list of cell ids matching a criteria expressed as a Python
    expression over a SpikeTrain named ``cell``.

    Inputs:
        criteria - a string evaluated for each train, e.g.
                   "cell.mean_rate() > 0" or "len(cell.spike_times) > 10"

    NOTE: the string is eval()'d — only pass trusted expressions.
    """
    matching = []
    for cell_id in self.id_list:
        cell = self.spiketrains[cell_id]  # 'cell' is the name criteria refers to
        if eval(criteria):
            matching.append(cell_id)
    return matching
def sort_by(self, criteria, descending=False):
    """
    Return an array of all the cell ids, sorted according to a criteria.

    Inputs:
        criteria   - a Python expression over a SpikeTrain named ``cell``,
                     e.g. 'cell.mean_rate()' (eval()'d — trusted input only)
        descending - if True, sort from max to min

    Examples:
        >> spk.sort_by('cell.cv_isi()', descending=True)
    """
    scores = numpy.zeros(len(self), float)
    for pos, cell_id in enumerate(self.id_list):
        cell = self.spiketrains[cell_id]  # 'cell' is the name criteria refers to
        scores[pos] = eval(criteria)
    ordered = self.id_list[numpy.argsort(scores)]
    if descending:
        return ordered[::-1]
    return ordered
def save(self, user_file):
    """
    Save the SpikeList in a text or binary file.

    Inputs:
        user_file - a file object with its own read/write method, or a
                    string (a StandardTextFile is then created). A
                    StandardPickleFile may also be provided.

    Examples:
        >> spk.save("spikes.txt")
        >> spk.save(StandardPickleFile("spikes.pck"))

    See also:
        DataHandler
    """
    handler = DataHandler(user_file, self)
    handler.save()
#######################################################################
## Analysis methods that can be applied to a SpikeTrain object ##
#######################################################################
def isi(self):
    """
    Return the list of ISI vectors, one per SpikeTrain in the SpikeList.

    See also:
        isi_hist
    """
    return [self.spiketrains[cell_id].isi() for cell_id in self.id_list]
def isi_hist(self, bins=50, display=False, kwargs={}):
    """
    Return the histogram of the ISIs pooled over all trains, normalized to
    a probability, or plot it.

    Inputs:
        bins    - number of bins (between min and max of the data) or a
                  list/array containing the lower edges of the bins
        display - if True, a new figure is created. Could also be a subplot
        kwargs  - extra parameters forwarded to the plot function

    Examples:
        >> z = subplot(221)
        >> spklist.isi_hist(10, display=z, kwargs={'color':'r'})

    See also:
        isi
    """
    # Pool every train's ISIs into one flat array.
    isis = numpy.concatenate(self.isi())
    # 'newnum'/'hist_new' are module-level flags selecting the calling
    # convention of numpy.histogram for the installed numpy version —
    # TODO confirm their exact semantics against the module header.
    if newnum:
        if not hist_new:
            values, xaxis = numpy.histogram(isis, bins=bins)
            xaxis = xaxis[:-1]  # drop the last edge so axis matches counts
        else:
            values, xaxis = numpy.histogram(isis, bins=bins, new=newnum)
            xaxis = xaxis[:-1]
    else:
        values, xaxis = numpy.histogram(isis, bins=bins, new=True)
    subplot = get_display(display)
    # Normalize counts to a probability distribution.
    values = values/float(values.sum())
    if not subplot or not HAVE_PYLAB:
        return values, xaxis
    else:
        xlabel = "Inter Spike Interval (ms)"
        ylabel = "Probability"
        set_labels(subplot, xlabel, ylabel)
        subplot.plot(xaxis, values, **kwargs)
        subplot.set_yticks([]) # arbitrary units
        pylab.draw()
def cv_isi(self, float_only=False):
    """
    Return an array with the ISI CV coefficient of each SpikeTrain in the
    SpikeList. NaN is produced when a train has too few spikes.

    Inputs:
        float_only - False by default. If True, NaN values are removed.

    Examples:
        >> spklist.cv_isi()
        [0.2,0.3,Nan,2.5,Nan,1.,2.5]
        >> spklist.cv_isi(True)
        [0.2,0.3,2.5,1.,2.5]

    See also:
        cv_isi_hist, cv_local, cv_kl, SpikeTrain.cv_isi
    """
    ids = self.id_list
    N = len(ids)
    cvs_isi = numpy.empty(N)
    # range() instead of the Py2-only xrange(): identical iteration here,
    # and the code also runs under Python 3.
    for idx in range(N):
        cvs_isi[idx] = self.spiketrains[ids[idx]].cv_isi()
    if float_only:
        cvs_isi = numpy.extract(numpy.logical_not(numpy.isnan(cvs_isi)), cvs_isi)
    return cvs_isi
def cv_kl(self, bins=50, float_only=False):
    """
    Return an array with the KL-based CV coefficient of each SpikeTrain in
    the SpikeList. NaN is produced when a train has too few spikes.

    Inputs:
        bins       - number of bins used to gather the ISIs
        float_only - False by default. If True, NaN values are removed.

    Examples:
        >> spklit.cv_kl(50)
        [0.4, Nan, 0.9, nan]
        >> spklist.cv_kl(50, True)
        [0.4, 0.9]

    See also:
        cv_isi_hist, cv_local, cv_isi, SpikeTrain.cv_kl
    """
    ids = self.id_list
    N = len(ids)
    cvs_kl = numpy.empty(N)
    # range() instead of the Py2-only xrange(): identical iteration here,
    # and the code also runs under Python 3.
    for idx in range(N):
        cvs_kl[idx] = self.spiketrains[ids[idx]].cv_kl(bins=bins)
    if float_only:
        cvs_kl = numpy.extract(numpy.logical_not(numpy.isnan(cvs_kl)), cvs_kl)
    return cvs_kl
def cv_isi_hist(self, bins=50, display=False, kwargs={}):
    """
    Return (or plot) the histogram of the per-train ISI CV coefficients,
    normalized to a probability.

    Inputs:
        bins    - number of bins (between min and max of the data) or a
                  list/array containing the lower edges of the bins
        display - if True, a new figure is created. Could also be a subplot
        kwargs  - extra parameters forwarded to the plot function

    Examples:
        >> z = subplot(221)
        >> spklist.cv_isi_hist(10, display=z, kwargs={'color':'r'})

    See also:
        cv_isi, cv_local, cv_kl
    """
    # NaNs are dropped: they would poison the histogram.
    cvs = self.cv_isi(float_only=True)
    # 'newnum'/'hist_new' are module-level flags selecting the calling
    # convention of numpy.histogram for the installed numpy version —
    # TODO confirm their exact semantics against the module header.
    if newnum:
        if not hist_new:
            values, xaxis = numpy.histogram(cvs, bins=bins)
            xaxis = xaxis[:-1]  # drop the last edge so axis matches counts
        else:
            values, xaxis = numpy.histogram(cvs, bins=bins, new=newnum)
            xaxis = xaxis[:-1]
    else:
        values, xaxis = numpy.histogram(cvs, bins=bins, new=True)
    subplot = get_display(display)
    # Normalize counts to a probability distribution.
    values = values/float(values.sum())
    if not subplot or not HAVE_PYLAB:
        return values, xaxis
    else:
        xlabel = " CV ISI"
        ylabel = "\% of Neurons"
        set_labels(subplot, xlabel, ylabel)
        subplot.plot(xaxis, values, **kwargs)
        pylab.draw()
def cv_local(self, t_start=None, t_stop=None, length=12, step=6):
    """
    Provides a modified, time-local version of the coefficient of
    variation: the isi variability of consecutive inter-spike-intervals is
    averaged over a window of width (t_stop-t_start)/length, shifted in
    steps of windowLength/step.

    Inputs:
        t_start - time to start the averaging (defaults to self.t_start)
        t_stop  - time to stop the averaging (defaults to self.t_stop)
        length  - divisor giving the window length (12 by default)
        step    - divisor of the window length giving the shift step size

    Examples:
        >> spklist.cv_local(0, 1000, 12, 10)

    See also
        cv_isi, cv_isi_hist, cv_kl
    """
    if t_start == None:
        t_start = self.t_start
    if t_stop == None:
        t_stop = self.t_stop
    windowLength = (t_stop-t_start)/length
    stepSize = windowLength/step
    maxBin = int((t_stop-t_start-windowLength)/stepSize)
    vLocCV = numpy.zeros(maxBin)  # summed local-CV contributions per window
    vCnt = numpy.zeros(maxBin)    # number of contributions per window
    N = len(self.id_list)
    # NOTE(review): iterates indices 0..N-1 as if they were the actual cell
    # ids in self.spiketrains — only valid when ids are contiguous from 0;
    # confirm against how this SpikeList is built.
    for i in xrange(N):
        if len(self.spiketrains[i])>15 :
            for j in xrange(2,len(self.spiketrains[i]),1) :
                # Local CV of two consecutive ISIs: 2|isi1-isi0|/(isi1+isi0)
                diff1=self.spiketrains[i].spike_times[j]-self.spiketrains[i].spike_times[j-1]
                diff0=self.spiketrains[i].spike_times[j-1]-self.spiketrains[i].spike_times[j-2]
                tmp=2*numpy.abs(diff1-diff0)/(diff1+diff0)
                for b in range(maxBin) :
                    if self.spiketrains[i].spike_times[j-2]>b*stepSize+t_start and self.spiketrains[i].spike_times[j]<=b*stepSize+t_start+windowLength :
                        vLocCV[b] = vLocCV[b] + tmp
                        vCnt[b] = vCnt[b]+1
    locCV= 0.0
    # NOTE(review): each populated window OVERWRITES locCV, so the value of
    # the *last* populated window is returned rather than an average across
    # windows — confirm whether an accumulation/mean was intended.
    for b in range(maxBin) :
        if vCnt[b] > 0 :
            locCV = vLocCV[b]/vCnt[b]
    return locCV
def mean_rate(self, t_start=None, t_stop=None):
    """
    Return the mean firing rate averaged across all SpikeTrains between
    t_start and t_stop.

    Inputs:
        t_start - beginning of the window, in ms (SpikeList's if None)
        t_stop  - end of the window, in ms (SpikeList's if None)

    Examples:
        >> spklist.mean_rate()
        >> 12.63

    See also
        mean_rates, mean_rate_std
    """
    per_cell_rates = self.mean_rates(t_start, t_stop)
    return numpy.mean(per_cell_rates)
def mean_rate_std(self, t_start=None, t_stop=None):
    """
    Return the standard deviation of the firing rates across all
    SpikeTrains between t_start and t_stop.

    Inputs:
        t_start - beginning of the window, in ms (SpikeList's if None)
        t_stop  - end of the window, in ms (SpikeList's if None)

    Examples:
        >> spklist.mean_rate_std()
        >> 13.25

    See also
        mean_rate, mean_rates
    """
    per_cell_rates = self.mean_rates(t_start, t_stop)
    return numpy.std(per_cell_rates)
def mean_rates(self, t_start=None, t_stop=None):
    """
    Return a list (same length as id_list) with the mean firing rate of
    each neuron.

    Inputs:
        t_start - beginning of the window, in ms (SpikeList's if None)
        t_stop  - end of the window, in ms (SpikeList's if None)

    See also
        mean_rate, mean_rate_std
    """
    return [self.spiketrains[cell_id].mean_rate(t_start, t_stop)
            for cell_id in self.id_list]
def rate_distribution(self, nbins=25, normalize=True, display=False, kwargs={}):
    """
    Return a vector with the mean firing rate of every SpikeTrain, or plot
    their distribution.

    Inputs:
        nbins     - number of bins (between min and max of the data) or a
                    list/array containing the lower edges of the bins
        normalize - accepted but currently UNUSED by this implementation —
                    TODO confirm intended effect
        display   - if True, a new figure is created. Could also be a subplot
        kwargs    - extra parameters forwarded to the plot function

    See also
        mean_rate, mean_rates
    """
    rates = self.mean_rates()
    subplot = get_display(display)
    if not subplot or not HAVE_PYLAB:
        return rates
    else:
        # 'newnum'/'hist_new' are module-level flags selecting the calling
        # convention of numpy.histogram for the installed numpy version.
        if newnum:
            if not hist_new:
                values, xaxis = numpy.histogram(rates, nbins)
                xaxis = xaxis[:-1]  # drop last edge so axis matches counts
            else:
                values, xaxis = numpy.histogram(rates, nbins, new=newnum)
                xaxis = xaxis[:-1]
        else:
            values, xaxis = numpy.histogram(rates, nbins)
        xlabel = "Average Firing Rate (Hz)"
        ylabel = "\% of Neurons"
        set_labels(subplot, xlabel, ylabel)
        # Plot the normalized (probability) histogram.
        subplot.plot(xaxis, values/float(values.sum()), **kwargs)
        pylab.draw()
def spike_histogram(self, time_bin, normalized=False, binary=False, display=False, kwargs={}):
    """
    Generate an (N_cells x N_bins) array with the spike histogram of every
    SpikeTrain in the SpikeList.

    Inputs:
        time_bin   - the time bin used to gather the data
        normalized - if True, histograms are in Hz (spikes/second),
                     otherwise in spikes/bin
        binary     - if True, a binary (0/1) matrix is returned
        display    - if True, a new figure is created (or a subplot); the
                     population-averaged histogram is then plotted
        kwargs     - extra parameters forwarded to the plot function

    See also
        firing_rate, time_axis
    """
    nbins = self.time_axis(time_bin)
    N = len(self)
    M = len(nbins)
    if newnum:
        # Newer numpy returns edges, one more than the number of counts.
        M -= 1
    if binary:
        spike_hist = numpy.zeros((N, M), numpy.int)
    else:
        spike_hist = numpy.zeros((N, M), numpy.float32)
    subplot = get_display(display)
    for idx, id in enumerate(self.id_list):
        if newnum:
            if not hist_new:
                hist, edges = numpy.histogram(self.spiketrains[id].spike_times, nbins)
            else:
                hist, edges = numpy.histogram(self.spiketrains[id].spike_times, nbins, new=newnum)
        else:
            # Bug fix: this branch previously read the nonexistent
            # SpikeList attribute self.spike_times and an undefined name
            # 'bins'; it must histogram this cell's spike times over nbins.
            hist, edges = numpy.histogram(self.spiketrains[id].spike_times, nbins)
        hist = hist.astype(float)
        if normalized:  # what about normalization if time_bin is a sequence?
            hist *= 1000.0/float(time_bin)
        if binary:
            hist = hist.astype(bool)
        spike_hist[idx,:] = hist
    if not subplot or not HAVE_PYLAB:
        return spike_hist
    else:
        if normalized:
            ylabel = "Firing rate (Hz)"
        else:
            ylabel = "Spikes per bin"
        xlabel = "Time (ms)"
        set_labels(subplot, xlabel, ylabel)
        axis = self.time_axis(time_bin)
        if newnum:
            # Align the axis with the counts (drop the trailing edge).
            axis = axis[:len(axis)-1]
        subplot.plot(axis, numpy.mean(spike_hist, axis=0), **kwargs)
        pylab.draw()
def firing_rate(self, time_bin, display=False, average=False, binary=False, kwargs={}):
    """
    Generate an array with the instantaneous firing rate over time (Hz) of
    every SpikeTrain in the SpikeList. If average is True, return the
    single population-averaged vector instead.

    Inputs:
        time_bin - the time bin used to gather the data
        display  - if True, a new figure is created (or a subplot); the
                   averaged spike_histogram is then plotted
        average  - if True, return the average firing rate over the whole
                   SpikeList
        binary   - if True, a binary (0/1) matrix is returned
        kwargs   - extra parameters forwarded to the plot function

    See also
        spike_histogram, time_axis
    """
    # Bug fix: 'binary' was previously ignored (hard-coded binary=False),
    # so a binary matrix could never be requested through this method.
    result = self.spike_histogram(time_bin, normalized=True, binary=binary, display=display, kwargs=kwargs)
    if average:
        return numpy.mean(result, axis=0)
    return result
def averaged_instantaneous_rate(self, resolution, kernel, norm, m_idx=None,
                                t_start=None, t_stop=None, acausal=True,
                                trim=False):
    """
    Estimate the instantaneous firing rate averaged across neurons in the
    SpikeList, by kernel convolution.

    Inputs:
        resolution - time stamp resolution of the spike times (ms); the
                     same resolution is assumed for the kernel
        kernel     - kernel function (numpy vector) to convolve with
        norm       - normalization factor associated with the kernel
                     (see analysis.make_kernel for details)
        m_idx      - index of the kernel's gravity center; optional for
                     symmetrical kernels, required to align asymmetrical
                     kernels with the event times
        t_start    - start of the interval used to compute the rate
        t_stop     - end of the interval used to compute the rate (included)
        acausal    - if True, acausal filtering is used (the kernel's
                     gravity center is aligned with each spike)
        trim       - if True, only the 'valid' region of the convolution is
                     returned (points without full kernel/train overlap are
                     discarded); with an asymmetrical kernel the output is
                     then not aligned with [t_start, t_stop]

    Returns (t_axis, rate).

    See also:
        analysis.make_kernel, SpikeTrain.instantaneous_rate
    """
    if t_start is None:
        t_start = self.t_start
    if t_stop is None:
        t_stop = self.t_stop
    if m_idx is None:
        # NOTE(review): relies on Py2 integer division; under Py3 this
        # yields a float index — confirm before porting.
        m_idx = kernel.size / 2
    # Pool the spikes of every train falling inside [t_start, t_stop].
    spikes_slice = []
    for i in self:
        train_slice = i.spike_times[(i.spike_times >= t_start) & (
            i.spike_times <= t_stop)]
        spikes_slice += train_slice.tolist()
    # Binned spike-count vector at the requested resolution.
    time_vector = numpy.zeros((t_stop - t_start)/resolution + 1)
    for spike in spikes_slice:
        # NOTE(review): float index under true division — same Py2 caveat.
        index = (spike - t_start) / resolution
        time_vector[index] += 1.
    # Per-neuron average before convolution.
    avg_time_vector = time_vector / float(self.id_list.size)
    r = norm * scipy.signal.fftconvolve(avg_time_vector, kernel, 'full')
    if acausal is True:
        if trim is False:
            # Clip the 'full' convolution back to the input support.
            r = r[m_idx:-(kernel.size - m_idx)]
            t_axis = numpy.linspace(t_start, t_stop, r.size)
            return t_axis, r
        elif trim is True:
            # Keep only samples with complete kernel overlap.
            r = r[2 * m_idx:-2*(kernel.size - m_idx)]
            t_start = t_start + m_idx * resolution
            t_stop = t_stop - ((kernel.size) - m_idx) * resolution
            t_axis = numpy.linspace(t_start, t_stop, r.size)
            return t_axis, r
    if acausal is False:
        if trim is False:
            r = r[m_idx:-(kernel.size - m_idx)]
            # Causal filtering: shift the axis by the kernel center.
            t_axis = (numpy.linspace(t_start, t_stop, r.size) +
                      m_idx * resolution)
            return t_axis, r
        elif trim is True:
            r = r[2 * m_idx:-2*(kernel.size - m_idx)]
            t_start = t_start + m_idx * resolution
            t_stop = t_stop - ((kernel.size) - m_idx) * resolution
            t_axis = (numpy.linspace(t_start, t_stop, r.size) +
                      m_idx * resolution)
            return t_axis, r
def fano_factor(self, time_bin):
    """
    Compute the Fano Factor of the population activity.

    Inputs:
        time_bin - the number of bins (between the min and max of the data)
                   or a list/array containing the lower edges of the bins.

    The Fano Factor is the variance of the population-averaged activity
    divided by its mean.

    See also
        spike_histogram, firing_rate
    """
    # Average the per-neuron histograms into one population signal,
    # then take its variance-to-mean ratio.
    pop_activity = numpy.mean(self.spike_histogram(time_bin), axis=0)
    return numpy.var(pop_activity) / numpy.mean(pop_activity)
def fano_factors_isi(self):
    """
    Return a list containing the Fano factor of the inter-spike intervals
    for each neuron in the SpikeList.

    Neurons whose Fano factor cannot be computed (e.g. too few spikes)
    are silently skipped, as before.

    See also
        isi, isi_cv
    """
    fano_factors = []
    # `cell_id` instead of `id` to avoid shadowing the builtin.
    for cell_id in self.id_list:
        try:
            fano_factors.append(self.spiketrains[cell_id].fano_factor_isi())
        # Catch only real errors: the original bare `except:` would also
        # swallow KeyboardInterrupt/SystemExit.
        except Exception:
            pass
    return fano_factors
def id2position(self, id, offset=0):
    """
    Map a cell id (or array of ids) onto a grid position, using the
    'dimensions' attribute of the SpikeList.

    For a 1D population the shifted id itself is returned; for a 2D
    population a tuple (x, y) is returned with
    x = (id - offset) %  dimensions[0]
    y = (id - offset) // dimensions[0]

    Inputs:
        id - the id of the cell (ids may be a numpy array in the 2D case)

    The 'dimensions' attribute of the SpikeList must be defined.

    See also
        activity_map, activity_movie
    """
    if self.dimensions is None:
        raise Exception("Dimensions of the population are not defined ! Set spikelist.dimensions")
    shifted = id - offset
    ndims = len(self.dimensions)
    if ndims == 1:
        return shifted
    if ndims == 2:
        width = self.dimensions[0]
        return (shifted % width, (shifted / width).astype(int))
def position2id(self, position, offset=0):
    """
    Return the id of the cell at position (x,y) if the cells are aranged on a
    grid of size dims, as defined in the dims attribute of the SpikeList object.

    Inputs:
        position - a tuple with the position of the cell

    The 'dimensions' attribute of the SpikeList must be defined and have the
    same shape as the position argument.

    See also
        activity_map, activity_movie, id2position
    """
    if self.dimensions is None:
        raise Exception("Dimensions of the population are not defined ! Set spikelist.dimensions")
    assert len(position) == len(tuple(self.dimensions)), "position does not have the correct shape !"
    if len(self.dimensions) == 1:
        return position + offset
    if len(self.dimensions) == 2:
        # Inverse of id2position, which computes
        #   x = (id - offset) %  dimensions[0]
        #   y = (id - offset) // dimensions[0]
        # so id = y*dimensions[0] + x + offset.  The previous code multiplied
        # by dimensions[1], breaking the round-trip on non-square grids.
        return position[1]*self.dimensions[0] + position[0] + offset
def activity_map(self, t_start=None, t_stop=None, float_positions=None, display=False, kwargs={}):
    """
    Generate a 2D map of the activity averaged between t_start and t_stop.
    If t_start and t_stop are not defined, we used those of the SpikeList object

    Inputs:
        t_start         - if not defined, the one of the SpikeList is used
        t_stop          - if not defined, the one of the SpikeList is used
        float_positions - None by default, meaning that the dimensions attribute
                          of the SpikeList is used to arange the ids on a 2D grid.
                          Otherwise, if the cells have floating positions,
                          float_positions should be an array of size
                          (2, nb_cells) with the x (first line) and y (second line)
                          coordinates of the cells
        display         - if True, a new figure is created. Could also be a subplot.
                          The averaged spike_histogram over the whole population is
                          then plotted
        kwargs          - dictionary contening extra parameters that will be sent
                          to the plot function

    The 'dimensions' attribute of the SpikeList is used to turn ids into 2d positions. It should
    therefore be not empty.

    Examples:
        >> spklist.activity_map(0,1000,display=True)

    See also
        activity_movie
    """
    subplot = get_display(display)
    if t_start == None:
        t_start = self.t_start
    if t_stop == None:
        t_stop = self.t_stop
    # Slice the SpikeList only when the requested window differs from the
    # full one, to avoid an unnecessary copy.
    if t_start != self.t_start or t_stop != self.t_stop:
        spklist = self.time_slice(t_start, t_stop)
    else:
        spklist = self
    if float_positions is None:
        # Grid layout: ids are mapped onto the 2D grid given by self.dimensions.
        if self.dimensions is None:
            raise Exception("Dimensions of the population are not defined ! Set spikelist.dims")
        activity_map = numpy.zeros(self.dimensions, float)
        rates = spklist.mean_rates()
        #id_offset = min(self.id_list)
        #x,y = spklist.id2position(spklist.id_list, id_offset)
        x,y = spklist.id2position(spklist.id_list)
        #j,i = x, self.dimensions[0] - 1 - y
        for count, id in enumerate(spklist.id_list):
            #activity_map[i[count],j[count]] = rates[count]
            activity_map[x[count],y[count]] = rates[count]
        # Without a plotting backend, return the raw rate map instead of drawing.
        if not subplot or not HAVE_PYLAB or not HAVE_MATPLOTLIB:
            return activity_map
        else:
            im = subplot.imshow(activity_map, **kwargs)
            pylab.colorbar(im)
            pylab.draw()
    elif isinstance(float_positions, numpy.ndarray):
        # Free layout: one (x, y) coordinate per cell, colored by its mean rate.
        if not len(spklist.id_list) == len(float_positions[0]):
            raise Exception("Error, the number of flotting positions does not match the number of cells in the SpikeList")
        rates = spklist.mean_rates()
        if not subplot or not HAVE_PYLAB or not HAVE_MATPLOTLIB:
            return rates
        else:
            x = float_positions[0,:]
            y = float_positions[1,:]
            im = subplot.scatter(x,y,c=rates, **kwargs)
            pylab.colorbar(im)
            pylab.draw()
def pairwise_cc(self, nb_pairs, pairs_generator=None, time_bin=1., average=True, display=False, kwargs={}):
    """
    Function to generate an array of cross correlations computed
    between pairs of cells within the SpikeTrains.

    Inputs:
        nb_pairs        - int specifying the number of pairs
        pairs_generator - The generator that will be used to draw the pairs. If None, a default one is
                          created as RandomPairs(spk, spk, no_silent=False, no_auto=True)
        time_bin        - The time bin used to gather the spikes
        average         - If true, only the averaged CC among all the pairs is returned (less memory needed)
        display         - if True, a new figure is created. Could also be a subplot. The averaged
                          spike_histogram over the whole population is then plotted
        kwargs          - dictionary contening extra parameters that will be sent to the plot
                          function

    Examples
        >> a.pairwise_cc(500, time_bin=1, averaged=True)
        >> a.pairwise_cc(500, time_bin=1, averaged=True, display=subplot(221), kwargs={'color':'r'})
        >> a.pairwise_cc(100, CustomPairs(a,a,[(i,i+1) for i in xrange(100)]), time_bin=5)

    See also
        pairwise_pearson_corrcoeff, pairwise_cc_zero, RandomPairs, AutoPairs, CustomPairs
    """
    subplot = get_display(display)
    ## We have to extract only the non silent cells, to avoid problems
    if pairs_generator is None:
        pairs_generator = RandomPairs(self, self, False, True)
    # Then we select the pairs of cells
    pairs = pairs_generator.get_pairs(nb_pairs)
    N = len(pairs)
    # NOTE(review): `newnum` is a module-level flag (presumably a numpy
    # version check defined elsewhere in this file) -- confirm.  It selects
    # the length convention of analysis.ccf's output.
    if newnum:
        length = 2*(len(pairs_generator.spk1.time_axis(time_bin))-1)
    else:
        length = 2*len(pairs_generator.spk1.time_axis(time_bin))
    if not average:
        results = numpy.zeros((N,length), float)
    else:
        results = numpy.zeros(length, float)
    # NOTE(review): xrange is Python-2-only; under Python 3 this raises NameError.
    for idx in xrange(N):
        # We need to avoid empty spike histogram, otherwise the ccf function
        # will give a nan vector
        hist_1 = pairs_generator.spk1[pairs[idx,0]].time_histogram(time_bin)
        hist_2 = pairs_generator.spk2[pairs[idx,1]].time_histogram(time_bin)
        # Local import keeps module load light; ccf is the cross-correlation helper.
        if not average:
            from NeuroTools import analysis
            results[idx,:] = analysis.ccf(hist_1,hist_2)
        else:
            from NeuroTools import analysis
            results += analysis.ccf(hist_1,hist_2)
    if not subplot or not HAVE_PYLAB:
        if not average:
            return results
        else:
            return results/N
    else:
        # For plotting, always reduce to the pair-averaged correlation.
        if average:
            results = results/N
        else:
            results = numpy.sum(results, axis=0)/N
        xaxis = time_bin*numpy.arange(-len(results)/2, len(results)/2)
        xlabel = "Time (ms)"
        ylabel = "Cross Correlation"
        subplot.plot(xaxis, results, **kwargs)
        set_labels(subplot, xlabel, ylabel)
        pylab.draw()
def pairwise_cc_zero(self, nb_pairs, pairs_generator=None, time_bin=1., time_window=None, display=False, kwargs={}):
    """
    Function to return the normalized cross correlation coefficient at zero time
    lag according to the method given in:
    See A. Aertsen et al,
        Dynamics of neuronal firing correlation: modulation of effective connectivity
        J Neurophysiol, 61:900-917, 1989

    The coefficient is averaged over N pairs of cells. If time window is specified, compute
    the corr coeff on a sliding window, and therefore returns not a value but a vector.

    Inputs:
        nb_pairs        - int specifying the number of pairs
        pairs_generator - The generator that will be used to draw the pairs. If None, a default one is
                          created as RandomPairs(spk, spk, no_silent=False, no_auto=True)
        time_bin        - The time bin used to gather the spikes
        time_window     - None by default, and then a single number, the normalized CC is returned.
                          If this is a float, then size (in ms) of the sliding window used to
                          compute the normalized cc. A Vector is then returned
        display         - if True, a new figure is created. Could also be a subplot. The averaged
                          spike_histogram over the whole population is then plotted
        kwargs          - dictionary contening extra parameters that will be sent to the plot
                          function

    Examples:
        >> a.pairwise_cc_zero(100, time_bin=1)
            1.0
        >> a.pairwise_cc_zero(100, CustomPairs(a, a, [(i,i+1) for i in xrange(100)]), time_bin=1)
            0.45
        >> a.pairwise_cc_zero(100, RandomPairs(a, a, no_silent=True), time_bin=5, time_window=10, display=True)

    See also:
        pairwise_cc, pairwise_pearson_corrcoeff, RandomPairs, AutoPairs, CustomPairs
    """
    subplot = get_display(display)
    if pairs_generator is None:
        pairs_generator = RandomPairs(self, self, False, True)
    spk1, spk2, pairs = self.__select_with_pairs__(nb_pairs, pairs_generator)
    N = len(pairs)
    if spk1.time_parameters() != spk2.time_parameters():
        raise Exception("The two SpikeList must have common time axis !")
    # Build binary occupancy matrices (time bin x cell): entry is 1 iff the
    # cell fired at least one spike in that bin.
    num_bins = int(numpy.round((self.t_stop-self.t_start)/time_bin)+1)
    mat_neur1 = numpy.zeros((num_bins,N),int)
    mat_neur2 = numpy.zeros((num_bins,N),int)
    times1, ids1 = spk1.convert("times, ids")
    times2, ids2 = spk2.convert("times, ids")
    # Remap original cell ids to dense column indices 0..N-1.
    cells_id = spk1.id_list
    # NOTE(review): xrange is Python-2-only; NameError under Python 3.
    for idx in xrange(len(cells_id)):
        ids1[numpy.where(ids1 == cells_id[idx])[0]] = idx
    cells_id = spk2.id_list
    for idx in xrange(len(cells_id)):
        ids2[numpy.where(ids2 == cells_id[idx])[0]] = idx
    times1 = numpy.array(((times1 - self.t_start)/time_bin),int)
    times2 = numpy.array(((times2 - self.t_start)/time_bin),int)
    mat_neur1[times1,ids1] = 1
    mat_neur2[times2,ids2] = 1
    if time_window:
        # Sliding-window variant: Aertsen normalization computed on each window.
        nb_pts = int(time_window/time_bin)
        mat_prod = mat_neur1*mat_neur2
        cc_time = numpy.zeros((num_bins-nb_pts),float)
        xaxis = numpy.zeros((num_bins-nb_pts))
        M = float(nb_pts*N)
        # NOTE(review): nb_pts/2 is integer division on Python 2 but true
        # division on Python 3, so `bound` differs for odd nb_pts -- confirm
        # the intended convention before porting.
        bound = int(numpy.ceil(nb_pts/2))
        for idx in xrange(bound,num_bins-bound):
            s_min = idx-bound
            s_max = idx+bound
            # Z = joint firing probability, X/Y = marginal firing probabilities.
            Z = numpy.sum(numpy.sum(mat_prod[s_min:s_max]))/M
            X = numpy.sum(numpy.sum(mat_neur1[s_min:s_max]))/M
            Y = numpy.sum(numpy.sum(mat_neur2[s_min:s_max]))/M
            cc_time[s_min] = (Z-X*Y)/numpy.sqrt((X*(1-X))*(Y*(1-Y)))
            xaxis[s_min] = time_bin*idx
        if not subplot or not HAVE_PYLAB:
            return cc_time
        else:
            xlabel = "Time (ms)"
            ylabel = "Normalized CC"
            subplot.plot(xaxis+self.t_start, cc_time, **kwargs)
            set_labels(subplot, xlabel, ylabel)
            pylab.draw()
    else:
        # Single-value variant over the whole recording.
        M = float(num_bins*N)
        X = len(times1)/M
        Y = len(times2)/M
        Z = numpy.sum(numpy.sum(mat_neur1*mat_neur2))/M
        return (Z-X*Y)/numpy.sqrt((X*(1-X))*(Y*(1-Y)))
def distance_victorpurpura(self, nb_pairs, pairs_generator=None, cost=0.5):
    """
    Calculate the Victor-Purpura distance averaged over N pairs in the SpikeList.

    See J. D. Victor and K. P. Purpura,
        Nature and precision of temporal coding in visual cortex: a metric-space
        analysis., J Neurophysiol, 76(2):1310-1326, 1996

    Inputs:
        nb_pairs        - int specifying the number of pairs
        pairs_generator - The generator that will be used to draw the pairs. If None,
                          a default one is created as
                          RandomPairs(spk, spk, no_silent=False, no_auto=True)
        cost            - The cost parameter. See the paper for more informations.
                          By default, set to 0.5

    See also
        RandomPairs, AutoPairs, CustomPairs
    """
    if pairs_generator is None:
        pairs_generator = RandomPairs(self, self, False, True)
    pairs = pairs_generator.get_pairs(nb_pairs)
    N = len(pairs)
    distance = 0.
    # range (not the Python-2-only xrange) keeps this usable on Python 3.
    for idx in range(N):
        idx_1 = pairs[idx, 0]
        idx_2 = pairs[idx, 1]
        distance += pairs_generator.spk1[idx_1].distance_victorpurpura(pairs_generator.spk2[idx_2], cost)
    return distance/N
def distance_kreuz(self, nb_pairs, pairs_generator=None, dt=0.1):
    """
    Calculate the Kreuz/Politi distance averaged over N pairs of spike trains.

    See Kreuz, T.; Haas, J.S.; Morelli, A.; Abarbanel, H.D.I. & Politi, A.
        Measuring spike train synchrony.
        J Neurosci Methods, 2007, 165, 151-161

    Inputs:
        nb_pairs        - int specifying the number of pairs
        pairs_generator - The generator that will be used to draw the pairs. If None,
                          a default one is created as
                          RandomPairs(spk, spk, no_silent=False, no_auto=True)
        dt              - The time bin used to discretized the spike times

    See also
        RandomPairs, AutoPairs, CustomPairs
    """
    if pairs_generator is None:
        pairs_generator = RandomPairs(self, self, False, True)
    pairs = pairs_generator.get_pairs(nb_pairs)
    N = len(pairs)
    distance = 0.
    # range (not the Python-2-only xrange) keeps this usable on Python 3.
    for idx in range(N):
        idx_1 = pairs[idx, 0]
        idx_2 = pairs[idx, 1]
        distance += pairs_generator.spk1[idx_1].distance_kreuz(pairs_generator.spk2[idx_2], dt)
    return distance/N
def mean_rate_variance(self, time_bin):
    """
    Return the variance, across time, of the population-averaged firing
    rate, when events are binned with a bin of size time_bin.

    Inputs:
        time_bin - time bin to bin events

    See also
        mean_rate, mean_rates, mean_rate_covariance, firing_rate
    """
    # Average over cells (axis 0), then take the variance over time.
    population_rate = numpy.mean(self.firing_rate(time_bin), axis=0)
    return numpy.var(population_rate)
def mean_rate_covariance(self, spikelist, time_bin):
    """
    Return the covariance, across time, between the firing rates of this
    SpikeList and another one, when events are binned with a bin of size
    time_bin.

    Inputs:
        spikelist - the other SpikeList used to compute the covariance
        time_bin  - time bin to bin events

    See also
        mean_rate, mean_rates, mean_rate_variance, firing_rate
    """
    if not isinstance(spikelist, SpikeList):
        raise Exception("Error, argument should be a SpikeList object")
    if spikelist.time_parameters() != self.time_parameters():
        raise Exception("Error, both SpikeLists should share common t_start, t_stop")
    rate_a = self.firing_rate(time_bin, average=True)
    rate_b = spikelist.firing_rate(time_bin, average=True)
    n = len(rate_a)
    # cov = E[ab] - E[a]E[b]
    mean_product = numpy.sum(rate_a * rate_b) / n
    product_of_means = numpy.sum(rate_a) * numpy.sum(rate_b) / (n * n)
    return mean_product - product_of_means
def raster_plot(self, id_list=None, t_start=None, t_stop=None, display=True, kwargs={}):
    """
    Generate a raster plot for the SpikeList in a subwindow of interest,
    defined by id_list, t_start and t_stop.

    Inputs:
        id_list - can be a integer (and then N cells are randomly selected) or a list of ids. If None,
                  we use all the ids of the SpikeList
        t_start - in ms. If not defined, the one of the SpikeList object is used
        t_stop  - in ms. If not defined, the one of the SpikeList object is used
        display - if True, a new figure is created. Could also be a subplot
        kwargs  - dictionary contening extra parameters that will be sent to the plot
                  function

    Examples:
        >> z = subplot(221)
        >> spikelist.raster_plot(display=z, kwargs={'color':'r'})

    See also
        SpikeTrain.raster_plot
    """
    subplot = get_display(display)
    if id_list == None:
        id_list = self.id_list
        spk = self
    else:
        spk = self.id_slice(id_list)
    if t_start is None: t_start = spk.t_start
    if t_stop is None:  t_stop  = spk.t_stop
    # Slice only when the window differs from the full one.
    if t_start != spk.t_start or t_stop != spk.t_stop:
        spk = spk.time_slice(t_start, t_stop)
    if not subplot or not HAVE_PYLAB:
        print(PYLAB_ERROR)
    else:
        ids, spike_times = spk.convert(format="[ids, times]")
        # NOTE(review): `idx` is computed but never used below -- the data was
        # already sliced to [t_start, t_stop] above; confirm before removing.
        idx = numpy.where((spike_times >= t_start) & (spike_times <= t_stop))[0]
        if len(spike_times) > 0:
            # One pixel marker ',' per spike keeps large rasters fast.
            subplot.plot(spike_times, ids, ',', **kwargs)
        xlabel = "Time (ms)"
        ylabel = "Neuron #"
        set_labels(subplot, xlabel, ylabel)
        min_id = numpy.min(spk.id_list)
        max_id = numpy.max(spk.id_list)
        length = t_stop - t_start
        # Pad the axes by 5% in time and 2 ids vertically for readability.
        set_axis_limits(subplot, t_start-0.05*length, t_stop+0.05*length, min_id-2, max_id+2)
        pylab.draw()
def psth(self, events, average=True, time_bin=2, t_min=50, t_max=50, display = False, kwargs={}):
    """
    Return the psth of the cells contained in the SpikeList according to selected events,
    on a time window t_spikes - tmin, t_spikes + tmax

    Can return either the averaged psth (average = True), or an array of all the
    psth triggered by all the spikes.

    Inputs:
        events  - Can be a SpikeTrain object (and events will be the spikes) or just a list
                  of times
        average - If True, return a single vector of the averaged waveform. If False,
                  return an array of all the waveforms.
        time_bin- The time bin (in ms) used to gather the spike for the psth
        t_min   - Time (>0) to average the signal before an event, in ms (default 0)
        t_max   - Time (>0) to average the signal after an event, in ms (default 100)
        display - if True, a new figure is created. Could also be a subplot.
        kwargs  - dictionary contening extra parameters that will be sent to the plot
                  function

    Examples:
        >> vm.psth(spktrain, average=False, t_min = 50, t_max = 150)
        >> vm.psth(spktrain, average=True)
        >> vm.psth(range(0,1000,10), average=False, display=True)

    See also
        SpikeTrain.spike_histogram
    """
    if isinstance(events, SpikeTrain):
        events = events.spike_times
    assert (t_min >= 0) and (t_max >= 0), "t_min and t_max should be greater than 0"
    assert len(events) > 0, "events should not be empty and should contained at least one element"
    spk_hist = self.spike_histogram(time_bin)
    subplot = get_display(display)
    count = 0
    # Convert the window bounds from ms to histogram-bin units.
    t_min_l = numpy.floor(t_min/time_bin)
    t_max_l = numpy.floor(t_max/time_bin)
    # NOTE(review): t_min_l/t_max_l are floats; using them as an array shape
    # relies on Python-2-era numpy accepting float sizes -- confirm on py3.
    result = numpy.zeros((len(self), t_min_l+t_max_l), numpy.float32)
    t_start = numpy.floor(self.t_start/time_bin)
    t_stop = numpy.floor(self.t_stop/time_bin)
    for ev in events:
        ev = numpy.floor(ev/time_bin)
        # Only accumulate events whose whole window fits inside the recording.
        if ((ev - t_min_l )> t_start) and (ev + t_max_l ) < t_stop:
            count += 1
            result += spk_hist[:,(ev-t_start-t_min_l):ev-t_start+t_max_l]
    # Average over the accepted events (reconstructed placement: the source
    # dump lost indentation; averaging once after the loop is the only
    # reading consistent with the accumulation above).
    result /= count
    if not subplot or not HAVE_PYLAB:
        return result
    else:
        xlabel = "Time (ms)"
        ylabel = "PSTH"
        time = numpy.linspace(-t_min, t_max, (t_min+t_max)/time_bin)
        set_labels(subplot, xlabel, ylabel)
        if average:
            subplot.plot(time, result, **kwargs)
            # NOTE(review): `times`, `mean` and `std` are undefined here --
            # this line raises NameError when reached; it presumably should
            # be subplot.errorbar(time, numpy.mean(result, 0),
            # yerr=numpy.std(result, 0)). Confirm and fix.
            subplot.errorbar(times, mean(result, 0), yerr=std(result, 0))
        else:
            for idx in xrange(len(result)):
                subplot.plot(time, result[idx,:], c='0.5', **kwargs)
                subplot.hold(1)
            result = numpy.mean(result, 0)
            subplot.plot(time, result, c='k', **kwargs)
        xmin, xmax, ymin, ymax = subplot.axis()
        # Vertical line marking the event time (t = 0).
        subplot.plot([0,0],[ymin, ymax], c='r')
        set_axis_limits(subplot, -t_min, t_max, ymin, ymax)
        pylab.draw()
    # NOTE(review): the non-plotting branch above returned the *unaveraged*
    # result; only the plotting path reaches this final averaging -- confirm
    # whether that asymmetry is intended.
    if average:
        result = numpy.mean(result, 0)
    return result
def activity_movie(self, time_bin=10, t_start=None, t_stop=None, float_positions=None, output="animation.mpg", bounds=(0,5), fps=10, display=True, kwargs={}):
    """
    Generate a movie of the activity between t_start and t_stop.
    If t_start and t_stop are not defined, we used those of the SpikeList object

    Inputs:
        time_bin        - time step to bin activity during the movie.
                          One frame is the mean rate during time_bin
        t_start         - if not defined, the one of the SpikeList is used, in ms
        t_stop          - if not defined, the one of the SpikeList is used, in ms
        float_positions - None by default, meaning that the dimensions attribute of the SpikeList
                          is used to arange the ids on a 2D grid. Otherwise, if the cells have
                          flotting positions, float_positions should be an array of size
                          (2, nb_cells) with the x (first line) and y (second line) coordinates of
                          the cells
        output          - The filename to store the movie
        bounds          - The common color bounds used during all the movies frame.
                          This is a tuple of values (min, max), in spikes per frame.
        fps             - The number of frame per second in the final movie
        display         - if True, a new figure is created. Could also be a subplot.
        kwargs          - dictionary contening extra parameters that will be sent to the plot
                          function

    The 'dimensions' attribute of the SpikeList is used to turn ids into 2d positions. It should
    therefore be not empty.

    Examples:
        >> spklist.activity_movie(10,0,1000,bounds=(0,5),display=subplot(221),output="test.mpg")

    See also
        activity_map
    """
    subplot = get_display(display)
    if t_start is None: t_start = self.t_start
    if t_stop is None:  t_stop  = self.t_stop
    if not subplot or not HAVE_PYLAB:
        print(PYLAB_ERROR)
    else:
        # PNG frames written to disk, then assembled with mencoder below.
        files = []
        # Create the image/scatter artist once; frames only update its data.
        if float_positions is None:
            activity_map = numpy.zeros(self.dimensions)
            im = subplot.imshow(activity_map, **kwargs)
            im.set_clim(bounds[0],bounds[1])
            pylab.colorbar(im)
        else:
            rates = [0]*len(self)
            im = subplot.scatter(float_positions[0,:], float_positions[1,:], c=rates, **kwargs)
            im.set_clim(bounds[0],bounds[1])
            pylab.colorbar(im)
        count = 0
        idx = 0
        manager = pylab.get_current_fig_manager()
        if t_start != self.t_start or t_stop != self.t_stop:
            spk = self.time_slice(t_start, t_stop)
        else:
            spk = self
        time, pos = spk.convert("times, ids")
        # We sort the spikes to allow faster process later
        sort_idx = time.ravel().argsort(kind="quicksort")
        time = time[sort_idx]
        pos = pos[sort_idx]
        x,y = spk.id2position(pos)
        max_idx = len(time)-1
        logging.info('Making movie %s - this make take a while' % output)
        if float_positions is None:
            # Grid layout: per-frame spike counts accumulated into a 2D map.
            if self.dimensions is None:
                raise Exception("Dimensions of the population are not defined ! Set spikelist.dims")
            while (t_start < t_stop):
                activity_map = numpy.zeros(spk.dimensions)
                # Spikes are time-sorted, so consume them sequentially per frame.
                while ((time[idx] < t_start + time_bin) and (idx < max_idx)):
                    #j,i = x, self.dimensions[0] - 1 -y
                    activity_map[x[idx],y[idx]] += 1
                    idx += 1
                im.set_array(activity_map)
                subplot.title("time = %d ms" %t_start)
                im.set_clim(bounds[0],bounds[1])
                manager.canvas.draw()
                fname = "_tmp_spikes_%05d.png" %count
                #logging.debug("Saving Frame %s", fname)
                #progress_bar(float(t_start)/t_stop)
                pylab.savefig(fname)
                files.append(fname)
                t_start += time_bin
                count += 1
        elif isinstance(float_positions, numpy.ndarray):
            # Free layout: per-frame spike counts colored on a scatter plot.
            if not len(self) == len(float_positions[0]):
                raise Exception("Error, the number of flotting positions does not match the number of cells in the SpikeList")
            while (t_start < t_stop):
                rates = [0]*len(self)
                while ((time[idx] < t_start + time_bin) and (idx < max_idx)):
                    rates[pos[idx]] += 1
                    idx += 1
                im = subplot.scatter(float_positions[0,:], float_positions[1,:], c=rates, **kwargs)
                subplot.title("time = %d ms" %t_start)
                im.set_clim(bounds[0],bounds[1])
                manager.canvas.draw()
                fname = "_tmp_spikes_%05d.png" %count
                #logging.debug("Saving Frame %s", fname)
                progress_bar(float(t_start)/t_stop)
                pylab.savefig(fname)
                files.append(fname)
                t_start += time_bin
                count += 1
        # Assemble the saved frames into a movie with the external mencoder tool.
        command = "mencoder 'mf://_tmp_*.png' -mf type=png:fps=%d -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o %s" %(fps,output)
        logging.debug(command)
        os.system(command)
        ## cleanup
        logging.debug("Clean up....")
        for fname in files: os.remove(fname)
def pairwise_pearson_corrcoeff(self, nb_pairs, pairs_generator=None, time_bin=1., all_coef=False):
    """
    Return the mean and the standard deviation of the Pearson correlation
    coefficient computed over pairs of binned spike histograms.
    For more details, see Kumar et al.

    Inputs:
        nb_pairs        - int specifying the number of pairs
        pairs_generator - The generator that will be used to draw the pairs. If None,
                          a default one is created as
                          RandomPairs(spk, spk, no_silent=False, no_auto=True)
        time_bin        - The time bin used to gather the spikes
        all_coef        - If True, the whole array of correlation coefficients is
                          returned instead of (mean, std)

    Examples
        >> spk.pairwise_pearson_corrcoeff(50, time_bin=5)
            (0.234, 0.0087)
        >> spk.pairwise_pearson_corrcoeff(100, AutoPairs(spk, spk))
            (1.0, 0.0)

    See also
        pairwise_cc, pairwise_cc_zero, RandomPairs, AutoPairs, CustomPairs
    """
    ## We have to extract only the non silent cells, to avoid problems
    # `is None` (identity) instead of `== None`; range instead of the
    # Python-2-only xrange.
    if pairs_generator is None:
        pairs_generator = RandomPairs(self, self, False, True)
    pairs = pairs_generator.get_pairs(nb_pairs)
    N = len(pairs)
    cor = numpy.zeros(N, float)
    for idx in range(N):
        hist_1 = pairs_generator.spk1[pairs[idx, 0]].time_histogram(time_bin)
        hist_2 = pairs_generator.spk2[pairs[idx, 1]].time_histogram(time_bin)
        # numpy.corrcoef already returns the normalized (Pearson) coefficient,
        # i.e. the definition in Kumar et al. eq. 1.6.
        cor[idx] = numpy.corrcoef(hist_1, hist_2)[1][0]
    if all_coef:
        return cor
    return (cor.mean(), cor.std())
####################################################################
### TOO SPECIFIC METHOD ?
### Better documentation
####################################################################
def f1f0_ratios(self, time_bin, f_stim):
    """
    Return a dictionary mapping each cell id in the SpikeList to the F1/F0
    amplitude ratio of its spike train, for an input stimulus of frequency
    f_stim.
    """
    return dict((cell_id, train.f1f0_ratio(time_bin, f_stim))
                for cell_id, train in self.spiketrains.items())
#######################################################################
## Method to convert the SpikeList into several others format ##
#######################################################################
def convert(self, format="[times, ids]", relative=False, quantized=False):
    """
    Return a new representation of the SpikeList object, in a user designed format.
        format is an expression containing either the keywords times and ids,
        time and id.

    Inputs:
        relative  - a boolean to say if a relative representation of the spikes
                    times compared to t_start is needed
        quantized - a boolean to round the spikes_times.

    Examples:
        >> spk.convert("[times, ids]") will return a list of two elements, the
            first one being the array of all the spikes, the second the array of all the
            corresponding ids
        >> spk.convert("[(time,id)]") will return a list of tuples (time, id)

    See also
        SpikeTrain.format
    """
    # The format string is eval()'ed with `times`/`ids` (vectorized) or
    # `time`/`id` (per-spike) in scope.
    # NOTE(review): eval() on a caller-supplied string -- only safe with
    # trusted callers.
    is_times = re.compile("times")
    is_ids = re.compile("ids")
    if len(self) > 0:
        # NOTE(review): itervalues()/iteritems() are Python-2-only dict
        # methods; this raises AttributeError under Python 3.
        times = numpy.concatenate([st.format(relative, quantized) for st in self.spiketrains.itervalues()])
        ids = numpy.concatenate([id*numpy.ones(len(st.spike_times), int) for id,st in self.spiketrains.iteritems()])
    else:
        times = []
        ids = []
    # Vectorized form: the whole arrays are substituted at once.
    if is_times.search(format):
        if is_ids.search(format):
            return eval(format)
        else:
            raise Exception("You must have a format with [times, ids] or [time, id]")
    # Per-spike form: the expression is evaluated once per (time, id) pair.
    is_times = re.compile("time")
    is_ids = re.compile("id")
    if is_times.search(format):
        if is_ids.search(format):
            result = []
            for id, time in zip(ids, times):
                result.append(eval(format))
        else:
            raise Exception("You must have a format with [times, ids] or [time, id]")
    # NOTE(review): if `format` contains neither "times" nor "time", `result`
    # was never bound and this line raises NameError -- confirm intended.
    return result
def raw_data(self):
    """
    Return all spikes as an N x 2 float32 array: first column holds the
    spike times, second column the corresponding cell ids.

    Examples:
        >> spklist.raw_data()
        >> array([[ 1.00000000e+00,   1.00000000e+00],
                  [ 1.00000000e+00,   1.00000000e+00],
                  ...,
                  [ 2.71530000e+03,   2.76210000e+03]])

    See also:
        convert()
    """
    flat = self.convert("[times, ids]")
    return numpy.transpose(numpy.array(flat, numpy.float32))
#############################################################
## Object Loaders. Functions used to create NeuroTools
## objects from data generated by pyNN (the most simple form
## supported right now)
#############################################################
def load_spikelist(user_file, id_list=None, t_start=None, t_stop=None, dims=None):
    """
    Return a SpikeList object built from a file. If the file has been generated
    by PyNN, a header should be found with the following parameters:
        ---> dims, dt, id of the first cell, id of the last cell.
    They must be specified otherwise. The classical PyNN text format is one
    line per event: absolute time in ms, GID.

    Inputs:
        user_file - the user_file object with read/write methods. If a string is
                    provided, a StandardTextFile object is created by default
        id_list   - the list of the recorded ids. Can be an int (meaning cells in
                    the range (0,..,N)), or a list
        dims      - if the cells were aranged on a 2/3D grid, a tuple with the dimensions
        t_start   - begining of the simulation, in ms
        t_stop    - end of the simulation, in ms

    If dims, t_start, t_stop or id_list are None, they will be infered from
    either the data or from the header. All times are in milliseconds.
    The format of the file (text, pickle) will be inferred automatically.
    """
    handler = DataHandler(user_file)
    return handler.load_spikes(id_list=id_list, t_start=t_start,
                               t_stop=t_stop, dims=dims)
def load(user_file, datatype):
    """
    Convenient data loader for results produced by pyNN. Return the corresponding
    NeuroTools object. Datatype argument may become optionnal in the future, but
    for now it is necessary to specify the type of the recorded data. To have a
    better control on the parameters of the NeuroTools objects, see the load_***
    functions.

    Inputs:
        user_file - the user_file object with read/write methods. By defaults, if a string
                    is provided, a StandardTextFile object is created
        datatype  - A string to specify the type of the data:
                        's' : spikes
                        'g' : conductances
                        'v' : membrane traces
                        'c' : currents

    Examples:
        >> load("simulation.dat",'v')
        >> load("spikes.dat",'s')
        >> load(StandardPickleFile("simulation.dat"), 'g')
        >> load(StandardTextFile("test.dat"), 's')

    See also:
        load_spikelist, load_conductancelist, load_vmlist, load_currentlist
    """
    # Lazy dispatch table: the loader is only looked up and called for the
    # requested datatype, exactly like the original if/elif chain.
    loaders = {
        's': lambda: load_spikelist(user_file),
        'v': lambda: load_vmlist(user_file),
        'c': lambda: load_currentlist(user_file),
        'g': lambda: load_conductancelist(user_file),
    }
    if datatype not in loaders:
        raise Exception("The datatype %s is not handled ! Should be 's','g','c' or 'v'" %datatype)
    return loaders[datatype]()
def _test():
    """Run the doctests embedded in this module's docstrings."""
    import doctest
    doctest.testmod()


if __name__ == '__main__':
    #from spikes import *
    # Executed as a script: run the module self-tests.
    _test()
| NeuralEnsemble/NeuroTools | src/signals/spikes.py | Python | gpl-2.0 | 111,721 | [
"NEURON"
] | 0a806b0f90fe8652f01bf908504f6506057b5d6637430632f8ef5a6b26b57619 |
# Build and print FOIA.gov "simple processing" report URLs, one per
# (agency, year) combination.
#
# The original code filled the agency lists one index at a time into an EMPTY
# list (agenciesAb = []; agenciesAb[0] = 'ALL'; ...), which raises IndexError
# on the very first assignment, and `agenciesFile` was never initialised at
# all.  Both lists contained exactly the same 103 entries, so they are now
# built once as a literal and copied.

agencies = ["aaa", "bbb", "ccc", "ddd"]  # unused placeholder kept for compatibility

# Abbreviations: the 15 departments (plus 'ALL') followed by the federal agencies.
agenciesAb = [
    'ALL', 'USDA', 'DOC', 'DoD', 'ED', 'DOE', 'HHS', 'DHS', 'HUD', 'DOI',
    'DOJ', 'U.S. DOL', 'State', 'DOT', 'Treasury', 'VA',
    # Federal agencies
    'ACUS', 'USAID', 'ABMC', 'NRPC', 'AFRH', 'BBG', 'CIA', 'CSB', 'USCCR',
    'CPPBSD', 'CFTC', 'CFPB', 'U.S. CPSC', 'CNCS', 'CIGIE', 'CSOSA', 'DNFSB',
    'EPA', 'EEOC', 'CEQ', 'OMB', 'ONDCP', 'OSTP', 'USTR', 'Ex-Im Bank',
    'FCA', 'FCSIC', 'FCC', 'FDIC', 'FEC', 'FERC', 'FFIEC', 'FHFA', 'FLRA',
    'FMC', 'FMCS', 'FMSHRC', 'FOMC', 'FRB', 'FRTIB', 'FTC', 'GSA', 'IMLS',
    'IAF', 'LSC', 'MSPB', 'MCC', 'NASA', 'NARA', 'NCPC', 'NCUA', 'NEA',
    'NEH', 'NIGC', 'NLRB', 'NMB', 'NSF', 'NTSB', 'USNRC', 'OSHRC', 'OGE',
    'ONHIR', 'OPM', 'OSC', 'ODNI', 'OPIC', 'PC', 'PBGC', 'PRC', 'PCLOB',
    'RATB', 'US RRB', 'SEC', 'SSS', 'SBA', 'SSA', 'SIGAR', 'SIGIR', 'STB',
    'TVA', 'USAB', 'US ADF', 'CO', 'USIBWC', 'USITC', 'USPS', 'USTDA',
]

# "Where to file" names: identical to the abbreviations in the original code.
agenciesFile = list(agenciesAb)

# Report years to query.  NOTE(review): 2011 was absent from the original
# list as well -- confirm whether skipping it is intentional.
years = ["http://www.foia.gov/foia/Services/processingSimple.jsp?requestYear=%d" % y
         for y in (2008, 2009, 2010, 2012, 2013, 2014, 2015)]

for agency in agenciesFile:
    for year in years:
        urlC = year + "&agencyName="
        # print() works on both Python 2 (single argument) and Python 3;
        # the original used the Python-2-only print statement.
        print(urlC + agency)
"ADF"
] | a24a4e862a30c7e9a80f9661efed3f1c9991f2df731932134b6f2ae900732845 |
"""Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, poly, _polify_basic,
_construct_domain,
_init_poly_from_dict,
_init_poly_from_list,
_init_poly_from_poly,
_init_poly_from_basic,
degree, degree_list,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
cofactors, gcd, lcm, terms_gcd,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, nroots,
cancel,
reduced, groebner,
symmetrize,
horner,
)
from sympy.polys.polyerrors import (
OperationNotSupported,
UnificationFailed,
GeneratorsNeeded,
PolynomialError,
CoercionFailed,
NotAlgebraic,
DomainError,
)
from sympy.polys.monomialtools import (
monomial_lex_cmp,
)
from sympy.polys.polyclasses import GFP, DMP, DMF
from sympy.polys.algebratools import ZZ, QQ, RR, EX
from sympy import S, Integer, Rational, Mul, symbols, sqrt, exp, sin, expand, oo, I
from sympy.utilities.pytest import raises
# Generator and coefficient symbols shared by every test below.
x,y,z,p,q,r,s,t,u,v,w,a,b,c,d,e = symbols('x,y,z,p,q,r,s,t,u,v,w,a,b,c,d,e')
def _eq(a, b):
for x, y in zip(a, b):
if abs(x-y) > 1e-10:
return False
return True
def test__construct_domain():
    """_construct_domain infers the smallest suitable coefficient domain
    (ZZ, QQ, RR, algebraic field, polynomial ring or fraction field)
    from a raw {monomial: coefficient} dict."""
    # plain ints / Integers -> ZZ, or QQ when field=True forces a field
    assert _construct_domain({(0,): 1, (1,): 2}) == \
        (ZZ, {(0,): ZZ(1), (1,): ZZ(2)})
    assert _construct_domain({(0,): 1, (1,): 2}, field=True) == \
        (QQ, {(0,): QQ(1), (1,): QQ(2)})
    assert _construct_domain({(0,): S(1), (1,): S(2)}) == \
        (ZZ, {(0,): ZZ(1), (1,): ZZ(2)})
    assert _construct_domain({(0,): S(1), (1,): S(2)}, field=True) == \
        (QQ, {(0,): QQ(1), (1,): QQ(2)})
    # a single rational promotes everything to QQ
    assert _construct_domain({(0,): S(1)/2, (1,): S(2)}) == \
        (QQ, {(0,): QQ(1,2), (1,): QQ(2)})
    # a single float promotes everything to RR
    assert _construct_domain({(0,): 3.14, (1,): 1, (2,): S(1)/2}) == \
        (RR, {(0,): RR(3.14), (1,): RR(1.0), (2,): RR(0.5)})
    # floats mixed with algebraic numbers fall back to EX
    assert _construct_domain({(0,): 3.14, (1,): sqrt(2)}, extension=None) == \
        (EX, {(0,): EX(3.14), (1,): EX(sqrt(2))})
    assert _construct_domain({(0,): 3.14, (1,): sqrt(2)}, extension=True) == \
        (EX, {(0,): EX(3.14), (1,): EX(sqrt(2))})
    assert _construct_domain({(0,): 1, (1,): sqrt(2)}, extension=None) == \
        (EX, {(0,): EX(1), (1,): EX(sqrt(2))})
    # with extension=True algebraic coefficients build an algebraic field
    ALG = QQ.algebraic_field(sqrt(2))
    assert _construct_domain({(0,): 7, (1,): S(1)/2, (2,): sqrt(2)}, extension=True) == \
        (ALG, {(0,): ALG.convert(7), (1,): ALG.convert(S(1)/2), (2,): ALG.convert(sqrt(2))})
    ALG = QQ.algebraic_field(sqrt(2)+sqrt(3))
    assert _construct_domain({(0,): 7, (1,): sqrt(2), (2,): sqrt(3)}, extension=True) == \
        (ALG, {(0,): ALG.convert(7), (1,): ALG.convert(sqrt(2)), (2,): ALG.convert(sqrt(3))})
    # symbolic coefficients yield polynomial rings ...
    assert _construct_domain({(0,): 2*x, (1,): 3}) == \
        (ZZ[x], {(0,): DMP([2,0], ZZ), (1,): DMP([3], ZZ)})
    assert _construct_domain({(0,): 2*x, (1,): 3*y}) == \
        (ZZ[x,y], {(0,): DMP([[2],[]], ZZ), (1,): DMP([[3,0]], ZZ)})
    assert _construct_domain({(0,): x/2, (1,): 3}) == \
        (QQ[x], {(0,): DMP([QQ(1,2),QQ(0)], QQ), (1,): DMP([QQ(3)], QQ)})
    assert _construct_domain({(0,): x/2, (1,): 3*y}) == \
        (QQ[x,y], {(0,): DMP([[QQ(1,2)],[]], QQ), (1,): DMP([[QQ(3),QQ(0)]], QQ)})
    # ... and symbols in denominators yield fraction fields
    assert _construct_domain({(0,): 2/x, (1,): 3}) == \
        (ZZ.frac_field(x), {(0,): DMF(([2], [1,0]), ZZ), (1,): DMF(([3], [1]), ZZ)})
    assert _construct_domain({(0,): 2/x, (1,): 3*y}) == \
        (ZZ.frac_field(x,y), {(0,): DMF(([[2]], [[1],[]]), ZZ), (1,): DMF(([[3,0]], [[1]]), ZZ)})
def test__init_poly_from_dict():
    """Building a polynomial rep from an {exponent: coeff} dict; both
    integer keys and 1-tuple keys are accepted for univariate input."""
    # multivariate GF(p) polynomials are not supported
    raises(PolynomialError, "_init_poly_from_dict({0: 1, 1: 2}, x, y, modulus=3, domain=ZZ)")
    # coefficients are reduced modulo the modulus
    assert _init_poly_from_dict({0: 1, 1: 2}, x, modulus=3, domain=ZZ) == GFP([2,1], 3, ZZ)
    assert _init_poly_from_dict({0: 1, 1: 5}, x, modulus=3, domain=ZZ) == GFP([2,1], 3, ZZ)
    assert _init_poly_from_dict({(0,): 1, (1,): 2}, x, modulus=3, domain=ZZ) == GFP([2,1], 3, ZZ)
    assert _init_poly_from_dict({(0,): 1, (1,): 5}, x, modulus=3, domain=ZZ) == GFP([2,1], 3, ZZ)
    # a modulus requires a ring domain, not a field
    raises(DomainError, "_init_poly_from_dict({0: 1, 1: 2}, x, modulus=3, domain=QQ)")
    assert _init_poly_from_dict({0: 1, 1: 2}, x) == DMP([ZZ(2),ZZ(1)], ZZ)
    assert _init_poly_from_dict({0: 1, 1: 2}, x, field=True) == DMP([QQ(2),QQ(1)], QQ)
    assert _init_poly_from_dict({0: 1, 1: 2}, x, domain=ZZ) == DMP([ZZ(2),ZZ(1)], ZZ)
    assert _init_poly_from_dict({0: 1, 1: 2}, x, domain=QQ) == DMP([QQ(2),QQ(1)], QQ)
    assert _init_poly_from_dict({(0,): 1, (1,): 2}, x) == DMP([ZZ(2),ZZ(1)], ZZ)
    assert _init_poly_from_dict({(0,): 1, (1,): 2}, x, field=True) == DMP([QQ(2),QQ(1)], QQ)
    assert _init_poly_from_dict({(0,): 1, (1,): 2}, x, domain=ZZ) == DMP([ZZ(2),ZZ(1)], ZZ)
    assert _init_poly_from_dict({(0,): 1, (1,): 2}, x, domain=QQ) == DMP([QQ(2),QQ(1)], QQ)
def test__init_poly_from_list():
    """Building a polynomial rep from a dense coefficient list
    (highest degree first); lists only work for univariate input."""
    raises(PolynomialError, "_init_poly_from_list([[]], x, y)")
    # modular construction reduces coefficients mod 3
    assert _init_poly_from_list([2,1], x, modulus=3, domain=ZZ) == GFP([2,1], 3, ZZ)
    assert _init_poly_from_list([5,1], x, modulus=3, domain=ZZ) == GFP([2,1], 3, ZZ)
    assert _init_poly_from_list([2,1], x) == DMP([ZZ(2),ZZ(1)], ZZ)
    assert _init_poly_from_list([2,1], x, field=True) == DMP([QQ(2),QQ(1)], QQ)
    assert _init_poly_from_list([2,1], x, domain=ZZ) == DMP([ZZ(2),ZZ(1)], ZZ)
    assert _init_poly_from_list([2,1], x, domain=QQ) == DMP([QQ(2),QQ(1)], QQ)
def test__init_poly_from_poly():
    """Re-initialization of an existing Poly with new generators,
    domain or modulus: which conversions succeed, which return raw
    (rep, gens) pairs, and which raise coercion/domain errors.

    Fix: in the ``(h, y, x, domain=QQ)`` case the expected DMP was
    constructed over ZZ while holding QQ coefficients; it now uses QQ,
    mirroring the parallel ``(h, x, y, domain=QQ)`` case.
    """
    f = Poly(x+7, x, domain=ZZ)
    g = Poly(x+2, x, modulus=3)
    h = Poly(x+y, x, y, domain=ZZ)

    # --- univariate integer polynomial ---
    assert _init_poly_from_poly(f) == f
    assert _init_poly_from_poly(f, domain=ZZ) == (DMP([1,7], ZZ), (x,))
    assert _init_poly_from_poly(f, domain=QQ) == (DMP([1,7], QQ), (x,))
    assert _init_poly_from_poly(f, modulus=5) == (GFP([1,2], 5, ZZ), (x,))

    assert _init_poly_from_poly(f, x) == f
    assert _init_poly_from_poly(f, x, domain=ZZ) == (DMP([1,7], ZZ), (x,))
    assert _init_poly_from_poly(f, x, domain=QQ) == (DMP([1,7], QQ), (x,))
    assert _init_poly_from_poly(f, x, modulus=5) == (GFP([1,2], 5, ZZ), (x,))

    # switching to generator y pushes x into the coefficient domain
    assert _init_poly_from_poly(f, y) == Poly(x + 7, y, domain='ZZ[x]')
    raises(CoercionFailed, "_init_poly_from_poly(f, y, domain=ZZ)")
    raises(CoercionFailed, "_init_poly_from_poly(f, y, domain=QQ)")
    raises(CoercionFailed, "_init_poly_from_poly(f, y, modulus=5)")

    assert _init_poly_from_poly(f, x, y) == Poly(x + 7, x, y, domain='ZZ')
    assert _init_poly_from_poly(f, x, y, domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
    assert _init_poly_from_poly(f, x, y, domain=QQ) == Poly(x + 7, x, y, domain='QQ')
    raises(PolynomialError, "_init_poly_from_poly(f, x, y, modulus=3)")

    # --- univariate GF(3) polynomial ---
    assert _init_poly_from_poly(g) == g
    assert _init_poly_from_poly(g, domain=ZZ) == (GFP([1,2], 3, ZZ), (x,))
    raises(DomainError, "_init_poly_from_poly(g, domain=QQ)")
    assert _init_poly_from_poly(g, modulus=2) == (GFP([1,0], 2, ZZ), (x,))

    assert _init_poly_from_poly(g, x) == g
    assert _init_poly_from_poly(g, x, domain=ZZ) == (GFP([1,2], 3, ZZ), (x,))
    raises(DomainError, "_init_poly_from_poly(g, x, domain=QQ)")
    assert _init_poly_from_poly(g, x, modulus=2) == (GFP([1,0], 2, ZZ), (x,))

    # GF(p) polynomials cannot change or extend their generators
    raises(PolynomialError, "_init_poly_from_poly(g, y)")
    raises(PolynomialError, "_init_poly_from_poly(g, y, domain=ZZ)")
    raises(PolynomialError, "_init_poly_from_poly(g, y, domain=QQ)")
    raises(PolynomialError, "_init_poly_from_poly(g, y, modulus=3)")

    raises(PolynomialError, "_init_poly_from_poly(g, x, y)")
    raises(PolynomialError, "_init_poly_from_poly(g, x, y, domain=ZZ)")
    raises(PolynomialError, "_init_poly_from_poly(g, x, y, domain=QQ)")
    raises(PolynomialError, "_init_poly_from_poly(g, x, y, modulus=3)")

    # --- bivariate integer polynomial ---
    assert _init_poly_from_poly(h) == h
    assert _init_poly_from_poly(h, domain=ZZ) == (DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ), (x,y))
    assert _init_poly_from_poly(h, domain=QQ) == (DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ), (x,y))
    raises(PolynomialError, "_init_poly_from_poly(h, modulus=3)")

    # keeping one generator pushes the other into the coefficient domain
    assert _init_poly_from_poly(h, x) == Poly(x+y, x, domain=ZZ[y])
    raises(CoercionFailed, "_init_poly_from_poly(h, x, domain=ZZ)")
    assert _init_poly_from_poly(h, x, domain=ZZ[y]) == Poly(x+y, x, domain=ZZ[y])
    raises(CoercionFailed, "_init_poly_from_poly(h, x, domain=QQ)")
    assert _init_poly_from_poly(h, x, domain=QQ[y]) == Poly(x+y, x, domain=QQ[y])
    raises(CoercionFailed, "_init_poly_from_poly(h, x, modulus=3)")

    assert _init_poly_from_poly(h, y) == Poly(x+y, y, domain=ZZ[x])
    raises(CoercionFailed, "_init_poly_from_poly(h, y, domain=ZZ)")
    assert _init_poly_from_poly(h, y, domain=ZZ[x]) == Poly(x+y, y, domain=ZZ[x])
    raises(CoercionFailed, "_init_poly_from_poly(h, y, domain=QQ)")
    assert _init_poly_from_poly(h, y, domain=QQ[x]) == Poly(x+y, y, domain=QQ[x])
    raises(CoercionFailed, "_init_poly_from_poly(h, y, modulus=3)")

    assert _init_poly_from_poly(h, x, y) == h
    assert _init_poly_from_poly(h, x, y, domain=ZZ) == (DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ), (x,y))
    assert _init_poly_from_poly(h, x, y, domain=QQ) == (DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ), (x,y))
    raises(PolynomialError, "_init_poly_from_poly(h, x, y, modulus=3)")

    # reversed generator order
    assert _init_poly_from_poly(h, y, x) == (DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ), (y, x))
    assert _init_poly_from_poly(h, y, x, domain=ZZ) == (DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ), (y, x))
    assert _init_poly_from_poly(h, y, x, domain=QQ) == (DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ), (y, x))
    raises(PolynomialError, "_init_poly_from_poly(h, y, x, modulus=3)")
def test__init_poly_from_basic():
    """Building a polynomial rep from a plain sympy expression; the
    generators may be given explicitly or inferred from the expression."""
    # ground expressions pass through unchanged
    assert _init_poly_from_basic(S(0)) == 0
    assert _init_poly_from_basic(S(7)) == 7
    # modular construction reduces coefficients mod 3
    assert _init_poly_from_basic(x + 5, modulus=3, domain=ZZ) == (GFP([1,2], 3, ZZ), (x,))
    assert _init_poly_from_basic(y + 5, modulus=3, domain=ZZ) == (GFP([1,2], 3, ZZ), (y,))
    assert _init_poly_from_basic(x + 5, x, modulus=3, domain=ZZ) == (GFP([1,2], 3, ZZ), (x,))
    assert _init_poly_from_basic(y + 5, y, modulus=3, domain=ZZ) == (GFP([1,2], 3, ZZ), (y,))
    # GF(p) polynomials must be univariate
    raises(PolynomialError, "_init_poly_from_basic(x + y, modulus=3, domain=ZZ)")
    raises(PolynomialError, "_init_poly_from_basic(x + y, x, y, modulus=3, domain=ZZ)")
    assert _init_poly_from_basic(x + 5) == (DMP([1,5], ZZ), (x,))
    assert _init_poly_from_basic(y + 5) == (DMP([1,5], ZZ), (y,))
    assert _init_poly_from_basic(x + 5, x) == (DMP([1,5], ZZ), (x,))
    assert _init_poly_from_basic(y + 5, y) == (DMP([1,5], ZZ), (y,))
    assert _init_poly_from_basic(x + 5, domain=ZZ) == (DMP([1,5], ZZ), (x,))
    assert _init_poly_from_basic(y + 5, domain=ZZ) == (DMP([1,5], ZZ), (y,))
    assert _init_poly_from_basic(x + 5, x, domain=ZZ) == (DMP([1,5], ZZ), (x,))
    assert _init_poly_from_basic(y + 5, y, domain=ZZ) == (DMP([1,5], ZZ), (y,))
    # extra generators appear as extra (possibly trivial) levels
    assert _init_poly_from_basic(x + 5, x, y, domain=ZZ) == (DMP([[1],[5]], ZZ), (x,y))
    assert _init_poly_from_basic(y + 5, x, y, domain=ZZ) == (DMP([[1,5]], ZZ), (x,y))
def test_Poly__new__():
    """The Poly constructor: invalid argument combinations raise, and
    coefficients are coerced into the requested domain."""
    # duplicate or mismatched generators are rejected
    raises(PolynomialError, "Poly(x+1, x, x)")
    raises(PolynomialError, "Poly(DMP([1,2], ZZ), x, y)")
    raises(PolynomialError, "Poly(GFP([1,2], 3, ZZ), x, y)")
    # a raw rep already carries its domain/modulus, so these conflict
    raises(PolynomialError, "Poly(DMP([1,2], ZZ), x, domain=ZZ)")
    raises(PolynomialError, "Poly(GFP([1,2], 3, ZZ), x, domain=ZZ)")
    raises(PolynomialError, "Poly(DMP([1,2], ZZ), x, modulus=3)")
    raises(PolynomialError, "Poly(GFP([1,2], 3, ZZ), x, modulus=3)")
    raises(PolynomialError, "Poly(x, x, symmetric=True)")
    # the coefficient domain may not contain a generator
    raises(PolynomialError, "Poly(x+y, x, y, domain=ZZ[x])")
    raises(PolynomialError, "Poly(x+y, x, y, domain=ZZ[y])")
    # mutually exclusive keyword combinations
    raises(PolynomialError, "Poly(x+2, x, modulus=3, domain=QQ)")
    raises(PolynomialError, "Poly(x+2, x, domain=ZZ, gaussian=True)")
    raises(PolynomialError, "Poly(x+2, x, modulus=3, gaussian=True)")
    raises(PolynomialError, "Poly(x+2, x, domain=ZZ, extension=[sqrt(3)])")
    raises(PolynomialError, "Poly(x+2, x, modulus=3, extension=[sqrt(3)])")
    raises(PolynomialError, "Poly(x+2, x, domain=ZZ, extension=True)")
    raises(PolynomialError, "Poly(x+2, x, modulus=3, extension=True)")
    raises(PolynomialError, "Poly(x+2, x, domain=ZZ, greedy=True)")
    raises(PolynomialError, "Poly(x+2, x, domain=QQ, field=True)")
    raises(PolynomialError, "Poly(x+2, x, domain=ZZ, greedy=False)")
    raises(PolynomialError, "Poly(x+2, x, domain=QQ, field=False)")
    raises(PolynomialError, "Poly(x+1, x, modulus=3, order='grlex')")
    raises(NotImplementedError, "Poly(x+1, x, order='grlex')")
    # generators are required unless strict=False is passed
    raises(GeneratorsNeeded, "Poly({1: 2, 0: 1})")
    raises(GeneratorsNeeded, "Poly([2, 1])")
    raises(GeneratorsNeeded, "Poly(1)")
    assert Poly(1, strict=False) == 1
    assert Poly(Poly(a*x + b*y, x, y), x) == Poly(a*x + b*y, x)
    # coefficient coercion between ZZ, QQ and RR
    assert Poly(3*x**2 + 2*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
    assert Poly(3*x**2 + 2*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
    assert Poly(3*x**2 + 2*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
    raises(CoercionFailed, "Poly(3*x**2/5 + 2*x/5 + 1, domain='ZZ')")
    assert Poly(3*x**2/5 + 2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1]
    assert _eq(Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(),
            [0.6, 0.4, 1.0])
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
    raises(CoercionFailed, "Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ')")
    assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1]
    assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0]
    assert Poly({(2,1): 1, (1,2): 2, (1,1): 3}, x, y) == \
        Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y)
    assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I)
    # symmetric vs. asymmetric residue representation modulo a prime
    f = 3*x**5 - x**4 + x**3 - x** 2 + 65538
    assert Poly(f, x, modulus=65537, symmetric=True) == \
        Poly(3*x**5 - x**4 + x**3 - x** 2 + 1, x, modulus=65537, symmetric=True)
    assert Poly(f, x, modulus=65537, symmetric=False) == \
        Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x** 2 + 1, x, modulus=65537, symmetric=False)
def test_Poly__args():
    """The args of a Poly hold its expression form."""
    f = Poly(x**2 + 1)
    assert f.args == [x**2 + 1]
def test_Poly__gens():
    """Generator selection and ordering: explicit generators, the
    ``sort`` keyword, and the ``wrt`` (with-respect-to) keyword."""
    # explicit generators are kept in the given order
    assert Poly((x-p)*(x-q), x).gens == (x,)
    assert Poly((x-p)*(x-q), p).gens == (p,)
    assert Poly((x-p)*(x-q), q).gens == (q,)
    assert Poly((x-p)*(x-q), x, p).gens == (x, p)
    assert Poly((x-p)*(x-q), x, q).gens == (x, q)
    assert Poly((x-p)*(x-q), x, p, q).gens == (x, p, q)
    assert Poly((x-p)*(x-q), p, x, q).gens == (p, x, q)
    assert Poly((x-p)*(x-q), p, q, x).gens == (p, q, x)
    assert Poly((x-p)*(x-q)).gens == (x, p, q)
    # 'sort' reorders inferred generators
    assert Poly((x-p)*(x-q), sort='x < p < q').gens == (x, p, q)
    assert Poly((x-p)*(x-q), sort='p < x < q').gens == (p, x, q)
    assert Poly((x-p)*(x-q), sort='p < q < x').gens == (p, q, x)
    # 'sort' is ignored when generators are given explicitly
    assert Poly((x-p)*(x-q), x, p, q, sort='p < q < x').gens == (x, p, q)
    # 'wrt' moves the named generator to the front
    assert Poly((x-p)*(x-q), wrt='x').gens == (x, p, q)
    assert Poly((x-p)*(x-q), wrt='p').gens == (p, x, q)
    assert Poly((x-p)*(x-q), wrt='q').gens == (q, x, p)
    assert Poly((x-p)*(x-q), wrt=x).gens == (x, p, q)
    assert Poly((x-p)*(x-q), wrt=p).gens == (p, x, q)
    assert Poly((x-p)*(x-q), wrt=q).gens == (q, x, p)
    # 'wrt' is ignored when generators are given explicitly
    assert Poly((x-p)*(x-q), x, p, q, wrt='p').gens == (x, p, q)
    # 'wrt' and 'sort' combine: wrt picks the head, sort orders the rest
    assert Poly((x-p)*(x-q), wrt='p', sort='q < x').gens == (p, q, x)
    assert Poly((x-p)*(x-q), wrt='q', sort='p < x').gens == (q, p, x)
def test_Poly_unify():
    """unify() brings two polynomials to common generators and a common
    coefficient domain.

    Fix: two byte-identical duplicate ``raises`` checks (copy-paste)
    were removed; the assertion set is otherwise unchanged.
    """
    raises(UnificationFailed, "Poly(x).unify(y)")
    # incompatible moduli, generators, or GF/poly mixes cannot unify
    raises(UnificationFailed, "Poly(x, x, modulus=3).unify(Poly(x, x, modulus=5))")
    raises(UnificationFailed, "Poly(x, x, modulus=3).unify(Poly(y, y, modulus=3))")
    raises(UnificationFailed, "Poly(x, x, y).unify(Poly(x, x, modulus=3))")
    raises(UnificationFailed, "Poly(x, x, modulus=3).unify(Poly(x, x, y))")
    # same generators: domains are merged (ZZ + QQ -> QQ)
    assert Poly(x+1, x).unify(Poly(x+2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ))
    assert Poly(x+1, x, domain='QQ').unify(Poly(x+2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
    assert Poly(x+1, x).unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
    # differing generator sets: both sides are lifted to the union
    assert Poly(x+1, x).unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x+1, x, domain='QQ').unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x).unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y).unify(Poly(x+2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x+1, x, y, domain='QQ').unify(Poly(x+2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y).unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y).unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x+1, x, y, domain='QQ').unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y).unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    # generator order of the left operand wins where applicable
    assert Poly(x+1, x).unify(Poly(x+2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x+1, x, domain='QQ').unify(Poly(x+2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, x).unify(Poly(x+2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, y, x).unify(Poly(x+2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x+1, y, x, domain='QQ').unify(Poly(x+2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, y, x).unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, x, y).unify(Poly(x+2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x+1, x, y, domain='QQ').unify(Poly(x+2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y).unify(Poly(x+2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, y, x).unify(Poly(x+2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x+1, y, x, domain='QQ').unify(Poly(x+2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, y, x).unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    # mixing a plain Poly with a GF(p) Poly reduces both modulo p
    assert Poly(2*x+5, x).unify(Poly(x+2, x, modulus=3))[2:] == (GFP([2, 2], 3, ZZ), GFP([1, 2], 3, ZZ))
    assert Poly(x+2, x, modulus=3).unify(Poly(2*x+5, x))[2:] == (GFP([1, 2], 3, ZZ), GFP([2, 2], 3, ZZ))
    assert Poly(x+5, x, modulus=3).unify(Poly(x+7, x, modulus=3))[2:] == (GFP([1, 2], 3, ZZ), GFP([1, 1], 3, ZZ))
    # symmetric representation dominates when the flags differ
    assert Poly(x+5, x, modulus=3, symmetric=True).unify(Poly(x+7, x, modulus=3, symmetric=False))[2:] == \
        (GFP([1, 2], 3, ZZ, symmetric=True), GFP([1, 1], 3, ZZ, symmetric=True))
def test_Poly__analyze_order():
    """The 'order' keyword is resolved to a monomial comparison function."""
    assert Poly._analyze_order({}) is None
    assert Poly._analyze_order(dict(order='lex')) == monomial_lex_cmp
    # anything that is not a known order specifier is rejected
    raises(ValueError, "Poly._analyze_order({'order': 1})")
def test_Poly__analyze_domain():
    """The 'domain' keyword accepts a domain object or its string name."""
    assert Poly._analyze_domain({}) is None
    for spec in [ZZ, 'ZZ']:
        assert Poly._analyze_domain({'domain': spec}) == ZZ
def test_Poly__parse_domain():
    """Parsing domain specifications: domain objects pass through, and
    strings like 'ZZ[x]', 'QQ(x,y)' or 'QQ<sqrt(2), I>' are parsed into
    rings, fraction fields and algebraic number fields."""
    # domain objects are returned as-is
    assert Poly._parse_domain(ZZ) == ZZ
    assert Poly._parse_domain(QQ) == QQ
    assert Poly._parse_domain(EX) == EX
    assert Poly._parse_domain(ZZ[x,y]) == ZZ[x,y]
    # short and long string aliases for the base domains
    assert Poly._parse_domain('Z') == ZZ
    assert Poly._parse_domain('Q') == QQ
    assert Poly._parse_domain('ZZ') == ZZ
    assert Poly._parse_domain('QQ') == QQ
    assert Poly._parse_domain('EX') == EX
    # D[gens] denotes a polynomial ring (empty gens are invalid)
    raises(ValueError, "Poly._parse_domain('Z[]')")
    assert Poly._parse_domain('Z[x]') == ZZ[x]
    assert Poly._parse_domain('Q[x]') == QQ[x]
    assert Poly._parse_domain('ZZ[x]') == ZZ[x]
    assert Poly._parse_domain('QQ[x]') == QQ[x]
    assert Poly._parse_domain('Z[x,y]') == ZZ[x,y]
    assert Poly._parse_domain('Q[x,y]') == QQ[x,y]
    assert Poly._parse_domain('ZZ[x,y]') == ZZ[x,y]
    assert Poly._parse_domain('QQ[x,y]') == QQ[x,y]
    # D(gens) denotes a fraction field
    raises(ValueError, "Poly._parse_domain('Z()')")
    assert Poly._parse_domain('Z(x)') == ZZ.frac_field(x)
    assert Poly._parse_domain('Q(x)') == QQ.frac_field(x)
    assert Poly._parse_domain('ZZ(x)') == ZZ.frac_field(x)
    assert Poly._parse_domain('QQ(x)') == QQ.frac_field(x)
    assert Poly._parse_domain('Z(x,y)') == ZZ.frac_field(x,y)
    assert Poly._parse_domain('Q(x,y)') == QQ.frac_field(x,y)
    assert Poly._parse_domain('ZZ(x,y)') == ZZ.frac_field(x,y)
    assert Poly._parse_domain('QQ(x,y)') == QQ.frac_field(x,y)
    # Q<exts> denotes an algebraic number field
    assert Poly._parse_domain('Q<I>') == QQ.algebraic_field(I)
    assert Poly._parse_domain('QQ<I>') == QQ.algebraic_field(I)
    assert Poly._parse_domain('Q<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
    assert Poly._parse_domain('QQ<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
def test_Poly_get_domain():
    """get_domain() reports the inferred or explicitly requested domain."""
    assert Poly(2*x).get_domain() == ZZ
    for spec, dom in [('ZZ', ZZ), ('QQ', QQ)]:
        assert Poly(2*x, domain=spec).get_domain() == dom
    assert Poly(x/2).get_domain() == QQ
    raises(CoercionFailed, "Poly(x/2, domain='ZZ')")
    assert Poly(x/2, domain='QQ').get_domain() == QQ
    assert Poly(0.2*x).get_domain() == RR
def test_Poly_set_domain():
    """set_domain() converts the coefficient domain where possible."""
    for dom in [ZZ, 'ZZ']:
        assert Poly(2*x + 1).set_domain(dom) == Poly(2*x + 1)
    for dom in [QQ, 'QQ']:
        assert Poly(2*x + 1).set_domain(dom) == Poly(2*x + 1, domain='QQ')
    # QQ <-> RR round-trips
    assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1)
    assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10)
    # impossible conversions
    raises(CoercionFailed, "Poly(x/2 + 1).set_domain(ZZ)")
    raises(DomainError, "Poly(x + 1, modulus=2).set_domain(QQ)")
def test_Poly__analyze_modulus():
    """The 'modulus' keyword is extracted as a plain integer."""
    assert Poly._analyze_modulus({}) is None
    for mod in [2, Integer(2)]:
        assert Poly._analyze_modulus({'modulus': mod}) == 2
def test_Poly__parse_modulus():
    """Only integers greater than one are accepted as moduli."""
    for mod in [5, Integer(5)]:
        assert Poly._parse_modulus(mod) == 5
    raises(ValueError, "Poly._parse_modulus(1)")
    raises(ValueError, "Poly._parse_modulus(x)")
def test_Poly_get_modulus():
    """get_modulus() returns the modulus of a GF(p) polynomial and
    raises PolynomialError when no modulus was set.

    Fix: the first check was a bare comparison with no ``assert``, so
    its result was computed and silently discarded.
    """
    assert Poly(x**2 + 1, modulus=2).get_modulus() == 2
    raises(PolynomialError, "Poly(x**2 + 1).get_modulus()")
def test_Poly_set_modulus():
    """set_modulus() re-reduces the coefficients modulo a new modulus.

    Fix: the first three checks were bare comparisons with no
    ``assert``, so they never actually verified anything.
    """
    assert Poly(x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7)
    assert Poly(x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2)
    assert Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2)
    # rational coefficients cannot live in GF(2)
    raises(PolynomialError, "Poly(x/2 + 1).set_modulus(2)")
def test_Poly__analyze_extension():
    """The 'extension'/'gaussian'/'split' keywords are normalized into
    either None, True, or a set of algebraic extension elements."""
    assert Poly._analyze_extension({}) is None
    assert Poly._analyze_extension({'extension': []}) is None
    # single elements and lists both become sets
    assert Poly._analyze_extension({'extension': sqrt(2)}) == set([sqrt(2)])
    assert Poly._analyze_extension({'extension': [sqrt(2),sqrt(3)]}) == set([sqrt(2),sqrt(3)])
    assert Poly._analyze_extension({'extension': True}) is True
    assert Poly._analyze_extension({'extension': False}) is None
    # gaussian=True is shorthand for extension=I
    assert Poly._analyze_extension({'extension': I}) == set([I])
    assert Poly._analyze_extension({'gaussian': True}) == set([I])
    # the three keywords are mutually exclusive
    raises(PolynomialError, "Poly._analyze_extension({'gaussian': True, 'extension': I})")
    raises(PolynomialError, "Poly._analyze_extension({'gaussian': True, 'split': True})")
    raises(PolynomialError, "Poly._analyze_extension({'extension': I, 'split': True})")
    raises(NotImplementedError, "Poly._analyze_extension({'split': True})")
def test_Poly_abs():
    """The abs() method and the abs() builtin agree coefficient-wise."""
    expected = Poly(x+1, x)
    assert Poly(-x+1, x).abs() == expected
    assert abs(Poly(-x+1, x)) == expected
def test_Poly_neg():
    """The neg() method and unary minus agree."""
    expected = Poly(x-1, x)
    assert Poly(-x+1, x).neg() == expected
    assert -Poly(-x+1, x) == expected
def test_Poly_add():
    """Addition via .add() and the + operator, including mixed
    generator sets and non-Poly right operands."""
    for f, g, h in [(Poly(0, x), Poly(0, x), Poly(0, x)),
                    (Poly(1, x), Poly(0, x), Poly(1, x)),
                    (Poly(0, x), Poly(1, x, y), Poly(1, x, y))]:
        assert f.add(g) == h
    for f, g, h in [(Poly(0, x), Poly(0, x), Poly(0, x)),
                    (Poly(1, x, y), Poly(0, x), Poly(1, x, y)),
                    (Poly(0, x, y), Poly(1, x, y), Poly(1, x, y))]:
        assert f + g == h
    # adding plain expressions
    assert Poly(1, x) + x == Poly(x+1, x)
    assert Poly(1, x) + sin(x) == 1+sin(x)
def test_Poly_sub():
    """Subtraction via .sub() and the - operator, including mixed
    generator sets and non-Poly right operands."""
    for f, g, h in [(Poly(0, x), Poly(0, x), Poly(0, x)),
                    (Poly(1, x), Poly(0, x), Poly(1, x)),
                    (Poly(0, x), Poly(1, x, y), Poly(-1, x, y))]:
        assert f.sub(g) == h
    for f, g, h in [(Poly(0, x), Poly(0, x), Poly(0, x)),
                    (Poly(1, x, y), Poly(0, x), Poly(1, x, y)),
                    (Poly(0, x, y), Poly(1, x, y), Poly(-1, x, y))]:
        assert f - g == h
    # subtracting plain expressions
    assert Poly(1, x) - x == Poly(1-x, x)
    assert Poly(1, x) - sin(x) == 1-sin(x)
def test_Poly_mul():
    """Multiplication via .mul() and the * operator, including mixed
    generator sets and non-Poly right operands."""
    for f, g, h in [(Poly(0, x), Poly(0, x), Poly(0, x)),
                    (Poly(2, x), Poly(4, x), Poly(8, x)),
                    (Poly(4, x), Poly(2, x, y), Poly(8, x, y))]:
        assert f.mul(g) == h
    for f, g, h in [(Poly(0, x), Poly(0, x), Poly(0, x)),
                    (Poly(2, x, y), Poly(4, x), Poly(8, x, y)),
                    (Poly(4, x, y), Poly(2, x, y), Poly(8, x, y))]:
        assert f * g == h
    # multiplying by plain expressions
    assert Poly(1, x) * x == Poly(x, x)
    assert Poly(1, x) * sin(x) == sin(x)
def test_Poly_sqr():
    """sqr() squares the polynomial."""
    f = Poly(x*y, x, y)
    assert f.sqr() == Poly(x**2*y**2, x, y)
def test_Poly_pow():
    """Integer powers (Python int and Integer exponents) plus the
    symbolic fallback for non-polynomial exponents."""
    for e in [10, Integer(10)]:
        assert Poly(x, x).pow(e) == Poly(x**10, x)
    for e in [4, Integer(4)]:
        assert Poly(2*y, x, y).pow(e) == Poly(16*y**4, x, y)
    assert Poly(7*x*y, x, y)**3 == Poly(343*x**3*y**3, x, y)
    # negative or symbolic exponents fall back to plain expressions
    assert Poly(x*y+1, x, y)**(-1) == (x*y+1)**(-1)
    assert Poly(x*y+1, x, y)**x == (x*y+1)**x
def test_Poly_divmod():
    """divmod, // and % agree whether the divisor is a Poly or a
    plain expression."""
    f = Poly(x**2)
    quotient, remainder = Poly(x), Poly(0, x)
    for divisor in [Poly(x), x]:
        assert divmod(f, divisor) == (quotient, remainder)
        assert f // divisor == quotient
        assert f % divisor == remainder
def test_Poly_eq_ne():
    """Equality between Polys compares the generator tuple too, while
    comparison against a plain expression only compares the expression.

    Fix: four byte-identical duplicate assertions (copy-paste) were
    removed; the assertion set is otherwise unchanged.
    """
    # Poly vs Poly: generators must match
    assert (Poly(x+y, x, y) == Poly(x+y, x, y)) == True
    assert (Poly(x+y, x) == Poly(x+y, x, y)) == False
    assert (Poly(x+y, x, y) == Poly(x+y, x)) == False
    assert (Poly(x+y, x) == Poly(x+y, x)) == True
    assert (Poly(x+y, y) == Poly(x+y, y)) == True
    # Poly vs Basic: only the underlying expression matters
    assert (Poly(x+y, x, y) == x+y) == True
    assert (Poly(x+y, x) == x+y) == True
    assert (Poly(x+y, y) == x+y) == True
    # same cases through !=
    assert (Poly(x+y, x, y) != Poly(x+y, x, y)) == False
    assert (Poly(x+y, x) != Poly(x+y, x, y)) == True
    assert (Poly(x+y, x, y) != Poly(x+y, x)) == True
    assert (Poly(x+y, x) != Poly(x+y, x)) == False
    assert (Poly(x+y, y) != Poly(x+y, y)) == False
    assert (Poly(x+y, x, y) != x+y) == False
    assert (Poly(x+y, x) != x+y) == False
    assert (Poly(x+y, y) != x+y) == False
    # a Poly never equals a non-polynomial expression
    assert (Poly(x, x) == sin(x)) == False
    assert (Poly(x, x) != sin(x)) == True
def test_Poly_nonzero():
    """Truthiness: the zero polynomial is falsy, all others are truthy."""
    assert bool(Poly(0, x)) == False
    assert bool(Poly(1, x)) == True
def test_Poly_properties():
    """Boolean structural properties of Poly instances."""
    assert Poly(0, x).is_zero == True
    assert Poly(1, x).is_zero == False
    assert Poly(1, x).is_one == True
    assert Poly(2, x).is_one == False
    # square-free: no repeated roots
    assert Poly(x-1, x).is_sqf == True
    assert Poly((x-1)**2, x).is_sqf == False
    # monic: leading coefficient is one
    assert Poly(x-1, x).is_monic == True
    assert Poly(2*x-1, x).is_monic == False
    # primitive: coefficient content is one
    assert Poly(3*x+2, x).is_primitive == True
    assert Poly(4*x+2, x).is_primitive == False
    assert Poly(1, x).is_ground == True
    assert Poly(x, x).is_ground == False
    # linear here means degree at most one in each generator
    assert Poly(x*y*z+1).is_linear == True
    assert Poly(x**2*y*z+1).is_linear == False
    assert Poly(x*y).is_monomial == True
    assert Poly(x*y+1).is_monomial == False
    # homogeneous: all terms have the same total degree
    assert Poly(x*y+x).is_homogeneous == True
    assert Poly(x*y+x+1).is_homogeneous == False
    assert Poly(x).is_univariate == True
    assert Poly(x*y).is_univariate == False
    assert Poly(x*y).is_multivariate == True
    assert Poly(x).is_multivariate == False
def test_Poly_is_irreducible():
    """Irreducibility testing over a prime modulus."""
    assert Poly(7*x**2 + 3*x + 1, modulus=11).is_irreducible == False
    assert Poly(7*x + 3, modulus=11).is_irreducible == True
def test_Poly_replace():
    """replace() substitutes one generator by another symbol; the new
    symbol must not collide with an existing generator."""
    # one-argument form: only valid for univariate polynomials
    assert Poly(x+1).replace(x) == Poly(x+1)
    assert Poly(x+1).replace(y) == Poly(y+1)
    raises(PolynomialError, "Poly(x+y).replace(z)")
    # two-argument form: replace(old, new)
    assert Poly(x+1).replace(x, x) == Poly(x+1)
    assert Poly(x+1).replace(x, y) == Poly(y+1)
    assert Poly(x+y).replace(x, x) == Poly(x+y)
    assert Poly(x+y).replace(x, z) == Poly(z+y, z, y)
    assert Poly(x+y).replace(y, y) == Poly(x+y)
    assert Poly(x+y).replace(y, z) == Poly(x+z, x, z)
    # replacing with an existing generator, or a missing one, fails
    raises(PolynomialError, "Poly(x+y).replace(x, y)")
    raises(PolynomialError, "Poly(x+y).replace(z, t)")
    # y here is part of the coefficient domain, not a generator
    assert Poly(x+y, x).replace(x, z) == Poly(z+y, z)
    assert Poly(x+y, y).replace(y, z) == Poly(x+z, z)
    raises(PolynomialError, "Poly(x+y, x).replace(x, y)")
    raises(PolynomialError, "Poly(x+y, y).replace(y, x)")
def test_Poly_reorder():
    """reorder() permutes existing generators, explicitly or via wrt."""
    # only the polynomial's own generators may be used
    raises(PolynomialError, "Poly(x+y).reorder(x, z)")
    assert Poly(x + y, x, y).reorder(x, y) == Poly(x + y, x, y)
    assert Poly(x + y, x, y).reorder(y, x) == Poly(x + y, y, x)
    assert Poly(x + y, y, x).reorder(x, y) == Poly(x + y, x, y)
    assert Poly(x + y, y, x).reorder(y, x) == Poly(x + y, y, x)
    # wrt moves the named generator to the front
    assert Poly(x + y, x, y).reorder(wrt=x) == Poly(x + y, x, y)
    assert Poly(x + y, x, y).reorder(wrt=y) == Poly(x + y, y, x)
def test_Poly_to_ring():
    """to_ring() returns to ZZ when the coefficients allow it."""
    for dom in ['ZZ', 'QQ']:
        assert Poly(2*x+1, domain=dom).to_ring() == Poly(2*x+1, domain='ZZ')
    raises(CoercionFailed, "Poly(x/2+1).to_ring()")
def test_Poly_to_field():
    """to_field() always lands in QQ."""
    for dom in ['ZZ', 'QQ']:
        assert Poly(2*x+1, domain=dom).to_field() == Poly(2*x+1, domain='QQ')
    assert Poly(x/2+1, domain='QQ').to_field() == Poly(x/2+1, domain='QQ')
def test_Poly_coeffs():
    """coeffs() lists coefficients of stored terms, leading term first
    (the zero polynomial yields [0])."""
    for expr, expected in [(0, [0]),
                           (1, [1]),
                           (2*x+1, [2,1]),
                           (7*x**2+2*x+1, [7,2,1]),
                           (7*x**4+2*x+1, [7,2,1])]:
        assert Poly(expr, x).coeffs() == expected
def test_Poly_monoms():
    """monoms() lists exponent tuples of stored terms, highest first."""
    for expr, expected in [(0, [(0,)]),
                           (1, [(0,)]),
                           (2*x+1, [(1,),(0,)]),
                           (7*x**2+2*x+1, [(2,),(1,),(0,)]),
                           (7*x**4+2*x+1, [(4,),(1,),(0,)])]:
        assert Poly(expr, x).monoms() == expected
def test_Poly_terms():
    """terms() pairs each exponent tuple with its coefficient."""
    for expr, expected in [(0, [((0,), 0)]),
                           (1, [((0,), 1)]),
                           (2*x+1, [((1,), 2),((0,), 1)]),
                           (7*x**2+2*x+1, [((2,), 7),((1,), 2),((0,), 1)]),
                           (7*x**4+2*x+1, [((4,), 7),((1,), 2),((0,), 1)])]:
        assert Poly(expr, x).terms() == expected
def test_Poly_all_coeffs():
    """all_coeffs() is the dense coefficient list, zeros included."""
    for expr, expected in [(0, [0]),
                           (1, [1]),
                           (2*x+1, [2,1]),
                           (7*x**2+2*x+1, [7,2,1]),
                           (7*x**4+2*x+1, [7,0,0,2,1])]:
        assert Poly(expr, x).all_coeffs() == expected
def test_Poly_all_monoms():
    """all_monoms() lists every exponent tuple down to degree zero."""
    for expr, expected in [(0, [(0,)]),
                           (1, [(0,)]),
                           (2*x+1, [(1,),(0,)]),
                           (7*x**2+2*x+1, [(2,),(1,),(0,)]),
                           (7*x**4+2*x+1, [(4,),(3,),(2,),(1,),(0,)])]:
        assert Poly(expr, x).all_monoms() == expected
def test_Poly_all_terms():
    """all_terms() is the dense (monomial, coefficient) list."""
    for expr, expected in [(0, [((0,), 0)]),
                           (1, [((0,), 1)]),
                           (2*x+1, [((1,), 2),((0,), 1)]),
                           (7*x**2+2*x+1, [((2,), 7),((1,), 2),((0,), 1)]),
                           (7*x**4+2*x+1, [((4,), 7),((3,),0),((2,),0),((1,), 2),((0,), 1)])]:
        assert Poly(expr, x).all_terms() == expected
def test_Poly_length():
    """length() counts the nonzero terms."""
    for expr, n in [(0, 0), (1, 1), (x, 1),
                    (x+1, 2), (x**2+1, 2), (x**2+x+1, 3)]:
        assert Poly(expr, x).length() == n
def test_Poly_as_dict():
    """as_dict() maps exponent tuples to nonzero coefficients
    (the zero polynomial yields an empty dict)."""
    cases = [
        (Poly(0, x), {}),
        (Poly(0, x, y, z), {}),
        (Poly(1, x), {(0,): 1}),
        (Poly(1, x, y, z), {(0,0,0): 1}),
        (Poly(x**2+3, x), {(2,): 1, (0,): 3}),
        (Poly(x**2+3, x, y, z), {(2,0,0): 1, (0,0,0): 3}),
        (Poly(3*x**2*y*z**3+4*x*y+5*x*z), {(2,1,3): 3, (1,1,0): 4, (1,0,1): 5}),
    ]
    for f, expected in cases:
        assert f.as_dict() == expected
def test_Poly_as_basic():
    """as_basic() rebuilds the plain sympy expression."""
    cases = [
        (Poly(0, x), 0),
        (Poly(0, x, y, z), 0),
        (Poly(1, x), 1),
        (Poly(1, x, y, z), 1),
        (Poly(x**2+3, x), x**2+3),
        (Poly(x**2+3, x, y, z), x**2+3),
        (Poly(3*x**2*y*z**3+4*x*y+5*x*z), 3*x**2*y*z**3+4*x*y+5*x*z),
    ]
    for f, expr in cases:
        assert f.as_basic() == expr
def test_Poly_lift():
    """lift() produces a rational polynomial from one with algebraic
    (here Gaussian) coefficients."""
    f = Poly(x**4 - I*x + 17*I, x, gaussian=True)
    expected = Poly(x**16 + 2*x**10 + 578*x**8 + x**4 - 578*x**2 + 83521, x, domain='QQ')
    assert f.lift() == expected
def test_Poly_deflate():
    """deflate() factors out the gcd of exponents per generator,
    returning the exponent tuple and the deflated polynomial."""
    univariate_cases = [
        (Poly(0, x), (1,), Poly(0, x)),
        (Poly(1, x), (1,), Poly(1, x)),
        (Poly(x, x), (1,), Poly(x, x)),
        (Poly(x**2, x), (2,), Poly(x, x)),
        (Poly(x**17, x), (17,), Poly(x, x)),
    ]
    for f, exponents, deflated in univariate_cases:
        assert f.deflate() == (exponents, deflated)
    assert Poly(x**2*y*z**11+x**4*z**11).deflate() == ((2,1,11), Poly(x*y*z+x**2*z))
def test_Poly__gen_to_level():
    """_gen_to_level resolves an index, symbol or name into the
    position of a generator; negative indices count from the end."""
    assert Poly(1, x, y)._gen_to_level(-2) == 0
    assert Poly(1, x, y)._gen_to_level(-1) == 1
    assert Poly(1, x, y)._gen_to_level( 0) == 0
    assert Poly(1, x, y)._gen_to_level( 1) == 1
    # out-of-range indices are rejected
    raises(PolynomialError, "Poly(1, x, y)._gen_to_level(-3)")
    raises(PolynomialError, "Poly(1, x, y)._gen_to_level( 2)")
    # symbols and names are accepted too
    assert Poly(1, x, y)._gen_to_level(x) == 0
    assert Poly(1, x, y)._gen_to_level(y) == 1
    assert Poly(1, x, y)._gen_to_level('x') == 0
    assert Poly(1, x, y)._gen_to_level('y') == 1
    # unknown generators are rejected
    raises(PolynomialError, "Poly(1, x, y)._gen_to_level(z)")
    raises(PolynomialError, "Poly(1, x, y)._gen_to_level('z')")
def test_Poly_degree():
    """Poly.degree()/degree() in a chosen generator; zero poly has degree -1.

    The generator may be given as an index, a symbol, or a name string;
    unknown generators raise PolynomialError.
    """
    assert Poly(0, x).degree() ==-1
    assert Poly(1, x).degree() == 0
    assert Poly(x, x).degree() == 1
    assert Poly(0, x).degree(gen=0) ==-1
    assert Poly(1, x).degree(gen=0) == 0
    assert Poly(x, x).degree(gen=0) == 1
    assert Poly(0, x).degree(gen=x) ==-1
    assert Poly(1, x).degree(gen=x) == 0
    assert Poly(x, x).degree(gen=x) == 1
    assert Poly(0, x).degree(gen='x') ==-1
    assert Poly(1, x).degree(gen='x') == 0
    assert Poly(x, x).degree(gen='x') == 1
    raises(PolynomialError, "Poly(1, x).degree(gen=1)")
    raises(PolynomialError, "Poly(1, x).degree(gen=y)")
    raises(PolynomialError, "Poly(1, x).degree(gen='y')")
    assert Poly(1, x, y).degree() == 0
    assert Poly(2*y, x, y).degree() == 0
    assert Poly(x*y, x, y).degree() == 1
    assert Poly(1, x, y).degree(gen=x) == 0
    assert Poly(2*y, x, y).degree(gen=x) == 0
    assert Poly(x*y, x, y).degree(gen=x) == 1
    assert Poly(1, x, y).degree(gen=y) == 0
    assert Poly(2*y, x, y).degree(gen=y) == 1
    assert Poly(x*y, x, y).degree(gen=y) == 1
    assert degree(1, x) == 0
    assert degree(x, x) == 1
    assert degree(x*y**2, gen=x) == 1
    assert degree(x*y**2, gen=y) == 2
    # When several generators are given, degree() reports the first one.
    assert degree(x*y**2, x, y) == 1
    assert degree(x*y**2, y, x) == 2
    raises(GeneratorsNeeded, "degree(1)")
def test_Poly_degree_list():
    """degree_list() returns the degree in every generator as a tuple."""
    assert Poly(0, x).degree_list() == (-1,)
    assert Poly(0, x, y).degree_list() == (-1,-1)
    assert Poly(0, x, y, z).degree_list() == (-1,-1,-1)
    assert Poly(1, x).degree_list() == (0,)
    assert Poly(1, x, y).degree_list() == (0,0)
    assert Poly(1, x, y, z).degree_list() == (0,0,0)
    assert Poly(x**2*y+x**3*z**2+1).degree_list() == (3,1,2)
    assert degree_list(1, x) == (0,)
    assert degree_list(x, x) == (1,)
    assert degree_list(x*y**2) == (1,2)
    raises(GeneratorsNeeded, "degree_list(1)")
def test_Poly_total_degree():
    """total_degree() is the maximal sum of exponents over all monomials."""
    assert Poly(x**2*y+x**3*z**2+1).total_degree() == 6
def test_Poly_LC():
    """LC() is the leading coefficient."""
    assert Poly(0, x).LC() == 0
    assert Poly(1, x).LC() == 1
    assert Poly(2*x**2+x, x).LC() == 2
def test_Poly_TC():
    """TC() is the trailing (constant-term) coefficient."""
    assert Poly(0, x).TC() == 0
    assert Poly(1, x).TC() == 1
    assert Poly(2*x**2+x, x).TC() == 0
def test_Poly_EC():
    """EC() is the last non-zero (ending) coefficient."""
    assert Poly(0, x).EC() == 0
    assert Poly(1, x).EC() == 1
    assert Poly(2*x**2+x, x).EC() == 1
def test_Poly_nth():
    """nth() fetches the coefficient of a given monomial (0 if absent)."""
    assert Poly(0, x).nth(0) == 0
    assert Poly(0, x).nth(1) == 0
    assert Poly(1, x).nth(0) == 1
    assert Poly(1, x).nth(1) == 0
    assert Poly(x**8, x).nth(0) == 0
    assert Poly(x**8, x).nth(7) == 0
    assert Poly(x**8, x).nth(8) == 1
    assert Poly(x**8, x).nth(9) == 0
    assert Poly(3*x*y**2 + 1).nth(0, 0) == 1
    assert Poly(3*x*y**2 + 1).nth(1, 2) == 3
def test_Poly_LM():
    """LM() is the leading monomial as an exponent tuple."""
    assert Poly(0, x).LM() == (0,)
    assert Poly(1, x).LM() == (0,)
    assert Poly(2*x**2+x, x).LM() == (2,)
def test_Poly_EM():
    """EM() is the ending (lowest non-zero) monomial."""
    assert Poly(0, x).EM() == (0,)
    assert Poly(1, x).EM() == (0,)
    assert Poly(2*x**2+x, x).EM() == (1,)
def test_Poly_LT():
    """LT() is the leading term as (monomial, coefficient)."""
    assert Poly(0, x).LT() == ((0,), 0)
    assert Poly(1, x).LT() == ((0,), 1)
    assert Poly(2*x**2+x, x).LT() == ((2,), 2)
def test_Poly_ET():
    """ET() is the ending term as (monomial, coefficient)."""
    assert Poly(0, x).ET() == ((0,), 0)
    assert Poly(1, x).ET() == ((0,), 1)
    assert Poly(2*x**2+x, x).ET() == ((1,), 1)
def test_Poly_max_norm():
    """max_norm() is the maximal absolute coefficient."""
    assert Poly(-1, x).max_norm() == 1
    assert Poly( 0, x).max_norm() == 0
    assert Poly( 1, x).max_norm() == 1
def test_Poly_l1_norm():
    """l1_norm() is the sum of absolute coefficients."""
    assert Poly(-1, x).l1_norm() == 1
    assert Poly( 0, x).l1_norm() == 0
    assert Poly( 1, x).l1_norm() == 1
def test_Poly_ground_to_ring():
    """ground_to_ring() clears denominators, returning (multiplier, poly)."""
    assert Poly(2*x + 1).ground_to_ring() == (1, Poly(2*x + 1, domain='ZZ'))
    assert Poly(x/2 + 1).ground_to_ring() == (2, Poly(x + 2, domain='QQ'))
def test_Poly_integrate():
    """integrate() computes antiderivatives; accepts symbols or (gen, order)."""
    assert Poly(x + 1).integrate() == Poly(x**2/2 + x)
    assert Poly(x + 1).integrate(x) == Poly(x**2/2 + x)
    assert Poly(x + 1).integrate((x, 1)) == Poly(x**2/2 + x)
    assert Poly(x*y + 1).integrate(x) == Poly(x**2*y/2 + x)
    assert Poly(x*y + 1).integrate(y) == Poly(x*y**2/2 + y)
    assert Poly(x*y + 1).integrate(x, x) == Poly(x**3*y/6 + x**2/2)
    assert Poly(x*y + 1).integrate(y, y) == Poly(x*y**3/6 + y**2/2)
    assert Poly(x*y + 1).integrate((x, 2)) == Poly(x**3*y/6 + x**2/2)
    assert Poly(x*y + 1).integrate((y, 2)) == Poly(x*y**3/6 + y**2/2)
    assert Poly(x*y + 1).integrate(x, y) == Poly(x**2*y**2/4 + x*y)
    assert Poly(x*y + 1).integrate(y, x) == Poly(x**2*y**2/4 + x*y)
def test_Poly_diff():
    """diff() computes partial derivatives; accepts symbols or (gen, order)."""
    assert Poly(x**2 + x).diff() == Poly(2*x + 1)
    assert Poly(x**2 + x).diff(x) == Poly(2*x + 1)
    assert Poly(x**2 + x).diff((x, 1)) == Poly(2*x + 1)
    assert Poly(x**2*y**2 + x*y).diff(x) == Poly(2*x*y**2 + y)
    assert Poly(x**2*y**2 + x*y).diff(y) == Poly(2*x**2*y + x)
    assert Poly(x**2*y**2 + x*y).diff(x, x) == Poly(2*y**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff(y, y) == Poly(2*x**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff((x, 2)) == Poly(2*y**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff((y, 2)) == Poly(2*x**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff(x, y) == Poly(4*x*y + 1)
    assert Poly(x**2*y**2 + x*y).diff(y, x) == Poly(4*x*y + 1)
def test_Poly_eval():
    """eval() substitutes a value for one generator, eliminating it.

    The generator may be an index, a symbol, or a name; the value must be
    coercible into the ground domain (CoercionFailed otherwise).
    """
    assert Poly(0, x).eval(7) == 0
    assert Poly(1, x).eval(7) == 1
    assert Poly(x, x).eval(7) == 7
    assert Poly(0, x).eval(7, gen=0) == 0
    assert Poly(1, x).eval(7, gen=0) == 1
    assert Poly(x, x).eval(7, gen=0) == 7
    assert Poly(0, x).eval(7, gen=x) == 0
    assert Poly(1, x).eval(7, gen=x) == 1
    assert Poly(x, x).eval(7, gen=x) == 7
    assert Poly(0, x).eval(7, gen='x') == 0
    assert Poly(1, x).eval(7, gen='x') == 1
    assert Poly(x, x).eval(7, gen='x') == 7
    raises(PolynomialError, "Poly(1, x).eval(7, gen=1)")
    raises(PolynomialError, "Poly(1, x).eval(7, gen=y)")
    raises(PolynomialError, "Poly(1, x).eval(7, gen='y')")
    assert Poly(1, x, y).eval(7) == Poly(1, y)
    assert Poly(2*y, x, y).eval(7) == Poly(2*y, y)
    assert Poly(x*y, x, y).eval(7) == Poly(7*y, y)
    assert Poly(1, x, y).eval(7, gen=x) == Poly(1, y)
    assert Poly(2*y, x, y).eval(7, gen=x) == Poly(2*y, y)
    assert Poly(x*y, x, y).eval(7, gen=x) == Poly(7*y, y)
    assert Poly(1, x, y).eval(7, gen=y) == Poly(1, x)
    assert Poly(2*y, x, y).eval(7, gen=y) == Poly(14, x)
    assert Poly(x*y, x, y).eval(7, gen=y) == Poly(7*x, x)
    raises(CoercionFailed, "Poly(x+1, domain='ZZ').eval(S(1)/2)")
def test__polify_basic():
    """_polify_basic() coerces a pair of expressions/Polys to unified Polys.

    Generators can be passed explicitly or inferred; two plain numbers have
    no generators to infer, hence CoercionFailed.
    """
    assert _polify_basic(x-1, x**2-1, x) == (Poly(x-1, x), Poly(x**2-1, x))
    assert _polify_basic(Poly(x-1, x), x**2-1, x) == (Poly(x-1, x), Poly(x**2-1, x))
    assert _polify_basic(x-1, Poly(x**2-1, x), x) == (Poly(x-1, x), Poly(x**2-1, x))
    assert _polify_basic(Poly(x-1, x), Poly(x**2-1, x), x) == (Poly(x-1, x), Poly(x**2-1, x))
    assert _polify_basic(x-1, x**2-1, x, y) == (Poly(x-1, x, y), Poly(x**2-1, x, y))
    assert _polify_basic(Poly(x-1, x), x**2-1, x, y) == (Poly(x-1, x, y), Poly(x**2-1, x, y))
    assert _polify_basic(x-1, Poly(x**2-1, x), x, y) == (Poly(x-1, x, y), Poly(x**2-1, x, y))
    assert _polify_basic(Poly(x-1, x), Poly(x**2-1, x), x, y) == (Poly(x-1, x, y), Poly(x**2-1, x, y))
    assert _polify_basic(x-1, x**2-1) == (Poly(x-1, x), Poly(x**2-1, x))
    assert _polify_basic(Poly(x-1, x), x**2-1) == (Poly(x-1, x), Poly(x**2-1, x))
    assert _polify_basic(x-1, Poly(x**2-1, x)) == (Poly(x-1, x), Poly(x**2-1, x))
    assert _polify_basic(Poly(x-1, x), Poly(x**2-1, x)) == (Poly(x-1, x), Poly(x**2-1, x))
    assert _polify_basic(1, x**2-1) == (Poly(1, x), Poly(x**2-1, x))
    assert _polify_basic(1, x**2-1) == (Poly(1, x), Poly(x**2-1, x))
    assert _polify_basic(1, Poly(x**2-1, x)) == (Poly(1, x), Poly(x**2-1, x))
    assert _polify_basic(1, Poly(x**2-1, x)) == (Poly(1, x), Poly(x**2-1, x))
    assert _polify_basic(x**2-1, 1) == (Poly(x**2-1, x), Poly(1, x))
    assert _polify_basic(x**2-1, 1) == (Poly(x**2-1, x), Poly(1, x))
    assert _polify_basic(Poly(x**2-1, x), 1) == (Poly(x**2-1, x), Poly(1, x))
    assert _polify_basic(Poly(x**2-1, x), 1) == (Poly(x**2-1, x), Poly(1, x))
    raises(CoercionFailed, "_polify_basic(1, 2)")
def test_pdiv():
    """Pseudo-division: pdiv/pexquo/pquo/prem, as methods and functions.

    Each public function is exercised with expressions, explicit generators
    (both positional and as a tuple), Poly inputs, and the polys= flag that
    forces the output representation.
    """
    f, g = x**2 - y**2, x - y
    q, r = x + y, 0
    F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
    assert F.pdiv(G) == (Q, R)
    assert F.pexquo(G) == Q
    assert F.pquo(G) == Q
    assert F.prem(G) == R
    assert pdiv(f, g) == (q, r)
    assert pexquo(f, g) == q
    assert pquo(f, g) == q
    assert prem(f, g) == r
    assert pdiv(f, g, x, y) == (q, r)
    assert pexquo(f, g, x, y) == q
    assert pquo(f, g, x, y) == q
    assert prem(f, g, x, y) == r
    assert pdiv(f, g, (x,y)) == (q, r)
    assert pexquo(f, g, (x,y)) == q
    assert pquo(f, g, (x,y)) == q
    assert prem(f, g, (x,y)) == r
    assert pdiv(F, G) == (Q, R)
    assert pexquo(F, G) == Q
    assert pquo(F, G) == Q
    assert prem(F, G) == R
    assert pdiv(f, g, polys=True) == (Q, R)
    assert pexquo(f, g, polys=True) == Q
    assert pquo(f, g, polys=True) == Q
    assert prem(f, g, polys=True) == R
    assert pdiv(F, G, polys=False) == (q, r)
    assert pexquo(F, G, polys=False) == q
    assert pquo(F, G, polys=False) == q
    assert prem(F, G, polys=False) == r
    raises(GeneratorsNeeded, "pdiv(4, 2)")
    raises(GeneratorsNeeded, "pexquo(4, 2)")
    raises(GeneratorsNeeded, "pquo(4, 2)")
    raises(GeneratorsNeeded, "prem(4, 2)")
def test_div():
    """Ordinary division: div/exquo/quo/rem, mirroring test_pdiv's matrix."""
    f, g = x**2 - y**2, x - y
    q, r = x + y, 0
    F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
    assert F.div(G) == (Q, R)
    assert F.exquo(G) == Q
    assert F.quo(G) == Q
    assert F.rem(G) == R
    assert div(f, g) == (q, r)
    assert exquo(f, g) == q
    assert quo(f, g) == q
    assert rem(f, g) == r
    assert div(f, g, x, y) == (q, r)
    assert exquo(f, g, x, y) == q
    assert quo(f, g, x, y) == q
    assert rem(f, g, x, y) == r
    assert div(f, g, (x,y)) == (q, r)
    assert exquo(f, g, (x,y)) == q
    assert quo(f, g, (x,y)) == q
    assert rem(f, g, (x,y)) == r
    assert div(F, G) == (Q, R)
    assert exquo(F, G) == Q
    assert quo(F, G) == Q
    assert rem(F, G) == R
    assert div(f, g, polys=True) == (Q, R)
    assert exquo(f, g, polys=True) == Q
    assert quo(f, g, polys=True) == Q
    assert rem(f, g, polys=True) == R
    assert div(F, G, polys=False) == (q, r)
    assert exquo(F, G, polys=False) == q
    assert quo(F, G, polys=False) == q
    assert rem(F, G, polys=False) == r
    raises(GeneratorsNeeded, "div(4, 2)")
    raises(GeneratorsNeeded, "exquo(4, 2)")
    raises(GeneratorsNeeded, "quo(4, 2)")
    raises(GeneratorsNeeded, "rem(4, 2)")
def test_gcdex():
    """Extended Euclidean algorithm: half_gcdex/gcdex/invert.

    Also checks the pure-integer fallback (no generators needed there).
    """
    f, g = 2*x, x**2 - 16
    s, t, h = x/32, -Rational(1,16), 1
    F, G, S, T, H = [ Poly(u, x, domain='QQ') for u in (f, g, s, t, h) ]
    assert F.half_gcdex(G) == (S, H)
    assert F.gcdex(G) == (S, T, H)
    assert F.invert(G) == S
    assert half_gcdex(f, g) == (s, h)
    assert gcdex(f, g) == (s, t, h)
    assert invert(f, g) == s
    assert half_gcdex(f, g, x) == (s, h)
    assert gcdex(f, g, x) == (s, t, h)
    assert invert(f, g, x) == s
    assert half_gcdex(f, g, (x,)) == (s, h)
    assert gcdex(f, g, (x,)) == (s, t, h)
    assert invert(f, g, (x,)) == s
    assert half_gcdex(F, G) == (S, H)
    assert gcdex(F, G) == (S, T, H)
    assert invert(F, G) == S
    assert half_gcdex(f, g, polys=True) == (S, H)
    assert gcdex(f, g, polys=True) == (S, T, H)
    assert invert(f, g, polys=True) == S
    assert half_gcdex(F, G, polys=False) == (s, h)
    assert gcdex(F, G, polys=False) == (s, t, h)
    assert invert(F, G, polys=False) == s
    # Plain integers fall back to integer extended GCD / modular inverse.
    assert half_gcdex(100, 2004) == (-20, 4)
    assert gcdex(100, 2004) == (-20, 1, 4)
    assert invert(3, 7) == 5
def test_subresultants():
    """subresultants() returns the subresultant PRS of two polynomials."""
    f, g, h = x**2 - 2*x + 1, x**2 - 1, 2*x - 2
    F, G, H = Poly(f), Poly(g), Poly(h)
    assert F.subresultants(G) == [F, G, H]
    assert subresultants(f, g) == [f, g, h]
    assert subresultants(f, g, x) == [f, g, h]
    assert subresultants(f, g, (x,)) == [f, g, h]
    assert subresultants(F, G) == [F, G, H]
    assert subresultants(f, g, polys=True) == [F, G, H]
    assert subresultants(F, G, polys=False) == [f, g, h]
    raises(GeneratorsNeeded, "subresultants(4, 2)")
def test_resultant():
    """resultant() vanishes iff the inputs share a root."""
    f, g, h = x**2 - 2*x + 1, x**2 - 1, 0
    F, G = Poly(f), Poly(g)
    assert F.resultant(G) == h
    assert resultant(f, g) == h
    assert resultant(f, g, x) == h
    assert resultant(f, g, (x,)) == h
    assert resultant(F, G) == h
    assert resultant(f, g, polys=True) == h
    assert resultant(F, G, polys=False) == h
    # With symbolic coefficients the resultant is itself a polynomial.
    f, g, h = x - a, x - b, a - b
    F, G, H = Poly(f), Poly(g), Poly(h)
    assert F.resultant(G) == H
    assert resultant(f, g) == h
    assert resultant(f, g, x) == h
    assert resultant(f, g, (x,)) == h
    assert resultant(F, G) == H
    assert resultant(f, g, polys=True) == H
    assert resultant(F, G, polys=False) == h
    raises(GeneratorsNeeded, "resultant(4, 2)")
def test_discriminant():
    """discriminant() of concrete and symbolic-coefficient polynomials."""
    f, g = x**3 + 3*x**2 + 9*x - 13, -11664
    F = Poly(f)
    assert F.discriminant() == g
    assert discriminant(f) == g
    assert discriminant(f, x) == g
    assert discriminant(f, (x,)) == g
    assert discriminant(F) == g
    assert discriminant(f, polys=True) == g
    assert discriminant(F, polys=False) == g
    # Generic quadratic: the classic b**2 - 4*a*c.
    f, g = a*x**2 + b*x + c, b**2 - 4*a*c
    F, G = Poly(f), Poly(g)
    assert F.discriminant() == G
    assert discriminant(f) == g
    assert discriminant(f, x, a, b, c) == g
    assert discriminant(f, (x, a, b, c)) == g
    assert discriminant(F) == G
    assert discriminant(f, polys=True) == G
    assert discriminant(F, polys=False) == g
    raises(GeneratorsNeeded, "discriminant(4)")
def test_gcd():
    """cofactors/gcd/lcm over ZZ, over floats, and for plain integers."""
    f, g = x**3 - 1, x**2 - 1
    s, t = x**2 + x + 1, x + 1
    h, r = x - 1, x**4 + x**3 - x - 1
    F, G, S, T, H, R = [ Poly(u) for u in (f, g, s, t, h, r) ]
    assert F.cofactors(G) == (H, S, T)
    assert F.gcd(G) == H
    assert F.lcm(G) == R
    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == r
    assert cofactors(f, g, x) == (h, s, t)
    assert gcd(f, g, x) == h
    assert lcm(f, g, x) == r
    assert cofactors(f, g, (x,)) == (h, s, t)
    assert gcd(f, g, (x,)) == h
    assert lcm(f, g, (x,)) == r
    assert cofactors(F, G) == (H, S, T)
    assert gcd(F, G) == H
    assert lcm(F, G) == R
    assert cofactors(f, g, polys=True) == (H, S, T)
    assert gcd(f, g, polys=True) == H
    assert lcm(f, g, polys=True) == R
    assert cofactors(F, G, polys=False) == (h, s, t)
    assert gcd(F, G, polys=False) == h
    assert lcm(F, G, polys=False) == r
    # Mixed integer/float inputs promote the computation to floats.
    f, g = x**2 - 1, x - 1.0
    h, s, t = g, x + 1.0, 1.0
    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == f
    f, g = x**2 - 1.0, x - 1
    h, s, t = g, x + 1.0, 1.0
    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == f
    assert cofactors(8, 6) == (2, 4, 3)
    assert gcd(8, 6) == 2
    assert lcm(8, 6) == 24
def test_terms_gcd():
    """terms_gcd() factors out the GCD of all terms."""
    assert terms_gcd(1) == 1
    assert terms_gcd(1, x) == 1
    assert terms_gcd(x**3*y - x*y**3) == x*y*(x**2 - y**2)
    assert terms_gcd(2*x**3*y - 2*x*y**3) == 2*x*y*(x**2 - y**2)
    assert terms_gcd(x**3*y/2 - x*y**3/2) == x*y/2*(x**2 - y**2)
def test_trunc():
    """trunc(p) reduces every coefficient modulo p (symmetric form)."""
    f, g = x**5 + 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6, x**5 - x**4 + x**2 - x
    F, G = Poly(f), Poly(g)
    assert F.trunc(3) == G
    assert trunc(f, 3) == g
    assert trunc(f, 3, x) == g
    assert trunc(f, 3, (x,)) == g
    assert trunc(F, 3) == G
    assert trunc(f, 3, polys=True) == G
    assert trunc(F, 3, polys=False) == g
    f, g = 6*x**5 + 5*x**4 + 4*x**3 + 3*x**2 + 2*x + 1, -x**4 + x**3 - x + 1
    F, G = Poly(f), Poly(g)
    assert F.trunc(3) == G
    assert trunc(f, 3) == g
    assert trunc(f, 3, x) == g
    assert trunc(f, 3, (x,)) == g
    assert trunc(F, 3) == G
    assert trunc(f, 3, polys=True) == G
    assert trunc(F, 3, polys=False) == g
    # NOTE(review): f is built over GF(5) but the expected value is a
    # modulus=2 poly -- presumably trunc() re-wraps in the truncation
    # modulus; confirm against Poly.trunc() semantics.
    f = Poly(x**2 + 2*x + 3, modulus=5)
    assert f.trunc(2) == Poly(x**2 + 1, modulus=2)
def test_monic():
    """monic() divides out the leading coefficient."""
    f, g = 2*x - 1, x - S(1)/2
    F, G = Poly(f, domain='QQ'), Poly(g)
    assert F.monic() == G
    assert monic(f) == g
    assert monic(f, x) == g
    assert monic(f, (x,)) == g
    assert monic(F) == G
    assert monic(f, polys=True) == G
    assert monic(F, polys=False) == g
    raises(GeneratorsNeeded, "monic(4)")
    # Over GF(5) the inverse of the leading coefficient is used.
    assert monic(2*x**2 + 3*x + 4, modulus=5) == x**2 - x + 2
def test_content():
    """content() returns the GCD of a polynomial's coefficients.

    Unsupported over GF(p), hence OperationNotSupported; a bare number has
    no generators, hence GeneratorsNeeded.
    """
    f, F = 4*x + 2, Poly(4*x + 2)
    # Bug fix: these two comparisons were bare expressions in the original,
    # so their results were discarded and nothing was actually checked.
    assert F.content() == 2
    assert content(f) == 2
    raises(GeneratorsNeeded, "content(4)")
    raises(OperationNotSupported, "Poly(2*x, modulus=3).content()")
def test_primitive():
    """primitive() splits a polynomial into (content, primitive part)."""
    f, g = 4*x + 2, 2*x + 1
    F, G = Poly(f), Poly(g)
    assert F.primitive() == (2, G)
    assert primitive(f) == (2, g)
    assert primitive(f, x) == (2, g)
    assert primitive(f, (x,)) == (2, g)
    assert primitive(F) == (2, G)
    assert primitive(f, polys=True) == (2, G)
    assert primitive(F, polys=False) == (2, g)
    raises(GeneratorsNeeded, "primitive(4)")
    raises(OperationNotSupported, "Poly(2*x, modulus=3).primitive()")
def test_compose():
    """compose() substitutes one polynomial into another; decompose() inverts it."""
    f = x**12+20*x**10+150*x**8+500*x**6+625*x**4-2*x**3-10*x+9
    g = x**4 - 2*x + 9
    h = x**3 + 5*x
    F, G, H = map(Poly, (f, g, h))
    assert G.compose(H) == F
    assert compose(g, h) == f
    assert compose(g, h, x) == f
    assert compose(g, h, (x,)) == f
    assert compose(G, H) == F
    assert compose(g, h, polys=True) == F
    assert compose(G, H, polys=False) == f
    assert F.decompose() == [G, H]
    assert decompose(f) == [g, h]
    assert decompose(f, x) == [g, h]
    assert decompose(f, (x,)) == [g, h]
    assert decompose(F) == [G, H]
    assert decompose(f, polys=True) == [G, H]
    assert decompose(F, polys=False) == [g, h]
    raises(GeneratorsNeeded, "compose(4, 2)")
    raises(GeneratorsNeeded, "decompose(4)")
    # Generator order decides which variable is substituted.
    assert compose(x**2 - y**2, x - y, x, y) == x**2 - 2*x*y
    assert compose(x**2 - y**2, x - y, y, x) == -y**2 + 2*x*y
def test_sturm():
    """sturm() builds the Sturm sequence (computed over QQ)."""
    f, F = x, Poly(x, domain='QQ')
    g, G = 1, Poly(1, x, domain='QQ')
    assert F.sturm() == [F, G]
    assert sturm(f) == [f, g]
    assert sturm(f, x) == [f, g]
    assert sturm(f, (x,)) == [f, g]
    assert sturm(F) == [F, G]
    assert sturm(f, polys=True) == [F, G]
    assert sturm(F, polys=False) == [f, g]
    raises(GeneratorsNeeded, "sturm(4)")
def test_sqf_norm():
    """sqf_norm() returns (shifts, shifted poly, square-free norm) over an extension."""
    assert sqf_norm(x**2-2, extension=sqrt(3)) == \
        (1, x**2 - 2*sqrt(3)*x + 1, x**4 - 10*x**2 + 1)
    assert sqf_norm(x**2-3, extension=sqrt(2)) == \
        (1, x**2 - 2*sqrt(2)*x - 1, x**4 - 10*x**2 + 1)
    assert Poly(x**2-2, extension=sqrt(3)).sqf_norm() == \
        (1, Poly(x**2 - 2*sqrt(3)*x + 1, x, extension=sqrt(3)),
            Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
    assert Poly(x**2-3, extension=sqrt(2)).sqf_norm() == \
        (1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
            Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
    """Square-free decomposition: sqf_part, sqf_list and the sqf() wrapper."""
    f = x**5 - x**3 - x**2 + 1
    g = x**3 + 2*x**2 + 2*x + 1
    h = x - 1
    p = x**4 + x**3 - x - 1
    F, G, H, P = map(Poly, (f, g, h, p))
    assert F.sqf_part() == P
    assert sqf_part(f) == p
    assert sqf_part(f, x) == p
    assert sqf_part(f, (x,)) == p
    assert sqf_part(F) == P
    assert sqf_part(f, polys=True) == P
    assert sqf_part(F, polys=False) == p
    assert F.sqf_list() == (1, [(G, 1), (H, 2)])
    assert sqf_list(f) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
    assert sqf_list(F) == (1, [(G, 1), (H, 2)])
    assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
    assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, include=True) == [(g, 1), (h, 2)]
    raises(GeneratorsNeeded, "sqf_part(4)")
    raises(GeneratorsNeeded, "sqf_list(4)")
    assert sqf(1) == 1
    assert sqf(1, frac=True) == 1
    assert sqf(f) == g*h**2
    assert sqf(f, x) == g*h**2
    assert sqf(f, (x,)) == g*h**2
    # frac=True allows rational functions: numerator is decomposed.
    d = x**2 + y**2
    assert sqf(f/d, frac=True) == (g*h**2)/d
    assert sqf(f/d, x, frac=True) == (g*h**2)/d
    assert sqf(f/d, (x,), frac=True) == (g*h**2)/d
    assert sqf(x - 1) == x - 1
    assert sqf(-x - 1) == -x - 1
    # Unit coefficients should not be kept as an explicit unevaluated factor.
    assert sqf(x - 1) != Mul(1, x - 1, evaluate=False)
    assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
    assert sqf((6*x - 10)/(3*x - 6), frac=True) == S(2)/3*((3*x - 5)/(x - 2))
def test_factor():
    """Factorization: factor_list and factor(), incl. extensions and GF(p)."""
    f = x**5 - x**3 - x**2 + 1
    u = x + 1
    v = x - 1
    w = x**2 + x + 1
    F, U, V, W = map(Poly, (f, u, v, w))
    assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, include=True) == [(u, 1), (v, 2), (w, 1)]
    raises(GeneratorsNeeded, "factor_list(4)")
    assert factor(1) == 1
    assert factor(1, frac=True) == 1
    assert factor(f) == u*v**2*w
    assert factor(f, x) == u*v**2*w
    assert factor(f, (x,)) == u*v**2*w
    g, p, q = x**2 - y**2, x - y, x + y
    assert factor(f/g, frac=True) == (u*v**2*w)/(p*q)
    assert factor(f/g, x, frac=True) == (u*v**2*w)/(p*q)
    assert factor(f/g, (x,), frac=True) == (u*v**2*w)/(p*q)
    # Transcendental coefficients land in the EX domain and stay unfactored.
    f = Poly(sin(1)*x + 1, x, domain=EX)
    assert f.factor_list() == (1, [(f, 1)])
    # x**4 + 1 is irreducible over QQ but splits over QQ(I) and QQ(sqrt(2)).
    f = x**4 + 1
    assert factor(f) == f
    assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
    assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
    assert factor(f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)
    f = x**2 + 2*sqrt(2)*x + 2
    assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
    assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6
    assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
        (x + sqrt(2)*y)*(x - sqrt(2)*y)
    assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
        2*((x + sqrt(2)*y)*(x - sqrt(2)*y))
    assert factor(x - 1) == x - 1
    assert factor(-x - 1) == -x - 1
    assert factor(x - 1) != Mul(1, x - 1, evaluate=False)
    assert factor(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
    assert factor((6*x - 10)/(3*x - 6), frac=True) == S(2)/3*((3*x - 5)/(x - 2))
    # symmetric=True keeps coefficients in (-p/2, p/2]; False uses [0, p).
    assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
        (x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x** 2 + 1)
    assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
        (x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 + x**3 + 65536*x** 2 + 1)
def test_intervals():
    """Real-root isolation intervals, optionally refined to a width eps."""
    f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))
    assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
    assert f.intervals(sqf=False) == [((-1, 0), 1), ((14, 15), 1)]
    # eps may be a Rational or a float; both refine to the same intervals.
    assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
        [((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
    # Same checks through the public intervals() function.
    f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)
    assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
    assert intervals(f, sqf=False) == [((-1, 0), 1), ((14, 15), 1)]
    assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
        [((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
    # Multiplicities are reported alongside the isolating intervals.
    f = Poly((x**2 - 2)*(x**2-3)**7*(x+1)*(7*x+3)**3)
    assert f.intervals() == \
        [((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
         ((-1, -1), 1), ((-1, 0), 3),
         ((1, S(3)/2), 1), ((S(3)/2, 2), 7)]
    raises(GeneratorsNeeded, "intervals(0)")
def test_nroots():
    """Numerical root finding via Poly.nroots() and the nroots() wrapper.

    error=True additionally returns an error estimate; complex inputs and
    algebraic extensions are also exercised.
    """
    assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
    assert Poly(x**2 + 1, x).nroots() == [-I, I]
    roots, error = Poly(x**2 - 1, x).nroots(error=True)
    # NOTE(review): 1e25 is an extremely loose bound -- presumably a small
    # tolerance such as 1e-25 was intended; kept as-is pending confirmation.
    # (Idiom fix only: the stray trailing semicolons were removed.)
    assert roots == [-1.0, 1.0] and error < 1e25
    roots, error = Poly(x**2 + 1, x).nroots(error=True)
    assert roots == [-I, I] and error < 1e25
    roots, error = Poly(x**2/3 - S(1)/3, x).nroots(error=True)
    assert roots == [-1.0, 1.0] and error < 1e25
    roots, error = Poly(x**2/3 + S(1)/3, x).nroots(error=True)
    assert roots == [-I, I] and error < 1e25
    assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + I, 1.0 - I]
    assert Poly(x**2 + 2*I, x, extension=I).nroots() == [-1.0 + I, 1.0 - I]
    assert Poly(0.2*x + 0.1).nroots() == [-0.5]
    # Symbolic coefficients and multivariate inputs are rejected.
    raises(DomainError, "Poly(x+y, x).nroots()")
    raises(PolynomialError, "Poly(x+y).nroots()")
    assert nroots(x**2 - 1) == [-1.0, 1.0]
    roots, error = nroots(x**2 - 1, error=True)
    assert roots == [-1.0, 1.0] and error < 1e25
    raises(GeneratorsNeeded, "nroots(0)")
def test_cancel():
    """cancel() removes common factors from a rational function or pair."""
    assert cancel(0) == 0
    assert cancel(7) == 7
    assert cancel(x) == x
    assert cancel(oo) == oo
    # Pair form returns (common coefficient, numerator, denominator).
    assert cancel((2, 3)) == (1, 2, 3)
    assert cancel((1, 0), x) == (1, 1, 0)
    assert cancel((0, 1), x) == (1, 0, 1)
    f, g, p, q = 4*x**2-4, 2*x-2, 2*x+2, 1
    F, G, P, Q = [ Poly(u, x) for u in (f, g, p, q) ]
    assert F.cancel(G) == (1, P, Q)
    assert cancel((f, g)) == (1, p, q)
    assert cancel((f, g), x) == (1, p, q)
    assert cancel((f, g), (x,)) == (1, p, q)
    assert cancel((F, G)) == (1, P, Q)
    assert cancel((f, g), polys=True) == (1, P, Q)
    assert cancel((F, G), polys=False) == (1, p, q)
    # By default algebraic coefficients are left untouched (greedy);
    # greedy=False forces the cancellation over the extension field.
    f = (x**2 - 2)/(x + sqrt(2))
    assert cancel(f) == f
    assert cancel(f, greedy=False) == x - sqrt(2)
    f = (x**2 - 2)/(x - sqrt(2))
    assert cancel(f) == f
    assert cancel(f, greedy=False) == x + sqrt(2)
    assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)
    assert cancel((x**2-y)/(x-y)) == 1/(x - y)*(x**2 - y)
    assert cancel((x**2-y**2)/(x-y), x) == x + y
    assert cancel((x**2-y**2)/(x-y), y) == x + y
    assert cancel((x**2-y**2)/(x-y)) == x + y
    assert cancel((x**3-1)/(x**2-1)) == (x**2+x+1)/(x+1)
    assert cancel((x**3/2-S(1)/2)/(x**2-1)) == (x**2+x+1)/(2*x+2)
    assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1
    f = Poly(x**2 - a**2, x)
    g = Poly(x - a, x)
    F = Poly(x + a, x)
    G = Poly(1, x)
    assert cancel((f, g)) == (1, F, G)
    f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
    g = x**2 - 2
    assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
def test_reduced():
    """reduced() divides f by a Groebner basis: quotients plus remainder."""
    raises(PolynomialError, "reduced(x, [x], x, modulus=3)")
    f = 2*x**4 + y**2 - x**2 + y**3
    G = [x**3 - x, y**3 - y]
    Q = [2*x, 1]
    r = x**2 + y**2 + y
    assert reduced(f, G) == (Q, r)
    assert reduced(f, G, x, y) == (Q, r)
    Q = [Poly(2*x, x, y), Poly(1, x, y)]
    r = Poly(x**2 + y**2 + y, x, y)
    assert reduced(f, G, polys=True) == (Q, r)
    assert reduced(f, G, x, y, polys=True) == (Q, r)
def test_groebner():
    """groebner() computes Groebner bases under lex and grevlex orders."""
    raises(PolynomialError, "groebner([x], x, modulus=3)")
    assert groebner([], x, y, z) == []
    assert groebner([x**2 + 1, y**4*x + x**3],
        x, y, order='lex') == [1 + x**2, -1 + y**4]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3],
        x, y, z, order='grevlex') == [-1 + y**4, z**3, 1 + x**2]
    assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex', polys=True) == \
        [Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex', polys=True) == \
        [Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]
def test_symmetrize():
    """symmetrize() rewrites in elementary symmetric polynomials plus remainder."""
    assert symmetrize(0) == (0, 0)
    assert symmetrize(1) == (1, 0)
    assert symmetrize(0, x, y, z) == (0, 0)
    assert symmetrize(1, x, y, z) == (1, 0)
    # formal=True additionally returns a substitution mapping (empty here).
    assert symmetrize(0, formal=True) == (0, 0, {})
    assert symmetrize(1, formal=True) == (1, 0, {})
    # Elementary symmetric polynomials in x, y, z.
    s1 = x + y + z
    s2 = x*y + x*z + y*z
    s3 = x*y*z
    assert symmetrize(x) == (x, 0)
    assert symmetrize(x + 1) == (x + 1, 0)
    assert symmetrize(x, x, y) == (x + y, -y)
    assert symmetrize(x + 1, x, y) == (x + y + 1, -y)
    assert symmetrize(x, x, y, z) == (s1, -y - z)
    assert symmetrize(x + 1, x, y, z) == (s1 + 1, -y - z)
    assert symmetrize(x**2, x, y, z) == (s1**2 - 2*s2, -y**2 - z**2)
    assert symmetrize(x**2 + y**2) == (-2*x*y + (x + y)**2, 0)
    assert symmetrize(x**2 - y**2) == (-2*x*y + (x + y)**2, -2*y**2)
    assert symmetrize(x**3 + y**2 + a*x**2 + b*y**3, x, y) == \
        (-3*x*y*(x + y) - 2*a*x*y + a*(x + y)**2 + (x + y)**3, y**2*(1 - a) - y**3*(1 - b))
def test_horner():
    """horner() rewrites a polynomial in nested Horner form."""
    assert horner(0) == 0
    assert horner(1) == 1
    assert horner(x) == x
    assert horner(x + 1) == x + 1
    assert horner(x**2 + 1) == x**2 + 1
    assert horner(x**2 + x) == (x + 1)*x
    assert horner(x**2 + x + 1) == (x + 1)*x + 1
    assert horner(9*x**4 + 8*x**3 + 7*x**2 + 6*x + 5) == (((9*x + 8)*x + 7)*x + 6)*x + 5
    assert horner(a*x**4 + b*x**3 + c*x**2 + d*x + e) == (((a*x + b)*x + c)*x + d)*x + e
    # wrt= picks the variable to nest on in multivariate input.
    assert horner(4*x**2*y**2 + 2*x**2*y + 2*x*y**2 + x*y, wrt=x) == ((4*y + 2)*x*y + (2*y + 1)*y)*x
    assert horner(4*x**2*y**2 + 2*x**2*y + 2*x*y**2 + x*y, wrt=y) == ((4*x + 2)*y*x + (2*x + 1)*x)*y
def test_poly():
    """poly() expands an expression and wraps it as a Poly, inferring gens."""
    assert poly(x) == Poly(x, x)
    assert poly(y) == Poly(y, y)
    assert poly(x + y) == Poly(x + y, x, y)
    # Non-polynomial subexpressions become generators themselves.
    assert poly(x + sin(x)) == Poly(x + sin(x), x, sin(x))
    # wrt= moves the chosen generator to the front.
    assert poly(x + y, wrt=y) == Poly(x + y, y, x)
    assert poly(x + sin(x), wrt=sin(x)) == Poly(x + sin(x), sin(x), x)
    assert poly(x*y + 2*x*z**2 + 17) == Poly(x*y + 2*x*z**2 + 17, x, y, z)
    assert poly(2*(y + z)**2 - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - 1, y, z)
    assert poly(x*(y + z)**2 - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - 1, x, y, z)
    assert poly(2*x*(y + z)**2 - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - 1, x, y, z)
    assert poly(2*(y + z)**2 - x - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - x - 1, x, y, z)
    assert poly(x*(y + z)**2 - x - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - x - 1, x, y, z)
    assert poly(2*x*(y + z)**2 - x - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - x - 1, x, y, z)
    assert poly(x*y + (x + y)**2 + (x + z)**2) == \
        Poly(2*x*z + 3*x*y + y**2 + z**2 + 2*x**2, x, y, z)
    assert poly(x*y*(x + y)*(x + z)**2) == \
        Poly(x**3*y**2 + x*y**2*z**2 + y*x**2*z**2 + 2*z*x**2*y**2 + 2*y*z*x**3 + y*x**4, x, y, z)
    # An existing Poly is re-wrapped with canonically ordered generators.
    assert poly(Poly(x + y + z, y, x, z)) == Poly(x + y + z, x, y, z)
    assert poly(Poly(x + y + z, y, x, z), wrt=z) == Poly(x + y + z, z, x, y)
    raises(GeneratorsNeeded, "poly(1)")
| tarballs-are-good/sympy | sympy/polys/tests/test_polytools.py | Python | bsd-3-clause | 65,988 | [
"Gaussian"
] | 56f41739507668d076f36b733e56072be10aaa9f24ec3873b372320cee3d5077 |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test neurom._neuronfunc functionality'''
import tempfile
from nose import tools as nt
import os
import numpy as np
from neurom import fst, load_neuron, NeuriteType
from neurom.fst import _neuronfunc as _nf
from neurom.core import make_soma, Neurite, Section
from neurom.core import _soma
from neurom.core.dataformat import POINT_TYPE
from neurom.core.population import Population
from neurom.io.datawrapper import BlockNeuronBuilder
from utils import _close, _equal
# Test fixtures: morphology files resolved relative to this test module.
_PWD = os.path.dirname(os.path.abspath(__file__))
H5_PATH = os.path.join(_PWD, '../../../test_data/h5/v1/')
# Full H5 test neuron, loaded once at import time.
NRN = load_neuron(os.path.join(H5_PATH, 'Neuron.h5'))
SWC_PATH = os.path.join(_PWD, '../../../test_data/swc')
# Minimal SWC neuron used by most tests below.
SIMPLE = load_neuron(os.path.join(SWC_PATH, 'simple.swc'))
def test_soma_surface_area():
    """Soma surface of SIMPLE (radius 1): 4*pi*r**2 = 4*pi."""
    ret = _nf.soma_surface_area(SIMPLE)
    nt.eq_(ret, 12.566370614359172)
def test_soma_surface_areas():
    """Population-level variant returns one area per neuron."""
    ret = _nf.soma_surface_areas(SIMPLE)
    nt.eq_(ret, [12.566370614359172, ])
def test_soma_radii():
    """One soma radius per neuron; SIMPLE's soma has radius 1."""
    ret = _nf.soma_radii(SIMPLE)
    nt.eq_(ret, [1., ])
def test_trunk_section_lengths():
    """Length of the root section of each neurite of SIMPLE."""
    ret = _nf.trunk_section_lengths(SIMPLE)
    nt.eq_(ret, [5.0, 4.0])
def test_trunk_origin_radii():
    """Radius of the first point of each neurite trunk."""
    ret = _nf.trunk_origin_radii(SIMPLE)
    nt.eq_(ret, [1.0, 1.0])
def test_trunk_origin_azimuths():
    """Azimuth angle of each trunk origin relative to the soma."""
    ret = _nf.trunk_origin_azimuths(SIMPLE)
    nt.eq_(ret, [0.0, 0.0])
def test_trunk_origin_elevations():
class Mock(object):
pass
n0 = Mock()
n1 = Mock()
s = make_soma([[0, 0, 0, 4]])
t0 = Section(((1, 0, 0, 2), (2, 1, 1, 2)))
t0.type = NeuriteType.basal_dendrite
t1 = Section(((0, 1, 0, 2), (1, 2, 1, 2)))
t1.type = NeuriteType.basal_dendrite
n0.neurites = [Neurite(t0), Neurite(t1)]
n0.soma = s
t2 = Section(((0, -1, 0, 2), (-1, -2, -1, 2)))
t2.type = NeuriteType.basal_dendrite
n1.neurites = [Neurite(t2)]
n1.soma = s
pop = Population([n0, n1])
nt.eq_(list(_nf.trunk_origin_elevations(pop)),
[0.0, np.pi/2., -np.pi/2.])
nt.eq_(
list(_nf.trunk_origin_elevations(pop, neurite_type=NeuriteType.basal_dendrite)),
[0.0, np.pi/2., -np.pi/2.])
nt.eq_(len(_nf.trunk_origin_elevations(pop, neurite_type=NeuriteType.axon)),
0)
nt.eq_(len(_nf.trunk_origin_elevations(pop, neurite_type=NeuriteType.apical_dendrite)),
0)
@nt.raises(Exception)
def test_trunk_elevation_zero_norm_vector_raises():
    """trunk_origin_elevations is expected to raise for NRN.

    NOTE(review): presumably a zero-norm soma-to-trunk vector, whose
    elevation is undefined -- confirm against _neuronfunc.
    """
    _nf.trunk_origin_elevations(NRN)
def test_sholl_crossings_simple():
    """Sholl crossing counts around SIMPLE's soma centre for various radii."""
    center = SIMPLE.soma.center

    # No radii -> no counts.
    radii = []
    nt.eq_(list(_nf.sholl_crossings(SIMPLE, center, radii=radii)),
           [])

    radii = [1.0]
    nt.eq_([2],
           list(_nf.sholl_crossings(SIMPLE, center, radii=radii)))

    radii = [1.0, 5.1]
    nt.eq_([2, 4],
           list(_nf.sholl_crossings(SIMPLE, center, radii=radii)))

    radii = [1., 4., 5.]
    nt.eq_([2, 4, 5],
           list(_nf.sholl_crossings(SIMPLE, center, radii=radii)))
def load_swc(string):
    """Write *string* to a temporary .swc file and load it as a neuron."""
    with tempfile.NamedTemporaryFile(prefix='test_neuron_func',
                                     mode='w', suffix='.swc') as tmp:
        tmp.write(string)
        tmp.flush()
        neuron = load_neuron(tmp.name)
    return neuron
def test_sholl_analysis_custom():
    """Sholl crossings for the three morphologies of Fig 2 of
    http://dx.doi.org/10.1016/j.jneumeth.2014.01.016
    """
    #recreate morphs from Fig 2 of
    #http://dx.doi.org/10.1016/j.jneumeth.2014.01.016
    radii = np.arange(10, 81, 10)
    center = 0, 0, 0
    # morph A: two unbranched neurites reaching +/-80
    morph_A = load_swc('''\
1 1 0 0 0 1. -1
2 3 0 0 0 1. 1
3 3 80 0 0 1. 2
4 4 0 0 0 1. 1
5 4 -80 0 0 1. 4''')
    nt.eq_(list(_nf.sholl_crossings(morph_A, center, radii=radii)),
           [2, 2, 2, 2, 2, 2, 2, 2])

    # morph B: both neurites fan out into 5 branches at |x| = 35
    morph_B = load_swc('''\
1 1 0 0 0 1. -1
2 3 0 0 0 1. 1
3 3 35 0 0 1. 2
4 3 51 10 0 1. 3
5 3 51 5 0 1. 3
6 3 51 0 0 1. 3
7 3 51 -5 0 1. 3
8 3 51 -10 0 1. 3
9 4 -35 0 0 1. 2
10 4 -51 10 0 1. 9
11 4 -51 5 0 1. 9
12 4 -51 0 0 1. 9
13 4 -51 -5 0 1. 9
14 4 -51 -10 0 1. 9
''')
    nt.eq_(list(_nf.sholl_crossings(morph_B, center, radii=radii)),
           [2, 2, 2, 10, 10, 0, 0, 0])

    # morph C: both neurites overlap on +x and branch at x = 65
    morph_C = load_swc('''\
1 1 0 0 0 1. -1
2 3 0 0 0 1. 1
3 3 65 0 0 1. 2
4 3 85 10 0 1. 3
5 3 85 5 0 1. 3
6 3 85 0 0 1. 3
7 3 85 -5 0 1. 3
8 3 85 -10 0 1. 3
9 4 65 0 0 1. 2
10 4 85 10 0 1. 9
11 4 85 5 0 1. 9
12 4 85 0 0 1. 9
13 4 85 -5 0 1. 9
14 4 85 -10 0 1. 9
''')
    nt.eq_(list(_nf.sholl_crossings(morph_C, center, radii=radii)),
           [2, 2, 2, 2, 2, 2, 10, 10])

    #from neurom.view import view
    #view.neuron(morph_C)[0].savefig('foo.png')
| liesbethvanherpe/NeuroM | neurom/fst/tests/test_neuronfunc.py | Python | bsd-3-clause | 6,299 | [
"NEURON"
] | 370fae49e4b1787b564ab6ab85def73ac9dd4b7e4b8c04cfd527c83455990bcb |
from numpy import array, arange, frombuffer, load, asarray, random, \
fromstring, expand_dims, unravel_index, prod
try:
    # Python 3 removed the builtin `buffer`; memoryview is the replacement.
    buffer
except NameError:
    buffer = memoryview

from ..utils import check_spark, check_options
# Presumably the Spark context type used below with isinstance(engine, spark),
# falsy when Spark is unavailable -- see ..utils.check_spark.
spark = check_spark()
def fromrdd(rdd, nrecords=None, shape=None, index=None, labels=None, dtype=None, ordered=False):
    """
    Load series data from a Spark RDD.

    Assumes keys are tuples with increasing and unique indices,
    and values are 1d ndarrays. Will try to infer properties
    that are not explicitly provided.

    Parameters
    ----------
    rdd : SparkRDD
        An RDD containing series data.

    shape : tuple or array, optional, default = None
        Total shape of data (if provided will avoid check).

    nrecords : int, optional, default = None
        Number of records (if provided will avoid check).

    index : array, optional, default = None
        Index for records, if not provided will use (0, 1, ...)

    labels : array, optional, default = None
        Labels for records. If provided, should have shape of shape[:-1].

    dtype : string, default = None
        Data numerical type (if provided will avoid check)

    ordered : boolean, optional, default = False
        Whether or not the rdd is ordered by key
    """
    from .series import Series
    from bolt.spark.array import BoltArraySpark

    # Only inspect a record when index/dtype were not supplied, since
    # first() triggers a Spark job.
    if index is None or dtype is None:
        item = rdd.values().first()
        if index is None:
            index = range(len(item))
        if dtype is None:
            dtype = item.dtype

    # Derive the record count from the shape when possible to avoid a
    # full count() over the RDD.
    if nrecords is None and shape is not None:
        nrecords = prod(shape[:-1])

    if nrecords is None:
        nrecords = rdd.count()

    if shape is None:
        shape = (nrecords, asarray(index).shape[0])

    def process_keys(record):
        # Normalize bare integer keys to 1-tuples, the key format
        # BoltArraySpark expects.
        k, v = record
        if isinstance(k, int):
            k = (k,)
        return k, v

    values = BoltArraySpark(rdd.map(process_keys), shape=shape, dtype=dtype, split=len(shape)-1, ordered=ordered)
    return Series(values, index=index, labels=labels)
def fromarray(values, index=None, labels=None, npartitions=None, engine=None):
    """
    Load series data from an array.

    Assumes that all but final dimension index the records,
    and the size of the final dimension is the length of each record,
    e.g. a (2, 3, 4) array will be treated as 2 x 3 records of size (4,)

    Parameters
    ----------
    values : array-like
        An array containing the data. Can be a numpy array,
        a bolt array, or an array-like.

    index : array, optional, default = None
        Index for records, if not provided will use (0,1,...,N)
        where N is the length of each record.

    labels : array, optional, default = None
        Labels for records. If provided, should have same shape as values.shape[:-1].

    npartitions : int, default = None
        Number of partitions for parallelization (Spark only)

    engine : object, default = None
        Computational engine (e.g. a SparkContext for Spark)
    """
    from .series import Series
    import bolt

    # An already-distributed bolt array is wrapped directly; note that the
    # index and labels arguments are ignored on this path.
    if isinstance(values, bolt.spark.array.BoltArraySpark):
        return Series(values)

    values = asarray(values)

    # Promote 1d input to a single record.
    if values.ndim < 2:
        values = expand_dims(values, 0)

    if index is not None and not asarray(index).shape[0] == values.shape[-1]:
        raise ValueError('Index length %s not equal to record length %s'
                         % (asarray(index).shape[0], values.shape[-1]))

    if index is None:
        index = arange(values.shape[-1])

    if spark and isinstance(engine, spark):
        # Distribute over all but the last (record) axis.
        axis = tuple(range(values.ndim - 1))
        values = bolt.array(values, context=engine, npartitions=npartitions, axis=axis)
        values._ordered = True
        # NOTE(review): labels are not forwarded on the Spark path -- confirm intended.
        return Series(values, index=index)

    return Series(values, index=index, labels=labels)
def fromlist(items, accessor=None, index=None, labels=None, dtype=None, npartitions=None, engine=None):
    """
    Load series data from a list with an optional accessor function.

    Will call accessor function on each item from the list,
    providing a generic interface for data loading.

    Parameters
    ----------
    items : list
        A list of items to load.

    accessor : function, optional, default = None
        A function to apply to each item in the list during loading.

    index : array, optional, default = None
        Index for records, if not provided will use (0,1,...,N)
        where N is the length of each record.

    labels : array, optional, default = None
        Labels for records. If provided, should have same length as items.

    dtype : string, default = None
        Data numerical type (if provided will avoid check)

    npartitions : int, default = None
        Number of partitions for parallelization (Spark only)

    engine : object, default = None
        Computational engine (e.g. a SparkContext for Spark)
    """
    if spark and isinstance(engine, spark):
        if dtype is None:
            # Peek at the first item locally to infer the dtype.
            dtype = accessor(items[0]).dtype if accessor else items[0].dtype
        nrecords = len(items)
        # Tuple keys (i,) to match the key format expected by fromrdd.
        keys = map(lambda k: (k, ), range(len(items)))
        if not npartitions:
            npartitions = engine.defaultParallelism
        items = zip(keys, items)
        rdd = engine.parallelize(items, npartitions)
        if accessor:
            rdd = rdd.mapValues(accessor)
        return fromrdd(rdd, nrecords=nrecords, index=index, labels=labels, dtype=dtype, ordered=True)

    else:
        if accessor:
            items = [accessor(i) for i in items]
        return fromarray(items, index=index, labels=labels)
def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
    """
    Loads series data from text files.

    Assumes data are formatted as rows, where each record is a row
    of numbers separated by spaces e.g. 'v v v v v'. You can
    optionally specify a fixed number of initial items per row to skip / discard.

    Parameters
    ----------
    path : string
        Directory to load from, can be a URI string with scheme
        (e.g. 'file://', 's3n://', or 'gs://'), or a single file,
        or a directory, or a directory with a single wildcard character.

    ext : str, optional, default = 'txt'
        File extension.

    dtype : dtype or dtype specifier, default 'float64'
        Numerical type to use for data after converting from text.

    skip : int, optional, default = 0
        Number of items in each record to skip.

    shape : tuple or list, optional, default = None
        Shape of data if known, will be inferred otherwise.

    index : array, optional, default = None
        Index for records, if not provided will use (0, 1, ...)

    labels : array, optional, default = None
        Labels for records. If provided, should have length equal to number of rows.

    npartitions : int, default = None
        Number of partitions for parallelization (Spark only)

    engine : object, default = None
        Computational engine (e.g. a SparkContext for Spark)

    credentials : dict, default = None
        Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
    """
    from thunder.readers import normalize_scheme, get_parallel_reader
    path = normalize_scheme(path, ext)

    if spark and isinstance(engine, spark):

        def parse(line, skip):
            # Convert one space-separated row, discarding the first
            # `skip` entries.
            vec = [float(x) for x in line.split(' ')]
            return array(vec[skip:], dtype=dtype)

        lines = engine.textFile(path, npartitions)
        data = lines.map(lambda x: parse(x, skip))

        def switch(record):
            # (value, index) -> ((index,), value), the key layout fromrdd expects
            ary, idx = record
            return (idx,), ary

        rdd = data.zipWithIndex().map(switch)
        # NOTE(review): labels are not forwarded on the Spark path -- confirm intended.
        return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)

    else:
        reader = get_parallel_reader(path)(engine, credentials=credentials)
        data = reader.read(path, ext=ext)

        values = []
        for kv in data:
            # kv is (filename, bytes); drop the trailing empty split after
            # the final newline.
            for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
                values.append(fromstring(line, sep=' '))

        values = asarray(values)

        # On the local path, skipped columns are removed after parsing.
        if skip > 0:
            values = values[:, skip:]

        if shape:
            values = values.reshape(shape)

        return fromarray(values, index=index, labels=labels)
def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None):
    """
    Load series data from flat binary files.

    Parameters
    ----------
    path : string URI or local filesystem path
        Directory to load from, can be a URI string with scheme
        (e.g. 'file://', 's3n://', or 'gs://'), or a single file,
        or a directory, or a directory with a single wildcard character.

    ext : str, optional, default = 'bin'
        Optional file extension specifier.

    conf : str, optional, default = 'conf.json'
        Name of conf file with type and size information.

    dtype : dtype or dtype specifier, default 'float64'
        Numerical type to use for data after converting from text.

    shape : tuple or list, optional, default = None
        Shape of data if known, will be inferred otherwise.

    skip : int, optional, default = 0
        Number of items in each record to skip.

    index : array, optional, default = None
        Index for records, if not provided will use (0, 1, ...)

    labels : array, optional, default = None
        Labels for records. If provided, should have shape of shape[:-1].

    engine : object, default = None
        Computational engine (e.g. a SparkContext for Spark)

    credentials : dict, default = None
        Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
    """
    shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials)

    from thunder.readers import normalize_scheme, get_parallel_reader
    path = normalize_scheme(path, ext)

    from numpy import dtype as dtype_func

    # Each on-disk record holds `skip` discarded items plus shape[-1] kept ones.
    nelements = shape[-1] + skip
    recordsize = dtype_func(dtype).itemsize * nelements

    if spark and isinstance(engine, spark):
        lines = engine.binaryRecords(path, recordsize)
        raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:])

        def switch(record):
            # (value, index) -> ((index,), value), the key layout fromrdd expects
            ary, idx = record
            return (idx,), ary

        rdd = raw.zipWithIndex().map(switch)

        # For n-dimensional data, unravel the flat record index into
        # multi-dimensional keys.
        if shape and len(shape) > 2:
            expand = lambda k: unravel_index(k[0], shape[0:-1])
            rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1]))

        if not index:
            index = arange(shape[-1])

        return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True)

    else:
        reader = get_parallel_reader(path)(engine, credentials=credentials)
        data = reader.read(path, ext=ext)

        values = []
        for record in data:
            buf = record[1]
            offset = 0
            # Walk the buffer one fixed-size record at a time.
            while offset < len(buf):
                v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype)
                values.append(v[skip:])
                offset += recordsize

        if not len(values) == prod(shape[0:-1]):
            raise ValueError('Unexpected shape, got %g records but expected %g'
                             % (len(values), prod(shape[0:-1])))

        values = asarray(values, dtype=dtype)

        if shape:
            values = values.reshape(shape)

        return fromarray(values, index=index, labels=labels)
def _binaryconfig(path, conf, dtype=None, shape=None, credentials=None):
    """
    Collects parameters to use for binary series loading.

    Reads the JSON conf file alongside `path` (missing files are treated
    as empty); explicit `dtype`/`shape` arguments override the file's
    values.

    Returns
    -------
    (shape, dtype) tuple

    Raises
    ------
    ValueError
        If either value is specified neither in the conf file nor as an
        argument.
    """
    import json
    from thunder.readers import get_file_reader, FileNotFoundError

    reader = get_file_reader(path)(credentials=credentials)
    try:
        buf = reader.read(path, filename=conf)
        params = json.loads(str(buf.decode('utf-8')))
    except FileNotFoundError:
        # No conf file: rely entirely on the explicit arguments.
        params = {}

    # Explicit arguments take precedence over values from the file.
    if dtype:
        params['dtype'] = dtype
    if shape:
        params['shape'] = shape

    if 'dtype' not in params:
        raise ValueError('dtype not specified either in conf.json or as argument')
    if 'shape' not in params:
        raise ValueError('shape not specified either in conf.json or as argument')

    return params['shape'], params['dtype']
def fromrandom(shape=(100, 10), npartitions=1, seed=42, engine=None):
    """
    Generate random gaussian series data.

    Parameters
    ----------
    shape : tuple, optional, default = (100,10)
        Dimensions of data.

    npartitions : int, optional, default = 1
        Number of partitions with which to distribute data.

    seed : int, optional, default = 42
        Randomization seed.

    engine : object, default = None
        Computational engine (e.g. a SparkContext for Spark)
    """
    # BUG FIX: hash() can return negative or > 32-bit values, which
    # numpy's random.seed rejects (it requires 0 <= seed < 2**32); fold
    # into the valid range. Previously-working seeds (e.g. the default
    # 42) are unaffected because hash(n) == n for small positive ints.
    seed = hash(seed) % (2 ** 32)

    def generate(v):
        # Per-record seed so every record is reproducible independently;
        # wrap again in case seed + v overflows the valid range.
        random.seed((seed + v) % (2 ** 32))
        return random.randn(shape[1])

    return fromlist(range(shape[0]), accessor=generate, npartitions=npartitions, engine=engine)
def fromexample(name=None, engine=None):
    """
    Load example series data.

    Data are downloaded from S3, so this method requires an internet connection.

    Parameters
    ----------
    name : str
        Name of dataset, options include 'iris' | 'mouse' | 'fish'.
        If not specified will print options.

    engine : object, default = None
        Computational engine (e.g. a SparkContext for Spark)
    """
    import os
    import tempfile
    import shutil
    from boto.s3.connection import S3Connection

    datasets = ['iris', 'mouse', 'fish']

    # Without a name, just list the available datasets and return None.
    if name is None:
        print('Availiable example series datasets')
        for d in datasets:
            print('- ' + d)
        return

    check_options(name, datasets)

    d = tempfile.mkdtemp()

    try:
        os.mkdir(os.path.join(d, 'series'))
        os.mkdir(os.path.join(d, 'series', name))
        # Anonymous download from the public sample-data bucket.
        conn = S3Connection(anon=True)
        bucket = conn.get_bucket('thunder-sample-data')
        for key in bucket.list(os.path.join('series', name) + '/'):
            if not key.name.endswith('/'):
                key.get_contents_to_filename(os.path.join(d, key.name))
        data = frombinary(os.path.join(d, 'series', name), engine=engine)
        if spark and isinstance(engine, spark):
            # Materialize before the temporary files are removed below.
            data.cache()
            data.compute()
    finally:
        shutil.rmtree(d)

    return data
| thunder-project/thunder | thunder/series/readers.py | Python | apache-2.0 | 14,550 | [
"Gaussian"
] | 0d3c9a71a6548493a2c0c9f614a20e58f0207bd5e1a7c7157b7705d542f5ff03 |
# $HeadURL: $
''' TransferCommand module
'''
from datetime import datetime, timedelta
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
class TransferCommand( Command ):
  '''
  Transfer "master" Command

  Queries the DataOperation accounting reports for per-channel transfer
  metrics ( Quality | FailedTransfers ) and caches them via
  ResourceManagementClient in the TransferCache table.
  '''

  def __init__( self, args = None, clients = None ):

    super( TransferCommand, self ).__init__( args, clients )

    # Reuse injected clients when provided ( eases testing ), otherwise
    # instantiate the defaults.
    if 'ReportsClient' in self.apis:
      self.rClient = self.apis[ 'ReportsClient' ]
    else:
      self.rClient = ReportsClient()

    if 'ReportGenerator' in self.apis:
      self.rgClient = self.apis[ 'ReportGenerator' ]
    else:
      self.rgClient = RPCClient( 'Accounting/ReportGenerator' )

    # The reports client talks to accounting through this RPC client.
    self.rClient.rpcClient = self.rgClient

    if 'ResourceManagementClient' in self.apis:
      self.rmClient = self.apis[ 'ResourceManagementClient' ]
    else:
      self.rmClient = ResourceManagementClient()

  def _storeCommand( self, results ):
    '''
    Stores the results of doNew method on the database.

    :param results: list of dicts with SourceName, DestinationName,
      Metric and Value keys
    :return: S_OK() or the first failing S_ERROR
    '''

    for result in results:

      resQuery = self.rmClient.addOrModifyTransferCache( result[ 'SourceName' ],
                                                         result[ 'DestinationName' ],
                                                         result[ 'Metric' ],
                                                         result[ 'Value' ] )
      # Abort on the first failed insertion.
      if not resQuery[ 'OK' ]:
        return resQuery

    return S_OK()

  def _prepareCommand( self ):
    '''
    TransferChannelCommand requires four arguments:
    - hours       : <int>
    - direction   : Source | Destination
    - elementName : <str>
    - metric      : Quality | FailedTransfers

    GGUSTickets are associated with gocDB names, so we have to transform the
    diracSiteName into a gocSiteName.
    '''

    if not 'hours' in self.args:
      return S_ERROR( 'Number of hours not specified' )
    hours = self.args[ 'hours' ]

    if not 'direction' in self.args:
      return S_ERROR( 'direction is missing' )
    direction = self.args[ 'direction' ]

    if direction not in [ 'Source', 'Destination' ]:
      return S_ERROR( 'direction is not Source nor Destination' )

    if not 'name' in self.args:
      return S_ERROR( '"name" is missing' )
    name = self.args[ 'name' ]

    if not 'metric' in self.args:
      return S_ERROR( 'metric is missing' )
    metric = self.args[ 'metric' ]

    if metric not in [ 'Quality', 'FailedTransfers' ]:
      return S_ERROR( 'metric is not Quality nor FailedTransfers' )

    return S_OK( ( hours, name, direction, metric ) )

  def doNew( self, masterParams = None ):
    '''
    Gets the parameters to run, either from the master method or from its
    own arguments.

    For every elementName ( cannot process bulk queries.. ) contacts the
    accounting client. It returns dictionaries like { 'X -> Y' : { id: 100%.. } }

    If there are ggus tickets, are recorded and then returned.
    '''

    if masterParams is not None:
      hours, name, direction, metric = masterParams
    else:
      params = self._prepareCommand()
      if not params[ 'OK' ]:
        return params
      hours, name, direction, metric = params[ 'Value' ]

    # Time window: last `hours` hours up to now.
    toD = datetime.utcnow()
    fromD = toD - timedelta( hours = hours )

    # dictionary with conditions for the accounting
    transferDict = {
                     'OperationType' : 'putAndRegister',
                     direction       : name
                   }

    if metric == 'FailedTransfers':
      transferDict[ 'FinalStatus' ] = [ 'Failed' ]

    transferResults = self.rClient.getReport( 'DataOperation', metric, fromD,
                                              toD, transferDict, 'Channel' )

    if not transferResults[ 'OK' ]:
      return transferResults
    transferResults = transferResults[ 'Value' ]

    if not 'data' in transferResults:
      return S_ERROR( 'Missing data key' )
    transferResults = transferResults[ 'data' ]

    uniformResult = []

    for channel, elementDict in transferResults.items():

      try:
        source, destination = channel.split( ' -> ' )
      except ValueError:
        # Skip channel names that do not follow the 'X -> Y' convention.
        continue

      channelDict = {}
      channelDict[ 'SourceName' ] = source
      channelDict[ 'DestinationName' ] = destination
      channelDict[ 'Metric' ] = metric
      # Average over all time buckets returned for this channel.
      channelDict[ 'Value' ] = sum( elementDict.values() ) / len( elementDict.values() )

      uniformResult.append( channelDict )

    storeRes = self._storeCommand( uniformResult )
    if not storeRes[ 'OK' ]:
      return storeRes

    # Compute mean of all transfer channels
    value = 0
    for channelDict in uniformResult:
      value += channelDict[ 'Value' ]
    if uniformResult:
      value = float( value ) / len( uniformResult )
    else:
      value = None

    return S_OK( { 'Mean' : value, 'Name' : name } )

  def doCache( self ):
    '''
    Method that reads the cache table and tries to read from it. It will
    return a list of dictionaries if there are results.
    '''

    params = self._prepareCommand()
    if not params[ 'OK' ]:
      return params
    _hours, name, direction, metric = params[ 'Value' ]

    # The cache is filtered on either side of the channel, depending on
    # the requested direction.
    sourceName, destinationName = None, None
    if direction == 'Source':
      sourceName = name
    if direction == 'Destination':
      destinationName = name

    result = self.rmClient.selectTransferCache( sourceName, destinationName, metric )
    if not result[ 'OK' ]:
      return result
    result = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]

    # Compute mean of all transfer channels
    value = 0
    for channelDict in result:
      value += channelDict[ 'Value' ]
    if result:
      value = float( value ) / len( result )
    else:
      value = None

    return S_OK( { 'Mean' : value, 'Name' : name } )

  def doMaster( self ):
    '''
    Master method, which looks little bit spaguetti code, sorry !
    - It gets all Sites.
    - It gets all StorageElements

    As there is no bulk query, it compares with what we have on the database.
    It queries a portion of them.
    '''

    sites = CSHelpers.getSites()
    if not sites[ 'OK' ]:
      return sites
    sites = sites[ 'Value' ]

    ses = CSHelpers.getStorageElements()
    if not ses[ 'OK' ]:
      return ses
    ses = ses[ 'Value' ]

    elementNames = sites + ses

#    sourceQuery = self.rmClient.selectTransferCache( meta = { 'columns' : [ 'SourceName' ] } )
#    if not sourceQuery[ 'OK' ]:
#      return sourceQuery
#    sourceQuery = [ element[0] for element in sourceQuery[ 'Value' ] ]
#
#    sourceElementsToQuery = list( set( elementNames ).difference( set( sourceQuery ) ) )
    gLogger.info( 'Processing %s' % ', '.join( elementNames ) )

    # NOTE(review): doNew is given the whole elementNames list as its
    # 'name' parameter -- presumably the accounting condition accepts a
    # list of names; confirm.
    for metric in [ 'Quality', 'FailedTransfers' ]:
      for direction in [ 'Source', 'Destination' ]:
        # 2 hours of window
        result = self.doNew( ( 2, elementNames, direction, metric ) )
        if not result[ 'OK' ]:
          self.metrics[ 'failed' ].append( result )

    return S_OK( self.metrics )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF | marcelovilaca/DIRAC | ResourceStatusSystem/Command/TransferCommand.py | Python | gpl-3.0 | 8,013 | [
"DIRAC"
] | ed595ae8deb0cd2bae31224b13fb3203b7747d0dc6f4e6c06c8453ebcf08b0ed |
from vtk import *
# Build a random graph and display it with VTK's graph layout view.
source = vtkRandomGraphSource()

view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(source.GetOutputPort())

view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
# Hand control to the interactor; blocks until the window is closed.
view.GetInteractor().Start()
| collects/VTK | Examples/Infovis/Python/hello_world.py | Python | bsd-3-clause | 253 | [
"VTK"
] | 722bba4f7955bd3df814ee2bf415493001d8da256eff4ed8909b1affb0b98388 |
from itertools import chain
from tools import indent
from visitor import Visitor
from expr import ExpresionOut
from alias import AliasOut
class StatementOut(Visitor):
    """Render Python 2 statement AST nodes as lists of source lines.

    Every ``visit_*`` classmethod returns a list of strings (one per
    output line); expression nodes are rendered to single strings by
    delegating to :class:`ExpresionOut`.
    """

    # indentation width (in spaces) applied to nested bodies
    level = 2

    @classmethod
    def handle_body(self, body, *a, **k):
        """Visit each statement in *body* and indent the emitted lines."""
        lines = (self.visit(stmt, *a, **k) for stmt in body)
        lines = chain.from_iterable(lines)
        lines = indent(lines, self.level)
        return lines

    @classmethod
    def visit_FunctionDef(self, ast, *a, **k):
        """Emit decorators, a ``def`` header and the indented body."""
        decorators = ["@%s" % self.visit(dec, *a, **k) for dec in ast.decorator_list]
        # pass *a/**k through so visit_arguments sees the same context
        head = ["def %s (%s):" % (ast.name, self.visit(ast.args, *a, **k))]
        body = self.handle_body(ast.body, *a, **k)
        return decorators + head + body

    @classmethod
    def visit_arguments(self, ast, *a, **k):
        """Render an argument list, including defaults, *args and **kwargs."""
        args = [ExpresionOut.visit(arg, *a, **k) for arg in ast.args]
        if ast.defaults:
            # BUG FIX: the previous code sliced with -len(defaults) even
            # when there were no defaults; args[:-0] == [] silently
            # dropped every argument of a default-free signature.
            values = (ExpresionOut.visit(default, *a, **k) for default in ast.defaults)
            defaults = ["%s=%s" % t for t in zip(args[-len(ast.defaults):], values)]
            args = args[:-len(ast.defaults)]
        else:
            defaults = []
        vararg = ("*%s" % ast.vararg,) if ast.vararg else ()
        kwarg = ("**%s" % ast.kwarg,) if ast.kwarg else ()
        return ", ".join(chain(args, defaults, vararg, kwarg))

    @classmethod
    def visit_ClassDef(self, ast, *a, **k):
        """Emit decorators, a ``class`` header and the indented body."""
        decorators = ["@%s" % self.visit(dec, *a, **k) for dec in ast.decorator_list]
        if ast.bases:
            # BUG FIX: ast.bases is a *list* of expression nodes; render
            # each base individually instead of visiting the list itself.
            bases = ", ".join(ExpresionOut.visit(base, *a, **k) for base in ast.bases)
            head = ["class %s (%s):" % (ast.name, bases)]
        else:
            head = ["class %s:" % (ast.name)]
        body = self.handle_body(ast.body, *a, **k)
        return decorators + head + body

    @classmethod
    def visit_Return(self, ast, *a, **k):
        if ast.value:
            return ["return %s" % ExpresionOut.visit(ast.value, *a, **k)]
        return ["return"]

    @classmethod
    def visit_Delete(self, ast, *a, **k):
        targets = ", ".join(ExpresionOut.visit(target, *a, **k) for target in ast.targets)
        return ["del %s" % targets]

    @classmethod
    def visit_Assign(self, ast, *a, **k):
        targets = ", ".join(ExpresionOut.visit(target, *a, **k) for target in ast.targets)
        value = ExpresionOut.visit(ast.value, *a, **k)
        return ["%s = %s" % (targets, value)]

    @classmethod
    def visit_AugAssign(self, ast, *a, **k):
        # NOTE(review): OperatorOut is not imported in this module --
        # presumably provided elsewhere; confirm before relying on it.
        target = ExpresionOut.visit(ast.target, *a, **k)
        op = OperatorOut.visit(ast.op, *a, **k)
        value = ExpresionOut.visit(ast.value, *a, **k)
        return ["%s %s= %s" % (target, op, value)]

    @classmethod
    def visit_Print(self, ast, *a, **k):
        values = (ExpresionOut.visit(value, *a, **k) for value in ast.values)
        dest = (ExpresionOut.visit(ast.dest, *a, **k), ) if ast.dest else ()
        stmt = "print >> %s" if dest else "print %s"
        stmt %= ", ".join(chain(dest, values))
        # a trailing comma suppresses the newline in Python 2
        return [stmt] if ast.nl else [stmt + ","]

    @classmethod
    def visit_For(self, ast, *a, **k):
        target = ExpresionOut.visit(ast.target, *a, **k)
        iter = ExpresionOut.visit(ast.iter, *a, **k)
        head = ["for %s in %s:" % (target, iter)]
        body = self.handle_body(ast.body, *a, **k)
        if not ast.orelse:
            return head + body
        orelse = self.handle_body(ast.orelse, *a, **k)
        return head + body + ["else:"] + orelse

    @classmethod
    def visit_While(self, ast, *a, **k):
        head = ["while %s:" % ExpresionOut.visit(ast.test, *a, **k)]
        body = self.handle_body(ast.body, *a, **k)
        if not ast.orelse:
            return head + body
        orelse = self.handle_body(ast.orelse, *a, **k)
        return head + body + ["else:"] + orelse

    @classmethod
    def visit_If(self, ast, *a, **k):
        head = ["if %s:" % ExpresionOut.visit(ast.test, *a, **k)]
        body = self.handle_body(ast.body, *a, **k)
        if not ast.orelse:
            return head + body
        orelse = self.handle_body(ast.orelse, *a, **k)
        return head + body + ["else:"] + orelse

    @classmethod
    def visit_With(self, ast, *a, **k):
        context_expr = ExpresionOut.visit(ast.context_expr, *a, **k)
        if ast.optional_vars:
            # BUG FIX: the "as" target must come from ast.optional_vars,
            # not from the context expression again.
            optional_vars = ExpresionOut.visit(ast.optional_vars, *a, **k)
            head = "with %s as %s:" % (context_expr, optional_vars)
        else:
            head = "with %s:" % context_expr
        body = self.handle_body(ast.body, *a, **k)
        return [head] + body

    @classmethod
    def visit_Raise(self, ast, *a, **k):
        type = (ExpresionOut.visit(ast.type, *a, **k),) if ast.type else ()
        inst = (ExpresionOut.visit(ast.inst, *a, **k),) if ast.inst else ()
        tback = (ExpresionOut.visit(ast.tback, *a, **k),) if ast.tback else ()
        args = ", ".join(chain(type, inst, tback))
        # avoid emitting "raise " with a trailing space for a bare raise
        return ["raise %s" % args] if args else ["raise"]

    @classmethod
    def visit_TryExcept(self, ast, *a, **k):
        # NOTE(review): ExcepthandlerOut is not imported in this module --
        # presumably provided elsewhere; confirm before relying on it.
        head = ["try:"]
        body = self.handle_body(ast.body, *a, **k)
        handlers = (ExcepthandlerOut.visit(handler, *a, **k) for handler in ast.handlers)
        handlers = list(chain.from_iterable(handlers))
        if not ast.orelse:
            return head + body + handlers
        orelse = self.handle_body(ast.orelse, *a, **k)
        return head + body + handlers + ["else:"] + orelse

    @classmethod
    def visit_Tryfinally(self, ast, *a, **k):
        # NOTE(review): the Python 2 AST node is named `TryFinally`;
        # confirm the Visitor dispatch actually reaches this method.
        body = self.handle_body(ast.body, *a, **k)
        finalbody = self.handle_body(ast.finalbody, *a, **k)
        # BUG FIX: the keyword was misspelt "finaly:", which produced
        # syntactically invalid output.
        return ["try:"] + body + ["finally:"] + finalbody

    @classmethod
    def visit_Assert(self, ast, *a, **k):
        test = (ExpresionOut.visit(ast.test, *a, **k),)
        msg = (ExpresionOut.visit(ast.msg, *a, **k),) if ast.msg else ()
        return ["assert %s" % ", ".join(chain(test, msg))]

    @classmethod
    def visit_Import(self, ast, *a, **k):
        # BUG FIX: the previous code called a bare, undefined `join(...)`
        # (NameError); join the alias list on ", ".
        names = ", ".join(AliasOut.visit(alias, *a, **k) for alias in ast.names)
        return ["import %s" % names]

    @classmethod
    def visit_ImportFrom(self, ast, *a, **k):
        # NOTE(review): ast.level (relative-import dots) is ignored here.
        names = ", ".join(AliasOut.visit(alias, *a, **k) for alias in ast.names)
        return ["from %s import %s" % (ast.module, names)]

    @classmethod
    def visit_Exec(self, ast, *a, **k):
        body = ExpresionOut.visit(ast.body, *a, **k)
        if ast.globals:
            locals = ExpresionOut.visit(ast.locals, *a, **k)
            globals = ExpresionOut.visit(ast.globals, *a, **k)
            return ["exec %s in %s, %s" % (body, globals, locals)]
        if ast.locals:
            locals = ExpresionOut.visit(ast.locals, *a, **k)
            return ["exec %s in %s" % (body, locals)]
        return ["exec %s" % body]

    @classmethod
    def visit_Global(self, ast, *a, **k):
        return ["global %s" % ", ".join(ast.names)]

    @classmethod
    def visit_Expr(self, ast, *a, **k):
        return [ExpresionOut.visit(ast.value, *a, **k)]

    @classmethod
    def visit_Pass(self, ast, *a, **k):
        return ["pass"]

    @classmethod
    def visit_Break(self, ast, *a, **k):
        return ["break"]

    @classmethod
    def visit_Continue(self, ast, *a, **k):
        return ["continue"]
| Neppord/py2py | py2py_lib/out/stmt.py | Python | mit | 7,125 | [
"VisIt"
] | b4bbfc4bbe8603792581f223dd22fbbe5e5848d8bd56126f8eb0bc54c819fba1 |
import unittest, os
import numpy as np
import warnings
warnings.simplefilter('error')
from nose.plugins.attrib import attr
from moltools import Generator, Water, Molecule
FILE_XYZ =os.path.join( os.path.dirname( os.path.realpath( __file__ ) ), "tmp.xyz" )
@attr(speed = 'fast' )
class GeneratorTestCase( unittest.TestCase ):
    """Unit tests for the moltools Generator helpers."""

    def setUp(self):
        # Fresh Generator instance for every test.
        self.g = Generator()

    def test_get_hfqua_dal(self):
        """The generated HF quadratic-response DALTON input has the expected sections."""
        string = self.g.get_hfqua_dal()
        assert "**DALTON" in string
        assert ".PARALLELL" in string
        assert ".DIPLEN" in string
        assert "**WAVE FUNCTION" in string
        assert ".HF" in string
        assert "**END" in string

    def test_get_mol(self):
        """Water.get_standard() returns a Molecule instance."""
        w = Water.get_standard()
        self.assertIsInstance( w, Molecule )

    def test_vary_parameters(self):
        # TODO: strengthen this test; currently it only checks the None return.
        a = self.g.vary_parameters( {"dog" : {"min":"small", "max":5, "points":33} } )
        self.assertIsNone(a)

    def test_polar_to_cartesian_5_0_0(self):
        """r=5, theta=0, phi=0 maps onto the +z axis: (0, 0, 5)."""
        v = self.g.polar_to_cartesian( 5, 0, 0 )
        v_ref = np.zeros(3)
        v_ref[2] = 5
        self.eq( v, v_ref )

    def test_polar_to_cartesian_3_pi_pi(self):
        """r=3, theta=pi, phi=pi maps (up to rounding) onto (0, 0, -3)."""
        v = self.g.polar_to_cartesian( 3, np.pi, np.pi )
        v_ref = np.zeros(3)
        v_ref[2] = -3
        self.almost_eq( v, v_ref, decimal = 14 )

    #@mock.patch( "use_generator.open" ,create = True)
    #def test_gen_mols_param(self, mock_open):
    #    mock_open.return_value = mock.MagicMock( spec = file )
    #    ret = self.g.gen_mols_param()
    #    assert ret == 0

#Mocking added to ensure that the file tmp.xyz isn't opened writable during tests
#    @mock.patch( "use_generator.write" ,create = True)
#    @mock.patch( "use_generator.open" ,create = True)
#    def test_build_pna(self, mock_open, mock_write):
#        mock_open.return_value = mock.MagicMock( spec = file )
#        mock_write.return_value = mock.MagicMock( spec = file )
#        d = FILE_XYZ
#        res = self.g.build_pna( xyz = d, waters = 1 )
#        self.assertIsNone( res )

    def almost_eq(self, a, b, decimal = 7):
        """Element-wise comparison up to `decimal` decimal places."""
        np.testing.assert_almost_equal( a, b, decimal = decimal )

    def eq(self, a, b, ):
        """Exact element-wise comparison."""
        np.testing.assert_equal( a, b )

    def tearDown(self):
        # Remove side-effect files produced by Generator runs
        # (.pot files, pna.mol, and 5.00-* artifacts).
        files = [f for f in os.listdir( os.path.dirname( os.path.realpath(__file__) ) ) if f.endswith(".pot") or f == "pna.mol" or f.startswith("5.00-") ]
        for fi in files:
            os.remove( os.path.join(os.path.dirname( __file__ ) , fi) )
| fishstamp82/moltools | moltools/test/test_use_generator.py | Python | mit | 2,571 | [
"Dalton"
] | 700393602cffbb81c473832b1b4e4c5dc07db23079cd5d3515647cc065996f7a |
"""
Module to provide RabbitMQ compatibility to Salt.
Todo: A lot, need to add cluster support, logging, and minion configuration
data.
"""
import logging
import os
import random
import re
import string
import salt.utils.itertools
import salt.utils.json
import salt.utils.path
import salt.utils.platform
import salt.utils.user
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
log = logging.getLogger(__name__)
RABBITMQCTL = None
RABBITMQ_PLUGINS = None
def __virtual__():
    """
    Verify RabbitMQ is installed.

    Side effect: resolves and caches the full paths of ``rabbitmqctl``
    and ``rabbitmq-plugins`` in the module-level RABBITMQCTL /
    RABBITMQ_PLUGINS globals.
    """
    global RABBITMQCTL
    global RABBITMQ_PLUGINS

    if salt.utils.platform.is_windows():
        import winreg

        key = None
        try:
            # The installer records its location in the 32-bit registry view.
            key = winreg.OpenKeyEx(
                winreg.HKEY_LOCAL_MACHINE,
                "SOFTWARE\\VMware, Inc.\\RabbitMQ Server",
                0,
                winreg.KEY_READ | winreg.KEY_WOW64_32KEY,
            )
            (dir_path, value_type) = winreg.QueryValueEx(key, "Install_Dir")
            if value_type != winreg.REG_SZ:
                raise TypeError(
                    "Invalid RabbitMQ Server directory type: {}".format(value_type)
                )
            if not os.path.isdir(dir_path):
                raise OSError("RabbitMQ directory not found: {}".format(dir_path))
            subdir_match = ""
            # Pick the versioned rabbitmq_server-* subdirectory.
            for name in os.listdir(dir_path):
                if name.startswith("rabbitmq_server-"):
                    subdir_path = os.path.join(dir_path, name)
                    # Get the matching entry that is last in ASCII order.
                    if os.path.isdir(subdir_path) and subdir_path > subdir_match:
                        subdir_match = subdir_path
            if not subdir_match:
                raise OSError(
                    '"rabbitmq_server-*" subdirectory not found in: {}'.format(dir_path)
                )
            RABBITMQCTL = os.path.join(subdir_match, "sbin", "rabbitmqctl.bat")
            RABBITMQ_PLUGINS = os.path.join(
                subdir_match, "sbin", "rabbitmq-plugins.bat"
            )
        except Exception:  # pylint: disable=broad-except
            # Any registry/filesystem problem leaves RABBITMQCTL unset;
            # the check below then reports the module as unavailable.
            pass
        finally:
            if key is not None:
                winreg.CloseKey(key)
    else:
        RABBITMQCTL = salt.utils.path.which("rabbitmqctl")
        RABBITMQ_PLUGINS = salt.utils.path.which("rabbitmq-plugins")

    if not RABBITMQCTL:
        return (False, "Module rabbitmq: module only works when RabbitMQ is installed")
    return True
def _check_response(response):
if isinstance(response, dict):
if response["retcode"] != 0 or response["stderr"]:
raise CommandExecutionError(
"RabbitMQ command failed: {}".format(response["stderr"])
)
else:
if "Error" in response:
raise CommandExecutionError("RabbitMQ command failed: {}".format(response))
def _format_response(response, msg):
if isinstance(response, dict):
if response["retcode"] != 0 or response["stderr"]:
raise CommandExecutionError(
"RabbitMQ command failed: {}".format(response["stderr"])
)
else:
response = response["stdout"]
else:
if "Error" in response:
raise CommandExecutionError("RabbitMQ command failed: {}".format(response))
return {msg: response}
def _get_rabbitmq_plugin():
    """
    Return the path of the rabbitmq-plugins command.

    On distributions that do not install it into /usr/bin or /usr/local/bin,
    the path is derived from the installed rabbitmq-server package version
    and its conventional location under /usr/lib. The result is cached in the
    module-level ``RABBITMQ_PLUGINS`` global.
    """
    global RABBITMQ_PLUGINS
    if RABBITMQ_PLUGINS is None:
        version = __salt__["pkg.version"]("rabbitmq-server").split("-")[0]
        RABBITMQ_PLUGINS = (
            "/usr/lib/rabbitmq/lib/rabbitmq_server-{}"
            "/sbin/rabbitmq-plugins".format(version)
        )
    return RABBITMQ_PLUGINS
def _safe_output(line):
"""
Looks for rabbitmqctl warning, or general formatting, strings that aren't
intended to be parsed as output.
Returns a boolean whether the line can be parsed as rabbitmqctl output.
"""
return not any(
[
line.startswith("Listing") and line.endswith("..."),
line.startswith("Listing") and "\t" not in line,
"...done" in line,
line.startswith("WARNING:"),
len(line) == 0,
]
)
def _strip_listing_to_done(output_list):
    """
    Drop non-data lines ("Listing ...", "...done", warnings) from rabbitmqctl
    output.

    output_list: rabbitmq command output split by newline

    Returns a (possibly empty) list of the parseable lines.
    """
    kept = []
    for entry in output_list:
        if _safe_output(entry):
            kept.append(entry)
    return kept
def _output_to_dict(cmdoutput, values_mapper=None):
    """
    Convert rabbitmqctl output to a dict of data.

    cmdoutput: a ``cmd.run_all`` result dict or the raw stdout string
    values_mapper: callable applied to the values portion of each line;
        defaults to splitting on tabs.

    Raises ``CommandExecutionError`` when a result dict indicates failure.
    """
    if isinstance(cmdoutput, dict):
        if cmdoutput["retcode"] != 0 or cmdoutput["stderr"]:
            raise CommandExecutionError(
                "RabbitMQ command failed: {}".format(cmdoutput["stderr"])
            )
        cmdoutput = cmdoutput["stdout"]

    if values_mapper is None:
        def values_mapper(string):
            return string.split("\t")

    ret = {}
    # Skip the decorative "Listing ..." / "...done" lines.
    for row in _strip_listing_to_done(cmdoutput.splitlines()):
        key, sep, values = row.partition("\t")
        if not sep:
            # Edge case: the row only holds a key with no values. Preserve
            # rabbitmq's reporting behavior by mapping it to an empty string
            # (e.g. a '/' permission row whose fields are all empty).
            log.debug(
                "Could not find any values for key '%s'. "
                "Setting to '%s' to an empty string.",
                row,
                row,
            )
            ret[row] = ""
        else:
            ret[key] = values_mapper(values)
    return ret
def _output_to_list(cmdoutput):
    """
    Convert rabbitmqctl output to a list of strings (assuming
    whitespace-delimited output), ignoring lines that should not be parsed,
    such as warnings.

    cmdoutput: string output of rabbitmqctl commands
    """
    items = []
    for line in cmdoutput.splitlines():
        if _safe_output(line):
            items.extend(line.split())
    return items
def _output_lines_to_list(cmdoutput):
    """
    Convert rabbitmqctl output to a list of strings (assuming
    newline-delimited output), ignoring lines that should not be parsed,
    such as warnings.

    cmdoutput: string output of rabbitmqctl commands
    """
    lines = []
    for raw in cmdoutput.splitlines():
        if _safe_output(raw):
            lines.append(raw.strip())
    return lines
def list_users(runas=None):
    """
    Return a dict of users (name -> tag list) based on
    ``rabbitmqctl list_users``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_users
    """
    # Windows runas currently requires a password, so no default user is
    # assumed there.
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "list_users", "-q"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )

    def _tags_mapper(string):
        # Turn a tag string such as "[admin, monitoring]" (comma separated)
        # or "[admin monitoring]" (space separated) into a list of tags.
        inner = string[1:-1]
        if "," in string:
            return [tag.strip() for tag in inner.split(",")]
        return inner.split(" ")

    return _output_to_dict(result, _tags_mapper)
def list_vhosts(runas=None):
    """
    Return the list of vhosts reported by ``rabbitmqctl list_vhosts``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_vhosts
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "list_vhosts", "-q"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return _output_to_list(result["stdout"])
def list_upstreams(runas=None):
    """
    Returns a dict of upstreams (name -> JSON definition) based on
    ``rabbitmqctl list_parameters``.

    :param str runas: The name of the user to run this command as.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_upstreams

    .. versionadded:: 3000
    """
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    res = __salt__["cmd.run_all"](
        [RABBITMQCTL, "list_parameters", "-q"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    # Raise on a failed rabbitmqctl invocation instead of silently parsing
    # garbage; this matches every other list_* function in this module.
    _check_response(res)
    ret = {}
    # splitlines() (rather than split("\n")) avoids a bogus empty/'\r' entry
    # from trailing newlines or CRLF output.
    for raw_line in res["stdout"].splitlines():
        if _safe_output(raw_line):
            # Each line is "<component>\t<name>\t<definition>".
            (_, name, definition) = raw_line.split("\t")
            ret[name] = definition
    return ret
def user_exists(name, runas=None):
    """
    Check whether a RabbitMQ user exists, based on
    ``rabbitmqctl list_users``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.user_exists rabbit_user
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    users = list_users(runas=runas)
    return name in users
def vhost_exists(name, runas=None):
    """
    Check whether a vhost exists, based on ``rabbitmqctl list_vhosts``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.vhost_exists rabbit_host
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    vhosts = list_vhosts(runas=runas)
    return name in vhosts
def upstream_exists(name, runas=None):
    """
    Check whether the upstream exists, based on
    ``rabbitmqctl list_parameters``.

    :param str name: The name of the upstream to check for.
    :param str runas: The name of the user to run the command as.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.upstream_exists rabbit_upstream

    .. versionadded:: 3000
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    upstreams = list_upstreams(runas=runas)
    return name in upstreams
def add_user(name, password=None, runas=None):
    """
    Add a RabbitMQ user via ``rabbitmqctl add_user``.

    RabbitMQ requires a password at creation time; when none is supplied a
    random temporary password is set and cleared again afterwards.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.add_user rabbit_user password
    """
    clear_pw = password is None
    if clear_pw:
        # Generate a random, temporary password. RabbitMQ requires one.
        rng = random.SystemRandom()
        alphabet = string.ascii_uppercase + string.digits
        password = "".join(rng.choice(alphabet) for _ in range(15))
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    if salt.utils.platform.is_windows():
        # Quote the full command on Windows so shell metacharacters in the
        # password (e.g. '|') cannot break command execution.
        python_shell = True
        cmd = '"{}" add_user "{}" "{}"'.format(RABBITMQCTL, name, password)
    else:
        python_shell = False
        cmd = [RABBITMQCTL, "add_user", name, password]
    res = __salt__["cmd.run_all"](
        cmd,
        reset_system_locale=False,
        output_loglevel="quiet",  # keep the password out of the logs
        runas=runas,
        python_shell=python_shell,
    )
    if clear_pw:
        try:
            # Drop the temporary password again.
            clear_password(name, runas)
        except Exception:  # pylint: disable=broad-except
            # Clearing the password failed; remove the half-created user and
            # re-raise the original error.
            delete_user(name, runas)
            raise
    return _format_response(res, "Added")
def delete_user(name, runas=None):
    """
    Delete a user via ``rabbitmqctl delete_user``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.delete_user rabbit_user
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "delete_user", name],
        reset_system_locale=False,
        python_shell=False,
        runas=runas,
    )
    return _format_response(result, "Deleted")
def change_password(name, password, runas=None):
    """
    Change a user's password via ``rabbitmqctl change_password``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.change_password rabbit_user password
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    if salt.utils.platform.is_windows():
        # Quote the full command on Windows so shell metacharacters in the
        # password (e.g. '|') cannot break command execution.
        python_shell = True
        cmd = '"{}" change_password "{}" "{}"'.format(RABBITMQCTL, name, password)
    else:
        python_shell = False
        cmd = [RABBITMQCTL, "change_password", name, password]
    result = __salt__["cmd.run_all"](
        cmd,
        reset_system_locale=False,
        runas=runas,
        output_loglevel="quiet",  # keep the password out of the logs
        python_shell=python_shell,
    )
    return _format_response(result, "Password Changed")
def clear_password(name, runas=None):
    """
    Remove a user's password via ``rabbitmqctl clear_password``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.clear_password rabbit_user
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "clear_password", name],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    return _format_response(result, "Password Cleared")
def check_password(name, password, runas=None):
    """
    .. versionadded:: 2016.3.0

    Checks if a user's password is valid.

    Uses ``rabbitmqctl authenticate_user`` on RabbitMQ >= 3.5.7; older
    servers fall back to evaluating ``check_user_login`` inside the broker
    via ``rabbitmqctl eval``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.check_password rabbit_user password
    """
    # try to get the rabbitmq-version - adapted from _get_rabbitmq_plugin
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    try:
        res = __salt__["cmd.run"](
            [RABBITMQCTL, "status"],
            reset_system_locale=False,
            runas=runas,
            python_shell=False,
        )
        # Check regex against older RabbitMQ version status output
        old_server_version = re.search(r'\{rabbit,"RabbitMQ","(.+)"\}', res)
        # Check regex against newer RabbitMQ version status output
        server_version = re.search(r"RabbitMQ version:\s*(.+)", res)
        if server_version is None and old_server_version is None:
            raise ValueError
        if old_server_version:
            server_version = old_server_version
        # Strip any "-<suffix>" (e.g. "-rc1") before parsing the numbers.
        server_version = server_version.group(1).split("-")[0]
        version = [int(i) for i in server_version.split(".")]
    except ValueError:
        # Unparseable version (or no match above): assume an old server and
        # use the legacy eval-based check below.
        version = (0, 0, 0)
    if len(version) < 3:
        version = (0, 0, 0)
    # rabbitmq introduced a native api to check a username and password in version 3.5.7.
    if tuple(version) >= (3, 5, 7):
        if salt.utils.platform.is_windows():
            # On Windows, if the password contains a special character
            # such as '|', normal execution will fail. For example:
            # cmd: rabbitmq.add_user abc "asdf|def"
            # stderr: 'def' is not recognized as an internal or external
            # command,\r\noperable program or batch file.
            # Work around this by using a shell and a quoted command.
            python_shell = True
            cmd = '"{}" authenticate_user "{}" "{}"'.format(RABBITMQCTL, name, password)
        else:
            python_shell = False
            cmd = [RABBITMQCTL, "authenticate_user", name, password]
        res = __salt__["cmd.run_all"](
            cmd,
            reset_system_locale=False,
            runas=runas,
            output_loglevel="quiet",
            python_shell=python_shell,
        )
        if res["retcode"] != 0 or res["stderr"]:
            return False
        return True
    # Legacy path: ask the broker directly via ``rabbitmqctl eval``. Double
    # quotes in the name/password are escaped to keep the Erlang expression
    # well-formed.
    cmd = (
        "rabbit_auth_backend_internal:check_user_login"
        '(<<"{}">>, [{{password, <<"{}">>}}]).'.format(
            name.replace('"', '\\"'), password.replace('"', '\\"')
        )
    )
    res = __salt__["cmd.run_all"](
        [RABBITMQCTL, "eval", cmd],
        reset_system_locale=False,
        runas=runas,
        output_loglevel="quiet",
        python_shell=False,
    )
    msg = "password-check"
    _response = _format_response(res, msg)
    _key = next(iter(_response))
    if "invalid credentials" in _response[_key]:
        return False
    return True
def add_vhost(vhost, runas=None):
    """
    Add a vhost via ``rabbitmqctl add_vhost``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq add_vhost '<vhost_name>'
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "add_vhost", vhost],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    return _format_response(result, "Added")
def delete_vhost(vhost, runas=None):
    """
    Delete a vhost via ``rabbitmqctl delete_vhost``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.delete_vhost '<vhost_name>'
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "delete_vhost", vhost],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    return _format_response(result, "Deleted")
def set_permissions(vhost, user, conf=".*", write=".*", read=".*", runas=None):
    """
    Set a user's configure/write/read permissions on a vhost via
    ``rabbitmqctl set_permissions``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.set_permissions myvhost myuser
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    cmd = [RABBITMQCTL, "set_permissions", "-p", vhost, user, conf, write, read]
    result = __salt__["cmd.run_all"](
        cmd,
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    return _format_response(result, "Permissions Set")
def list_permissions(vhost, runas=None):
    """
    List permissions for a vhost via ``rabbitmqctl list_permissions``,
    returned as a dict keyed by user name.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_permissions /myvhost
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "list_permissions", "--formatter=json", "-p", vhost],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    perms_dict = {}
    # The JSON formatter yields a list of {"user": ..., <perm fields>...}
    # entries; re-key them by user and drop the redundant "user" field.
    for entry in salt.utils.json.loads(result["stdout"]):
        user = entry.pop("user")
        perms_dict[user] = entry
    return perms_dict
def list_user_permissions(name, runas=None):
    """
    List a user's permissions via ``rabbitmqctl list_user_permissions``,
    returned as a dict keyed by vhost.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_user_permissions user
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "list_user_permissions", name, "--formatter=json"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    perms_dict = {}
    # The JSON formatter yields a list of {"vhost": ..., <perm fields>...}
    # entries; re-key them by vhost and drop the redundant "vhost" field.
    for entry in salt.utils.json.loads(result["stdout"]):
        vhost = entry.pop("vhost")
        perms_dict[vhost] = entry
    return perms_dict
def set_user_tags(name, tags, runas=None):
    """Add user tags via ``rabbitmqctl set_user_tags``.

    ``tags`` may be a single tag or a list/tuple of tags.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.set_user_tags myadmin administrator
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    if isinstance(tags, (list, tuple)):
        tag_list = list(tags)
    else:
        tag_list = [tags]
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "set_user_tags", name] + tag_list,
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    return _format_response(result, "Tag(s) set")
def status(runas=None):
    """
    Return the output of ``rabbitmqctl status``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.status
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "status"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return result["stdout"]
def cluster_status(runas=None):
    """
    Return the output of ``rabbitmqctl cluster_status``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.cluster_status
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "cluster_status"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return result["stdout"]
def join_cluster(host, user="rabbit", ram_node=None, runas=None):
    """
    Join a rabbit cluster as ``<user>@<host>``.

    When ``ram_node`` is truthy the node joins as a RAM node. The local
    application is stopped before joining and started again afterwards.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.join_cluster rabbit.example.com rabbit
    """
    node = "{}@{}".format(user, host)
    cmd = [RABBITMQCTL, "join_cluster"]
    if ram_node:
        cmd.append("--ram")
    cmd.append(node)
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    # The app must be stopped while joining, then restarted.
    stop_app(runas)
    result = __salt__["cmd.run_all"](
        cmd, reset_system_locale=False, runas=runas, python_shell=False
    )
    start_app(runas)
    return _format_response(result, "Join")
def stop_app(runas=None):
    """
    Stop the RabbitMQ application, leaving the Erlang node running.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.stop_app
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "stop_app"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return result["stdout"]
def start_app(runas=None):
    """
    Start the RabbitMQ application.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.start_app
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "start_app"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return result["stdout"]
def reset(runas=None):
    """
    Return a RabbitMQ node to its virgin state via ``rabbitmqctl reset``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.reset
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "reset"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return result["stdout"]
def force_reset(runas=None):
    """
    Forcefully return a RabbitMQ node to its virgin state via
    ``rabbitmqctl force_reset``.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.force_reset
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "force_reset"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return result["stdout"]
def list_queues(runas=None, *args):
    """
    Return queue details of the ``/`` virtual host.

    Extra positional arguments are passed through to rabbitmqctl as
    queueinfoitem columns.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_queues messages consumers
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    cmd = [RABBITMQCTL, "list_queues", "-q"] + list(args)
    result = __salt__["cmd.run_all"](
        cmd, reset_system_locale=False, runas=runas, python_shell=False
    )
    _check_response(result)
    return _output_to_dict(result["stdout"])
def list_queues_vhost(vhost, runas=None, *args):
    """
    Return queue details of the specified virtual host.

    The first parameter is the vhost name; the remaining positional
    arguments are treated as queueinfoitem columns. For details on vhost
    ``/``, use :mod:`list_queues <salt.modules.rabbitmq.list_queues>`
    instead.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_queues messages consumers
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    cmd = [RABBITMQCTL, "list_queues", "-q", "-p", vhost] + list(args)
    result = __salt__["cmd.run_all"](
        cmd, reset_system_locale=False, runas=runas, python_shell=False
    )
    _check_response(result)
    return _output_to_dict(result["stdout"])
def list_policies(vhost="/", runas=None):
    """
    Return a dictionary of policies nested by vhost and name
    based on the data returned from rabbitmqctl list_policies.

    Reference: http://www.rabbitmq.com/ha.html

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_policies
    """
    ret = {}
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    res = __salt__["cmd.run_all"](
        [RABBITMQCTL, "list_policies", "-q", "-p", vhost],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(res)
    output = res["stdout"]
    # The server package name differs on FreeBSD ("rabbitmq" vs
    # "rabbitmq-server"); the version decides the column layout below.
    if __grains__["os_family"] != "FreeBSD":
        version = __salt__["pkg.version"]("rabbitmq-server").split("-")[0]
    else:
        version = __salt__["pkg.version"]("rabbitmq").split("-")[0]
    for line in _output_lines_to_list(output):
        parts = line.split("\t")
        # Expect 5 fields (older servers without apply_to) or 6 fields.
        if len(parts) not in (5, 6):
            continue
        vhost, name = parts[0], parts[1]
        if vhost not in ret:
            ret[vhost] = {}
        ret[vhost][name] = {}
        if _LooseVersion(version) >= _LooseVersion("3.7"):
            # in version 3.7 the position of apply_to and pattern has been
            # switched
            # NOTE(review): this branch indexes parts[5], i.e. it assumes the
            # full 6-column output that rabbitmqctl >= 3.7 emits — confirm a
            # 5-column line can never reach here on those versions.
            ret[vhost][name]["pattern"] = parts[2]
            ret[vhost][name]["apply_to"] = parts[3]
            ret[vhost][name]["definition"] = parts[4]
            ret[vhost][name]["priority"] = parts[5]
        else:
            # How many fields are there? - 'apply_to' was inserted in position
            # 2 at some point
            # and in version 3.7 the position of apply_to and pattern has been
            # switched
            offset = len(parts) - 5
            if len(parts) == 6:
                ret[vhost][name]["apply_to"] = parts[2]
            ret[vhost][name].update(
                {
                    "pattern": parts[offset + 2],
                    "definition": parts[offset + 3],
                    "priority": parts[offset + 4],
                }
            )
    return ret
def set_policy(
    vhost, name, pattern, definition, priority=None, runas=None, apply_to=None
):
    """
    Set a policy based on rabbitmqctl set_policy.

    Reference: http://www.rabbitmq.com/ha.html

    :param str vhost: Virtual host to apply the policy to.
    :param str name: Policy name.
    :param str pattern: Regex matched against resource names.
    :param definition: Policy definition as a dict or a JSON string.
    :param priority: Optional policy priority (int or str).
    :param str runas: The name of the user to run the command as.
    :param str apply_to: Optional target: 'queues', 'exchanges' or 'all'.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.set_policy / HA '.*' '{"ha-mode":"all"}'
    """
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    if isinstance(definition, dict):
        definition = salt.utils.json.dumps(definition)
    if not isinstance(definition, str):
        raise SaltInvocationError(
            "The 'definition' argument must be a dictionary or JSON string"
        )
    cmd = [RABBITMQCTL, "set_policy", "-p", vhost]
    if priority:
        # Cast to str: callers (e.g. states) may pass an integer priority,
        # and the command argument list must contain only strings.
        cmd.extend(["--priority", str(priority)])
    if apply_to:
        cmd.extend(["--apply-to", apply_to])
    cmd.extend([name, pattern, definition])
    res = __salt__["cmd.run_all"](
        cmd, reset_system_locale=False, runas=runas, python_shell=False
    )
    log.debug("Set policy: %s", res["stdout"])
    return _format_response(res, "Set")
def delete_policy(vhost, name, runas=None):
    """
    Delete a policy via ``rabbitmqctl clear_policy``.

    Reference: http://www.rabbitmq.com/ha.html

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.delete_policy / HA
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "clear_policy", "-p", vhost, name],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    log.debug("Delete policy: %s", result["stdout"])
    return _format_response(result, "Deleted")
def policy_exists(vhost, name, runas=None):
    """
    Check whether a policy exists, based on ``rabbitmqctl list_policies``.

    Reference: http://www.rabbitmq.com/ha.html

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.policy_exists / HA
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    policies = list_policies(runas=runas)
    return vhost in policies and name in policies[vhost]
def list_available_plugins(runas=None):
    """
    Return the names of all available plugins (enabled and disabled).

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_available_plugins
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [_get_rabbitmq_plugin(), "list", "-m"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return _output_to_list(result["stdout"])
def list_enabled_plugins(runas=None):
    """
    Return the names of the enabled plugins.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_enabled_plugins
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [_get_rabbitmq_plugin(), "list", "-m", "-e"],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return _output_to_list(result["stdout"])
def plugin_is_enabled(name, runas=None):
    """
    Check whether a plugin is enabled.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.plugin_is_enabled rabbitmq_plugin_name
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    enabled = list_enabled_plugins(runas)
    return name in enabled
def enable_plugin(name, runas=None):
    """
    Enable a RabbitMQ plugin via the rabbitmq-plugins command.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.enable_plugin foo
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [_get_rabbitmq_plugin(), "enable", name],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    return _format_response(result, "Enabled")
def disable_plugin(name, runas=None):
    """
    Disable a RabbitMQ plugin via the rabbitmq-plugins command.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.disable_plugin foo
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [_get_rabbitmq_plugin(), "disable", name],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    return _format_response(result, "Disabled")
def set_upstream(
    name,
    uri,
    prefetch_count=None,
    reconnect_delay=None,
    ack_mode=None,
    trust_user_id=None,
    exchange=None,
    max_hops=None,
    expires=None,
    message_ttl=None,
    ha_policy=None,
    queue=None,
    runas=None,
):
    """
    Configures an upstream via rabbitmqctl set_parameter. This can be an
    exchange-upstream, a queue-upstream or both.

    :param str name: The name of the upstream to configure.

    The following parameters apply to federated exchanges and federated queues:

    :param str uri: The AMQP URI(s) for the upstream.
    :param int prefetch_count: The maximum number of unacknowledged messages copied
        over a link at any one time. Default: 1000
    :param int reconnect_delay: The duration (in seconds) to wait before reconnecting
        to the broker after being disconnected. Default: 1
    :param str ack_mode: Determines how the link should acknowledge messages.
        If set to ``on-confirm`` (the default), messages are acknowledged to the
        upstream broker after they have been confirmed downstream. This handles
        network errors and broker failures without losing messages, and is the
        slowest option.
        If set to ``on-publish``, messages are acknowledged to the upstream broker
        after they have been published downstream. This handles network errors
        without losing messages, but may lose messages in the event of broker failures.
        If set to ``no-ack``, message acknowledgements are not used. This is the
        fastest option, but may lose messages in the event of network or broker failures.
    :param bool trust_user_id: Determines how federation should interact with the
        validated user-id feature. If set to true, federation will pass through
        any validated user-id from the upstream, even though it cannot validate
        it itself. If set to false or not set, it will clear any validated user-id
        it encounters. You should only set this to true if you trust the upstream
        server (and by extension, all its upstreams) not to forge user-ids.

    The following parameters apply to federated exchanges only:

    :param str exchange: The name of the upstream exchange. Default is to use the
        same name as the federated exchange.
    :param int max_hops: The maximum number of federation links that a message
        published to a federated exchange can traverse before it is discarded.
        Default is 1. Note that even if max-hops is set to a value greater than 1,
        messages will never visit the same node twice due to travelling in a loop.
        However, messages may still be duplicated if it is possible for them to
        travel from the source to the destination via multiple routes.
    :param int expires: The expiry time (in milliseconds) after which an upstream
        queue for a federated exchange may be deleted, if a connection to the upstream
        broker is lost. The default is 'none', meaning the queue should never expire.
        This setting controls how long the upstream queue will last before it is
        eligible for deletion if the connection is lost.
        This value is used to set the "x-expires" argument for the upstream queue.
    :param int message_ttl: The expiry time for messages in the upstream queue
        for a federated exchange (see expires), in milliseconds. Default is ``None``,
        meaning messages should never expire. This does not apply to federated queues.
        This value is used to set the "x-message-ttl" argument for the upstream queue.
    :param str ha_policy: Determines the "x-ha-policy" argument for the upstream
        queue for a federated exchange (see expires). This is only of interest
        when connecting to old brokers which determine queue HA mode using this
        argument. Default is ``None``, meaning the queue is not HA.

    The following parameter applies to federated queues only:

    :param str queue: The name of the upstream queue. Default is to use the same
        name as the federated queue.

    :param str runas: The name of the user to run the command as.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.set_upstream upstream_name ack_mode=on-confirm max_hops=1 \
        trust_user_id=True uri=amqp://hostname

    .. versionadded:: 3000
    """
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    # filter_falsey drops every unset (falsey) option so only the settings the
    # caller actually passed end up in the parameter JSON.
    # NOTE(review): requires "import salt.utils.data" at module top — it is
    # not guaranteed to be loaded as a side effect of the other salt.utils
    # imports.
    params = salt.utils.data.filter_falsey(
        {
            "uri": uri,
            "prefetch-count": prefetch_count,
            "reconnect-delay": reconnect_delay,
            "ack-mode": ack_mode,
            "trust-user-id": trust_user_id,
            "exchange": exchange,
            "max-hops": max_hops,
            "expires": expires,
            "message-ttl": message_ttl,
            "ha-policy": ha_policy,
            "queue": queue,
        }
    )
    res = __salt__["cmd.run_all"](
        [
            RABBITMQCTL,
            "set_parameter",
            "federation-upstream",
            name,
            salt.utils.json.dumps(params),
        ],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(res)
    return True
def delete_upstream(name, runas=None):
    """
    Delete an upstream via ``rabbitmqctl clear_parameter``.

    :param str name: The name of the upstream to delete.
    :param str runas: The name of the user to run the command as.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.delete_upstream upstream_name

    .. versionadded:: 3000
    """
    if not salt.utils.platform.is_windows() and runas is None:
        runas = salt.utils.user.get_user()
    result = __salt__["cmd.run_all"](
        [RABBITMQCTL, "clear_parameter", "federation-upstream", name],
        reset_system_locale=False,
        runas=runas,
        python_shell=False,
    )
    _check_response(result)
    return True
| saltstack/salt | salt/modules/rabbitmq.py | Python | apache-2.0 | 39,358 | [
"VisIt"
] | 83530a6fe2aefcf1b4062d69ccc2bcc8fa8b8b347bdb15c37bd99ac070775c08 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import logging
import copy
import moosetree
import mooseutils
LOG = logging.getLogger(__name__)
class NodeBase(moosetree.Node):
    """
    Node for MOOSE syntax that serves as the parent for actions/objects.
    """

    def __init__(self, *args, **kwargs):
        # Pull node-specific options out of kwargs before handing the
        # remainder to the moosetree.Node constructor.
        self.hidden = kwargs.pop('hidden', False)
        self.removed = kwargs.pop('removed', False)
        self.test = kwargs.pop('test', False)
        self.alias = kwargs.pop('alias', None)
        self.group = kwargs.pop('group', None)
        self.markdown = kwargs.pop('markdown', None)
        self.color = kwargs.pop('color', 'RED')
        moosetree.Node.__init__(self, *args, **kwargs)

    def fullpath(self):
        """Return the '/'-joined names from the root down to this node."""
        return '/'.join(n.name for n in self.path)

    def groups(self):
        """Return the set of groups associated with this node."""
        return set() if self.group is None else {self.group}

    def __repr__(self):
        """Return a colorized one-line summary of this node."""
        if self.is_root:
            return 'ROOT'
        # Color encodes state: removed -> grey, test -> blue, visible -> a
        # light variant of the node color, hidden -> the plain node color.
        if self.removed:
            color = 'GREY'
        elif self.test:
            color = 'BLUE'
        elif self.hidden:
            color = self.color
        else:
            color = 'LIGHT_' + self.color
        summary = mooseutils.colorText('{}: {}'.format(self.name, self.fullpath()), color)
        flags = 'hidden={} removed={} group={} groups={} test={} alias={}'.format(
            self.hidden, self.removed, self.group, self.groups(), self.test, self.alias)
        detail = mooseutils.colorText(flags, 'GREY' if self.removed else 'LIGHT_GREY')
        return '{} {}'.format(summary, detail)
class SyntaxNode(NodeBase):
"""
Defines a class for syntax only (i.e., a node not attached to a C++ class).
This class extends the 'parameters' property to automatically collect parameters from
all the child actions objects.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('group', kwargs.pop('label', None))
NodeBase.__init__(self, *args, **kwargs)
self.color = 'GREEN'
if self.markdown is None:
self.markdown = os.path.join(self.fullpath().lstrip('/'), 'index.md')
def groups(self, syntax=True, actions=True, objects=False, **kwargs):
"""
Return groups associated with this node (i.e., where the syntax is defined). The **kwargs
may be used to pass the named arguments to __nodeFinder, e.g., recursive=True.
"""
out = set([self.group]) if self.group is not None else set()
if syntax:
for node in self.syntax(**kwargs):
out.update(node.groups())
if actions:
for node in self.actions(**kwargs):
out.update(node.groups())
if objects:
for node in self.objects(**kwargs):
out.update(node.groups())
return out
def parameters(self):
"""
Return the action parameters for the syntax.
"""
parameters = dict()
for action in self.actions():
if action.parameters is not None:
parameters.update(action.parameters)
return parameters
def syntax(self, *args, **kwargs):
"""
Return SyntaxNode nodes (see __nodeFinder).
"""
return self.__nodeFinder(SyntaxNode, *args, **kwargs)
def objects(self, *args, **kwargs):
"""
Return MooseObjectNode nodes (see __nodeFinder).
"""
return self.__nodeFinder(MooseObjectNode, *args, **kwargs)
def actions(self, *args, **kwargs):
"""
Return ActionNode nodes (see __nodeFinder).
"""
return self.__nodeFinder(ActionNode, *args, **kwargs)
def __nodeFinder(self, node_type, syntax='', group=None, recursive=False):
"""
A helper method for finding nodes of a given type, syntax, and group.
Inputs:
node_type[NodeCore]: The type of node to consider.
syntax: (optional) The syntax that must be within the object 'fullpath' property.
group: (optional) The group to limit the search.
recursive: When True the search will look through all nodes in the entire tree, when
False only the children of the node are considered.
"""
if recursive:
filter_ = lambda node: (syntax in node.fullpath()) and \
isinstance(node, node_type) and \
(group is None or group in node.groups())
return moosetree.findall(self, filter_)
else:
return [node for node in self.children if (syntax in node.fullpath()) and \
isinstance(node, node_type) and \
(group is None or group in node.groups())]
class ObjectNodeBase(NodeBase):
"""
Base class for nodes associated with C++ objects (Action, MooseObjectAction, or MooseObject).
"""
def __init__(self, parent, name, **kwargs):
kwargs.setdefault('group', kwargs.pop('label', None))
self.classname = kwargs.pop('classname', kwargs.pop('class', name))
self.description = kwargs.pop('description', None)
self.source = kwargs.pop('source', kwargs.pop('register_file', None))
self.header = kwargs.pop('header', None)
self.parameters = kwargs.pop('parameters', None)
NodeBase.__init__(self, parent, name, **kwargs)
if self.source == '':
LOG.critical("MooseDocs requires the %s object to use the registerMooseObject or " \
"registerMooseAction macro within the source (.C) file, this object " \
"is being removed from the available syntax.", self.name)
self.source = None
self.removed = True
if (self.source is not None) and (not os.path.isfile(self.source)):
LOG.critical("The supplied 'source' file does not exist: %s\n This object is being " \
"removed from the available syntax.", self.source)
self.source = None
self.removed = True
if (self.source is not None) and (self.header is None):
self.header = self.source.replace('/src/', '/include/')[:-1] + 'h'
if (self.header is not None) and (self.markdown is None):
idx = self.header.find('/include/') + len('/include/')
self.markdown = self.header[idx:-1] + 'md'
class MooseObjectNode(ObjectNodeBase):
"""
Node for a registered C++ MooseObject class.
"""
def __init__(self, *args, **kwargs):
ObjectNodeBase.__init__(self, *args, **kwargs)
self.color = 'YELLOW'
class ActionNode(ObjectNodeBase):
"""
Node for a registered C++ Action class.
"""
def __init__(self, *args, **kwargs):
self.tasks = kwargs.pop('tasks', None)
ObjectNodeBase.__init__(self, *args, **kwargs)
self.color = 'MAGENTA'
class MooseObjectActionNode(ActionNode):
"""
Node for a registered C++ MooseObjectAction class.
"""
def __init__(self, *args, **kwargs):
ActionNode.__init__(self, *args, **kwargs)
self.color = 'CYAN'
| harterj/moose | python/moosesyntax/nodes.py | Python | lgpl-2.1 | 8,090 | [
"MOOSE"
] | d531946f67f02721f24d128f5d25b5e48dfd58fab074ecf52837d9cba039a1c4 |
'''
#=============================================================================
# FileName: extract_structures.py
# Desc:
# Author: jlpeng
# Email: jlpeng1201@gmail.com
# HomePage:
# Version: 0.0.1
# Created: 2015-04-09 20:18:19
# LastChange: 2015-04-09 20:29:39
# History:
#=============================================================================
'''
import sys
import pybel
def main(argv=sys.argv):
if len(argv) != 4:
print "\n Usage: %s in.sdf in.list out.sdf"%argv[0]
print ""
sys.exit(1)
candidates = [line.strip() for line in open(argv[2],'r')]
extracted = [False for _ in candidates]
outf = pybel.Outputfile("sdf", argv[3])
inf = pybel.readfile("sdf", argv[1])
for mol in inf:
if False not in extracted:
break
for i in xrange(len(candidates)):
if extracted[i]:
continue
found = False
for key,value in mol.data.iteritems():
if value.lower() == candidates[i].lower():
found = True
break
if found:
extracted[i] = True
mol.title = candidates[i]
outf.write(mol)
break
inf.close()
outf.close()
n = extracted.count(True)
print "totally extracted %d molecules from %s"%(n, argv[1])
if n < len(candidates):
print "The following can't be found"
for i in xrange(len(extracted)):
if not extracted[i]:
print candidates[i]
main()
| Jianlong-Peng/pytools | miscellaneous/extract_structures.py | Python | gpl-2.0 | 1,610 | [
"Pybel"
] | 7043b0657045fa69c79de78cfa9e19756cbcc19e14f82378fb9b8a681446316f |
# proxy module
from __future__ import absolute_import
from mayavi.sources.builtin_surface import *
| enthought/etsproxy | enthought/mayavi/sources/builtin_surface.py | Python | bsd-3-clause | 99 | [
"Mayavi"
] | 2a9db5fd162654b1c5fd882ad0f22c32ea47b19b8ab3026c4bc7253dd88c9fc7 |
from gpiozero import TrafficLights
import time
def british_lights_cycle(lights, cycle_time):
lights.off()
# Red
lights.red.on()
time.sleep(cycle_time)
# Red and Amber
lights.amber.on()
time.sleep(cycle_time)
# Green
lights.red.off()
lights.amber.off()
lights.green.on()
time.sleep(cycle_time)
# Amber
lights.green.off()
lights.amber.on()
time.sleep(cycle_time)
# Red
lights.amber.off()
lights.red.on()
time.sleep(cycle_time)
lights = TrafficLights(17, 27, 22)
while True:
british_lights_cycle(lights, 0.75)
lights.off()
time.sleep(0.75)
lights.off()
print "done!"
| claremacrae/raspi_code | hardware/gpio/TrafficLights_gpiozero.py | Python | mit | 667 | [
"Amber"
] | 2ef992af399efbe3f6ab9dc7f5f19d920d246c75b8d28f868ec69d0a0b244070 |
#!/usr/bin/env python
"""gitchangelog: Generate a change log based on closed pull requests."""
from __future__ import unicode_literals
__version__ = '1.6.0'
import six
from six.moves import input
import sys
import os
import os.path
import json
import codecs
import argparse
import re
from pygithub3 import Github
class GithubChangelog(object):
MARKDOWN_LINK = re.compile(r'\[(?P<label>.+?)\]\((?P<url>.+?)\)')
HTML_LINK = re.compile(r'<a.*href=[\'"](?P<url>.+?)[\'"].*>(?P<label>.+?)</a>')
GITFLOW_BRANCH = ["master", "staging", "stage"]
def __init__(self, debug=False):
self.debug = debug
self.api = None # Github API object
self.repo = None # Active repo name
def main(self, args):
"""Entry point for running this module as an app."""
self.debug = args.debug
# The user's profile directory.
self.profile = os.path.expanduser("~/.config")
self.config = os.path.join(self.profile, "gitchangelog")
# Authenticate.
self.authenticate(user=args.user, token=args.token, repo=args.repo,
reset=args.init)
# If not using --init, the repo name and start number are required.
if not args.init:
if not args.repo:
die("The repository name (--repo) is required to continue.")
if not args.start and not args.after:
die("The issue start number (--start) is required to continue.")
else:
sys.exit(0)
self.repo = args.repo
# Scan the pull requests.
changes = self.scan_pulls(
start=args.start,
stop=args.stop,
after=args.after,
exclude=args.exclude,
)
# Pretty print the result!
six.print_("\nChanges:\n")
six.print_("\n".join(changes))
def authenticate(self, user=None, token=None, repo=None, reset=False):
"""Handle authentication with the GitHub API."""
self.say("Authentication begin")
save = False # Save settings to their config file.
# Read settings from disk.
if not user or not token:
config = self.read_settings()
if not user:
user = config[0]
if not token:
token = config[1]
# Resetting credentials?
if reset:
user = token = None
# Username provided?
if not user:
save = True
user = input("GitHub username> ")
user = user.strip()
if not user:
die("Username is required for GitHub authentication.")
# Token provided?
if not token:
save = True
six.print_("This app will require a personal access token for\n" \
+ "your GitHub account. Visit the URL below and create\n" \
+ "a personal access token, and then paste that token\n" \
+ "at the prompt below:\n" \
+ "https://github.com/settings/applications\n")
token = input("Personal access token> ")
token = token.strip()
if not token:
die("Token is required for GitHub authentication.")
# Saving the configuration?
if save:
# Create the profile directory, if necessary.
if not os.path.isdir(self.profile):
self.say("Creating config directory: {}".format(self.profile))
os.mkdir(self.profile)
self.save_settings(user, token)
six.print_("Settings saved to {}".format(self.config))
# Initialize the GitHub API object.
self.api = Github(user=user, token=token, repo=repo)
def scan_pulls(self, start=None, stop=None, after=None, exclude=None):
"""Scan closed pull requests starting from #start and optionally
stopping at #stop."""
# Get all closed pull requests.
pulls = self.api.pull_requests.list(state="closed").all()
pulls.reverse()
# If a start ID wasn't provided, but after was, look up the first
# pull request that was merged AFTER the given --after option was
# merged.
min_closed_date = None
if start is None and after is not None:
# Get the pull request from the --after option.
pull = self.api.pull_requests.get(after)
if not pull:
six.print_("Could not find the --after pull request #{}".format(
after
))
sys.exit(1)
min_closed_date = pull.closed_at
six.print_("-- Scanning pull requests... --")
changes = list()
for pull in pulls:
# Skip pull requests outside our requested range.
if stop and pull.number > stop:
continue
if start is not None and pull.number <= start:
continue
# If we have a minimum closed date, this PR must've been closed
# after that date.
if min_closed_date and pull.closed_at <= min_closed_date:
continue
# If the PR was closed instead of merged, skip it.
if not self.api.pull_requests.is_merged(pull.number):
self.say("Skip closed (not merged) pull request {}".format(
pull.number
))
continue
# If we're excluding merges into this branch, skip it.
if type(exclude) is list and pull.base["ref"] in exclude:
continue
# Add the pull request title to our change log.
changes.append("* #{} - {} - @{}".format(
pull.number, pull.title, pull.user.get("login")
))
self.say("Found closed pull request: {}".format(changes[-1]))
# Get the issue for it to look up the comments (cuz comments on
# the pull object don't work???)
comments = [ x.body for x in self.api.issues.comments.list(pull.number).all() ]
if not "Changes:\n" in pull.body:
# Try not to redundantly paste links from previous merges.
comments.insert(0, pull.body)
# Are we using the gitflow workflow? If so, we do not enumerate
# links in pull requests going from develop -> staging -> master,
# as these PR's will tend to have other outputs from this script
# in their comments. This lessens huge lists of redundant links
# showing up in your changelogs under the "develop to staging" PR's.
if args.gitflow and pull.base["ref"] in self.GITFLOW_BRANCH:
continue
# Scan all comments for unique links.
urls = set()
for comment in comments:
# Scan it for links.
for link in self.find_links(comment):
label, url = link
if url in urls:
continue
changes.append(" * [{}]({})".format(label, url))
self.say("Found link in comment: {}".format(changes[-1]))
urls.add(url)
return changes
def find_links(self, comment):
"""Search a comment for hyperlinks."""
links = list()
for regex in [self.MARKDOWN_LINK, self.HTML_LINK]:
matches = [m.groupdict() for m in regex.finditer(comment)]
for match in matches:
links.append([match["label"], match["url"]])
return links
def save_settings(self, user, token):
"""Save settings to disk."""
fh = codecs.open(self.config, "w", "utf-8")
fh.write(json.dumps(dict(
user=user,
token=token,
)))
fh.close()
def read_settings(self):
"""Read settings from disk.
Returns (user, token) or (None, None) if no setting file found."""
if not os.path.isfile(self.config):
return (None, None)
fh = codecs.open(self.config, "r", "utf-8")
data = json.loads(fh.read())
fh.close()
return (data.get("user"), data.get("token"))
def say(self, message):
"""Print a debug message if debugging is on."""
if self.debug:
six.print_("DEBUG:", message)
def die(error):
six.print_(error)
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser("gitchangelog")
parser.add_argument("--debug", "-d",
help="Debug mode.",
action="store_true",
)
parser.add_argument("--init", "-i",
help="Initialize the authentication settings ONLY. You can use this " \
+ "to configure your default username and access token without " \
+ "actually continuing with scanning a repository's pull requests.",
action="store_true",
)
parser.add_argument("--user", "-u",
help="Username to authenticate with in Github. If not provided, you " \
+ "will be prompted for it and the setting will be saved in " \
+ "~/.config/gitchangelog or equivalent for your system. " \
+ "If working with an organization, use the org username.",
)
parser.add_argument("--token", "-t",
help="OAuth personal access token for authentication. If not " \
+ "provided, you will be prompted for it and the setting will be " \
+ "saved in ~/.config/gitchangelog or equivalent for your system.",
)
parser.add_argument("--repo", "-r",
help="Repository to run the changelog for.",
)
parser.add_argument("--after", "-a",
help="Include all pull requests that were merged *after* the date " \
+ "that this one was merged on. This is the simplest option to " \
+ "use; just set `--after` to be the pull request ID of your " \
+ "latest deployment pull request. All PR's that were merged " \
+ "*after* that one was merged will be included (you can use " \
+ "this instead of --start/--stop)",
type=int,
)
parser.add_argument("--start", "-s",
help="Issue number for the pull request you want to start from. " \
+ "For example, if you occasionally do a merge from 'develop' to " \
+ "'master', and you want a change log of the pull requests " \
+ "merged from your last deploy, you'd enter the issue number of " \
+ "the *last* merge from develop to master.",
type=int,
)
parser.add_argument("--stop", "-x",
help="Issue number to stop at (optional). The default is to check " \
+ "all pull requests *after* the `--start` option. Provide " \
+ "`--stop` to stop at a different number instead.",
type=int,
)
parser.add_argument("--gitflow", "-g",
help="Use a `git flow` style for branch management. With this " \
"enabled, pull requests that go from develop to staging, or " \
"staging to master, do not have their comment hyperlinks " \
"enumerated (as their comments will tend to be other outputs " \
"from github-changelog and end up in a lot of redundant links).",
action="store_true",
)
parser.add_argument("--exclude", "-X",
help="Exclude pull requests that merge into this branch. For example " \
"if you have a long-lived feature branch that isn't being " \
"deployed in your main release, you can use the --exclude option " \
"and name that branch. Its pull requests don't get included in " \
"the change log output in this case. This option can be " \
"specified multiple times.",
action="append",
type=six.text_type,
)
args = parser.parse_args()
github = GithubChangelog()
github.main(args)
| kirsle/github-changelog | gitchangelog.py | Python | mit | 11,969 | [
"VisIt"
] | 8f0a282e71587d9f5623837bc6bd3dac39f46f803f6b89d9d1089ac1f5fd929f |
"""
.. module:: analyze
:synopsis: Extract data from chains and produce plots
.. moduleauthor:: Karim Benabed <benabed@iap.fr>
.. moduleauthor:: Benjamin Audren <benjamin.audren@epfl.ch>
Collection of functions needed to analyze the Markov chains.
This module defines as well a class :class:`Information`, that stores useful
quantities, and shortens the argument passing between the functions.
.. note::
Some of the methods used in this module are directly adapted from the
`CosmoPmc <http://www.cosmopmc.info>`_ code from Kilbinger et. al.
"""
import os
import math
import numpy as np
from itertools import count
# The root plotting module, to change options like font sizes, etc...
import matplotlib
# The following line suppresses the need for an X server
matplotlib.use("Agg")
# Module for handling display
import matplotlib.pyplot as plt
# Module to handle warnings from matplotlib
import warnings
import importlib
import io_mp
from itertools import ifilterfalse
from itertools import ifilter
import scipy.ndimage
# Defined to remove the burnin for all the points that were produced before the
# first time where -log-likelihood <= min-minus-log-likelihood+LOG_LKL_CUTOFF
LOG_LKL_CUTOFF = 3
NUM_COLORS = 6
def analyze(command_line):
"""
Main function, does the entire analysis.
It calls in turn all the other routines from this module. To limit the
arguments of each function to a reasonnable size, a :class:`Information`
instance is used. This instance is initialized in this function, then
appended by the other routines.
"""
# Check if the scipy module has the interpolate method correctly
# installed (should be the case on every linux distribution with
# standard numpy)
try:
from scipy.interpolate import interp1d
Information.has_interpolate_module = True
except ImportError:
Information.has_interpolate_module = False
warnings.warn(
'No cubic interpolation done (no interpolate method found ' +
'in scipy), only linear')
# Determine how many different folders are asked through the 'info'
# command, and create as many Information instances
files = separate_files(command_line.files)
# Create an instance of the Information class for each subgroup found in
# the previous function. They will each hold all relevant information, and
# be used as a compact way of exchanging information between functions
information_instances = []
for item in files:
info = Information(command_line)
information_instances.append(info)
# Prepare the files, according to the case, load the log.param, and
# prepare the output (plots folder, .covmat, .info and .log files).
# After this step, info.files will contain all chains.
status = prepare(item, info)
# If the preparation step generated new files (for instance,
# translating from NS or CH to Markov Chains) this routine should stop
# now.
if not status:
return
# Compute the mean, maximum of likelihood, 1-sigma variance for this
# main folder. This will create the info.chain object, which contains
# all the points computed stacked in one big array.
convergence(info)
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
command_line.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
command_line.update = 0
# compute covariance matrix, excepted when we are in update mode and convergence is too bad or too good
if command_line.update and (np.amax(info.R) > 3. or np.amax(info.R) < 0.4):
print '--> Not computing covariance matrix'
else:
try:
if command_line.want_covmat:
print '--> Computing covariance matrix'
info.covar = compute_covariance_matrix(info)
# Writing it out in name_of_folder.covmat
io_mp.write_covariance_matrix(
info.covar, info.backup_names, info.cov_path)
except:
print '--> Computing covariance matrix failed'
pass
# Store an array, sorted_indices, containing the list of indices
# corresponding to the line with the highest likelihood as the first
# element, and then as decreasing likelihood
info.sorted_indices = info.chain[:, 1].argsort(0)
# Writing the best-fit model in name_of_folder.bestfit
bestfit_line = [elem*info.scales[i, i] for i, elem in
enumerate(info.chain[info.sorted_indices[0], 2:])]
io_mp.write_bestfit_file(bestfit_line, info.backup_names,
info.best_fit_path)
if not command_line.minimal:
# Computing 1,2 and 3-sigma errors, and plot. This will create the
# triangle and 1d plot by default.
compute_posterior(information_instances)
print '--> Writing .info and .tex files'
for info in information_instances:
info.write_information_files()
# when called by MCMC in update mode, return R values so that they can be written for information in the chains
if command_line.update:
return info.R
def prepare(files, info):
    """
    Scan the whole input folder, and include all chains in it.

    The input can be either a list of individual chain files or a single
    enclosing folder; both cases are resolved by
    :func:`recover_folder_and_files`.

    .. warning::

        If someday you change the way the chains are named, remember to change
        here too, because this routine assumes the chains have a double
        underscore in their names.

    .. note::

        Only files ending with .txt will be selected, to keep compatibility
        with CosmoMC format

    .. note::

        If the target is a Nested Sampling (`NS`) or CosmoHammer (`CH`)
        sub-folder, its output is first translated into standard Monte Python
        chains and this routine stops, returning False. You can then re-run
        `--info` on the whole folder. This is only needed when the run was
        killed before completion.

    Parameters
    ----------
    files : list
        list of potentially only one element, containing the files to analyze.
        This can be only one file, or the encompassing folder, files
    info : Information instance
        Used to store the result

    Returns
    -------
    bool
        False when a NS/CH conversion was performed (caller should stop),
        True otherwise.
    """
    # Nested Sampling and CosmoHammer outputs must first be converted into
    # regular MCMC chains. If such a conversion just happened, bail out so
    # the user can re-run the analysis on the translated chains.
    for module_name, tag in (('nested_sampling', 'NS'), ('cosmo_hammer', 'CH')):
        if clean_conversion(module_name, tag, files[0]):
            return False

    # Resolve the input into a folder, its usable chain files (subfolders
    # and files below 600 octets are dropped) and a common base name.
    folder, files, basename = recover_folder_and_files(files)

    info.files = files
    info.folder = folder
    info.basename = basename

    # A non-empty log.param file is mandatory: refuse to go on without it.
    parameter_file_path = os.path.join(folder, 'log.param')
    if not os.path.isfile(parameter_file_path):
        raise io_mp.AnalyzeError(
            "The log param file %s " % os.path.join(folder, 'log.param') +
            "is missing in the analyzed folder?")
    if os.path.getsize(parameter_file_path) == 0:
        raise io_mp.AnalyzeError(
            "The log param file %s " % os.path.join(folder, 'log.param') +
            "seems empty")

    # Output files are named after the folder itself; when the folder is
    # simply '.', fall back on the name of its absolute parent directory.
    if os.path.basename(folder) != '.':
        root = os.path.basename(folder)
    else:
        root = os.path.basename(
            os.path.abspath(os.path.join(folder, '..')))

    # Register the paths of every derived product on the info instance.
    for attribute, extension in (('v_info_path', '.v_info'),
                                 ('h_info_path', '.h_info'),
                                 ('tex_path', '.tex'),
                                 ('cov_path', '.covmat'),
                                 ('log_path', '.log'),
                                 ('best_fit_path', '.bestfit')):
        setattr(info, attribute, os.path.join(folder, root + extension))
    info.param_path = parameter_file_path

    return True
def convergence(info):
"""
Compute convergence for the desired chains, using Gelman-Rubin diagnostic
Chains have been stored in the info instance of :class:`Information`. Note
that the G-R diagnostic can be computed for a single chain, albeit it will
most probably give absurd results. To do so, it separates the chain into
three subchains.
"""
# Recovering parameter names and scales, creating tex names,
extract_parameter_names(info)
# Now that the number of parameters is known, the array containing bounds
# can be initialised
info.bounds = np.zeros((len(info.ref_names), len(info.levels), 2))
# Circle through all files to find the global maximum of likelihood
#print '--> Finding global maximum of likelihood'
find_maximum_of_likelihood(info)
# Restarting the circling through files, this time removing the burnin,
# given the maximum of likelihood previously found and the global variable
# LOG_LKL_CUTOFF. spam now contains all the accepted points that were
# explored once the chain moved within min_minus_lkl - LOG_LKL_CUTOFF.
# If the user asks for a keep_fraction <1, this is also the place where
# a fraction (1-keep_fraction) is removed at the beginning of each chain.
#print '--> Removing burn-in'
spam = remove_bad_points(info)
info.remap_parameters(spam)
# Now that the list spam contains all the different chains removed of
# their respective burn-in, proceed to the convergence computation
# 2D arrays for mean and var, one column will contain the total (over
# all chains) mean (resp. variance), and each other column the
# respective chain mean (resp. chain variance). R only contains the
# values for each parameter. Therefore, mean and var will have len(spam)+1
# as a first dimension
mean = np.zeros((len(spam)+1, info.number_parameters))
var = np.zeros((len(spam)+1, info.number_parameters))
R = np.zeros(info.number_parameters)
# Store the total number of points, and the total in each chain
total = np.zeros(len(spam)+1)
for j in xrange(len(spam)):
total[j+1] = spam[j][:, 0].sum()
total[0] = total[1:].sum()
# Compute mean and variance for each chain
print '--> Computing mean values'
compute_mean(mean, spam, total)
print '--> Computing variance'
compute_variance(var, mean, spam, total)
print '--> Computing convergence criterium (Gelman-Rubin)'
# Gelman Rubin Diagnostic:
# Computes a quantity linked to the ratio of the mean of the variances of
# the different chains (within), and the variance of the means (between)
# Note: This is not strictly speaking the Gelman Rubin test, defined for
# same-length MC chains. Our quantity is defined without the square root,
# which should not change much the result: a small sqrt(R) will still be a
# small R. The same convention is used in CosmoMC, except for the weighted
# average: we decided to do the average taking into account that longer
# chains should count more
within = 0
between = 0
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
within += total[j+1]*var[j+1, i]
between += total[j+1]*(mean[j+1, i]-mean[0, i])**2
within /= total[0]
between /= (total[0]-1)
R[i] = between/within
if i == 0:
print ' -> R-1 is %.6f' % R[i], '\tfor ', info.ref_names[i]
else:
print ' %.6f' % R[i], '\tfor ', info.ref_names[i]
# Log finally the total number of steps, and absolute loglikelihood
with open(info.log_path, 'a') as log:
log.write("--> Total number of steps: %d\n" % (
info.steps))
log.write("--> Total number of accepted steps: %d\n" % (
info.accepted_steps))
log.write("--> Minimum of -logLike : %.2f" % (
info.min_minus_lkl))
# Store the remaining members in the info instance, for further writing to
# files, storing only the mean and total of all the chains taken together
info.mean = mean[0]
info.R = R
info.total = total[0]
# Create the main chain, which consists in all elements of spam
# put together. This will serve for the plotting.
info.chain = np.vstack(spam)
def compute_posterior(information_instances):
    """
    Compute the marginalized posterior distributions, and optionally plot them.

    Produces the 1d posterior plots and the 2d triangle plot, saves them to
    the `plots/` subfolder of the analyzed folder, and writes the interpolated
    histograms to `.hist` files for later re-plotting.

    Parameters
    ----------
    information_instances : list
        list of information objects, initialised on the given folders, or list
        of files, in input. For each of these instances, plot the 1d and 2d
        posterior distribution, depending on the flags stored in the instances,
        coming from command line arguments or read from a file.
    """
    # For convenience, store as `conf` the first element of the list
    # information_instances, since it will be called often to check for
    # configuration parameters
    conf = information_instances[0]
    # Pre configuration of the output, note that changes to the font size
    # will occur later on as well, to obtain a nice scaling.
    matplotlib.rc('text', usetex=True)
    matplotlib.rc('font', size=11)
    matplotlib.rc('xtick', labelsize='8')
    matplotlib.rc('ytick', labelsize='8')
    # Recover max and min values for each instance, defining the a priori place
    # of ticks (in case of a comparison, this should change)
    for info in information_instances:
        info.define_ticks()
        # If plots/ folder in output folder does not exist, create it
        if os.path.isdir(os.path.join(info.folder, 'plots')) is False:
            os.mkdir(os.path.join(info.folder, 'plots'))
    # Determine the total number of parameters to plot, based on the list
    # without duplicates of the plotted parameters of all information instances
    plotted_parameters = []
    # For printing not in latex
    ref_names = []
    for info in information_instances:
        for index, name in enumerate(info.plotted_parameters):
            if name not in plotted_parameters:
                plotted_parameters.append(name)
                ref_names.append(info.ref_names[index])
    if len(plotted_parameters) == 0:
        raise io_mp.AnalyzeError(
            "You provided no parameters to analyze, probably by selecting"
            " wrong parameters names in the '--extra' file.")
    # Find the appropriate number of columns and lines for the 1d posterior
    # plot
    if conf.num_columns_1d == None:
        num_columns = int(round(math.sqrt(len(plotted_parameters))))
    else:
        num_columns = conf.num_columns_1d
    num_lines = int(math.ceil(len(plotted_parameters)*1.0/num_columns))
    # For special needs, you can impose here a different number of columns and lines in the 1d plot
    # Here is a commented example:
    # if (len(plotted_parameters) == 10):
    #     num_columns = 5
    #     num_lines = 2
    # Create the figures
    # which will be 3*3 inches per subplot, quickly growing!
    if conf.plot:
        fig1d = plt.figure(num=1, figsize=(
            3*num_columns,
            3*num_lines), dpi=80)
    if conf.plot_2d:
        fig2d = plt.figure(num=2, figsize=(
            3*len(plotted_parameters),
            3*len(plotted_parameters)), dpi=80)
    # Create the name of the files, concatenating the basenames with
    # underscores.
    file_name = "_".join(
        [info.basename for info in information_instances])
    # Loop over all the plotted parameters
    # There will be two indices at all time, the one running over the plotted
    # parameters, `index`, and the one corresponding to the actual column in
    # the actual file, `native_index`. For instance, if you try to plot only
    # two columns of a several columns file, index will vary from 0 to 1, but
    # the corresponding native indices might be anything.
    # Obviously, since plotted parameters contain potentially names not
    # contained in some files (in case of a comparison), native index might be
    # undefined.
    # Defined the legends object, which will store the plot style, to display
    # at the level of the figure
    legends = [None for _ in range(len(information_instances))]
    if not conf.legendnames:
        legend_names = [info.basename.replace('_', ' ')
                        for info in information_instances]
    else:
        legend_names = conf.legendnames
    print '-----------------------------------------------'
    for index, name in enumerate(plotted_parameters):
        # Adding the subplots to the respective figures, this will correspond
        # to the diagonal on the triangle plot.
        if conf.plot_2d:
            ax2d = fig2d.add_subplot(
                len(plotted_parameters),
                len(plotted_parameters),
                index*(len(plotted_parameters)+1)+1,
                yticks=[])
        if conf.plot:
            ax1d = fig1d.add_subplot(
                num_lines, num_columns, index+1, yticks=[])
        # check for each instance if the name is part of the list of plotted
        # parameters, and if yes, store the native_index. If not, store a flag
        # to ignore any further plotting or computing issues concerning this
        # particular instance.
        for info in information_instances:
            try:
                info.native_index = info.ref_names.index(name)
                info.ignore_param = False
                standard_name = info.backup_names[info.native_index]
            except ValueError:
                info.ignore_param = True
        # The limits might have been enforced by the user
        if name in conf.force_limits.iterkeys():
            x_span = conf.force_limits[name][1]-conf.force_limits[name][0]
            tick_min = conf.force_limits[name][0] +0.1*x_span
            tick_max = conf.force_limits[name][1] -0.1*x_span
            ticks = np.linspace(tick_min,
                                tick_max,
                                info.ticknumber)
            for info in information_instances:
                if not info.ignore_param:
                    info.x_range[info.native_index] = conf.force_limits[name]
                    info.ticks[info.native_index] = ticks
        # otherwise, find them automatically
        else:
            adjust_ticks(name, information_instances)
        print ' -> Computing histograms for ', name
        for info in information_instances:
            if not info.ignore_param:
                # 1D posterior normalised to P_max=1 (first step)
                #
                # simply the histogram from the chains, with few bins
                #
                info.hist, info.bin_edges = np.histogram(
                    info.chain[:, info.native_index+2], bins=info.bins,
                    weights=info.chain[:, 0], normed=False, density=False)
                info.hist = info.hist/info.hist.max()
                info.bincenters = 0.5*(info.bin_edges[1:]+info.bin_edges[:-1])
                # 1D posterior normalised to P_max=1 (second step)
                #
                # returns a histogram still normalised to one, but with a ten times finer sampling;
                # >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
                # >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
                # >> if the interpolation module is not installed, this step keeps the same posterior
                #
                info.interp_hist, info.interp_grid = cubic_interpolation(
                    info, info.hist, info.bincenters)
                # minimum credible interval (method by Jan Haman). Fails for
                # multimodal histograms
                bounds = minimum_credible_intervals(info)
                info.bounds[info.native_index] = bounds
        # plotting
        for info in information_instances:
            if not info.ignore_param:
                # 1D posterior normalised to P_max=1 (third step, used only for plotting)
                #
                # apply gaussian smoothing
                #
                # factor by which the grid has been made thinner (10 means 10 times more bins)
                interpolation_factor = float(len(info.interp_grid))/float(len(info.bincenters))
                # factor for gaussian smoothing
                sigma = interpolation_factor*info.gaussian_smoothing
                # smooth
                smoothed_interp_hist = scipy.ndimage.filters.gaussian_filter(info.interp_hist,sigma)
                # re-normalised
                smoothed_interp_hist = smoothed_interp_hist/smoothed_interp_hist.max()
                if conf.plot_2d:
                    ##################################################
                    # plot 1D posterior in diagonal of triangle plot #
                    ##################################################
                    plot = ax2d.plot(
                        info.interp_grid,
                        smoothed_interp_hist,
                        linewidth=info.line_width, ls='-',
                        color = info.MP_color_cycle[info.id][1],
                        # the [1] picks up the color of the 68% contours
                        # with [0] you would get that of the 95% contours
                        alpha = info.alphas[info.id])
                    legends[info.id] = plot[0]
                    ax2d.set_xticks(info.ticks[info.native_index])
                    if conf.legend_style == 'top':
                        ax2d.set_title(
                            '%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
                                info.decimal) % (
                                info.tex_names[info.native_index],
                                info.mean[info.native_index],
                                info.bounds[info.native_index, 0, -1],
                                info.bounds[info.native_index, 0, 0]),
                            fontsize=info.fontsize)
                        ax2d.set_xticklabels(
                            ['%.{0}g'.format(info.decimal) % s
                             for s in info.ticks[info.native_index]],
                            fontsize=info.ticksize)
                    elif conf.legend_style == 'sides':
                        # Except for the last 1d plot (bottom line), don't
                        # print ticks
                        if index == len(plotted_parameters)-1:
                            ax2d.set_xticklabels(
                                ['%.{0}g'.format(info.decimal) % s
                                 for s in info.ticks[info.native_index]],
                                fontsize=info.ticksize)
                            ax2d.tick_params('x',direction='inout')
                            ax2d.set_xlabel(
                                info.tex_names[info.native_index],
                                fontsize=info.fontsize)
                        else:
                            ax2d.set_xticklabels([])
                    ax2d.axis([info.x_range[info.native_index][0],
                               info.x_range[info.native_index][1],
                               0, 1.05])
                if conf.plot:
                    if conf.short_title_1d:
                        ax1d.set_title(
                            '%s'.format(info.decimal) % (
                                info.tex_names[info.native_index]),
                            fontsize=info.fontsize)
                    else:
                        # Note the use of double curly brackets {{ }} to produce
                        # the desired LaTeX output. This is necessary because the
                        # format function would otherwise understand single
                        # brackets as fields.
                        ax1d.set_title(
                            '%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
                                info.decimal) % (
                                info.tex_names[info.native_index],
                                info.mean[info.native_index],
                                info.bounds[info.native_index, 0, -1],
                                info.bounds[info.native_index, 0, 0]),
                            fontsize=info.fontsize)
                    ax1d.set_xticks(info.ticks[info.native_index])
                    ax1d.set_xticklabels(
                        ['%.{0}g'.format(info.decimal) % s
                         for s in info.ticks[info.native_index]],
                        fontsize=info.ticksize)
                    ax1d.axis([info.x_range[info.native_index][0],
                               info.x_range[info.native_index][1],
                               0, 1.05])
                    # Execute some customisation scripts for the 1d plots
                    if (info.custom1d != []):
                        for elem in info.custom1d:
                            execfile('plot_files/'+elem)
                    ##################################################
                    # plot 1D posterior in 1D plot                   #
                    ##################################################
                    ax1d.plot(
                        info.interp_grid,
                        # gaussian filtered 1d posterior:
                        smoothed_interp_hist,
                        # raw 1d posterior:
                        #info.interp_hist,
                        lw=info.line_width, ls='-',
                        color = info.MP_color_cycle[info.id][1],
                        # the [1] picks up the color of the 68% contours
                        # with [0] you would get that of the 95% contours
                        alpha = info.alphas[info.id])
                    # uncomment if you want to see the raw points from the histogram
                    # (to check whether the interpolation and smoothing generated artefacts)
                    #ax1d.plot(
                    #    info.bincenters,
                    #    info.hist,
                    #    'ro')
        if conf.mean_likelihood:
            for info in information_instances:
                if not info.ignore_param:
                    try:
                        # 1D mean likelihood normalised to P_max=1 (first step)
                        #
                        # simply the histogram from the chains, weighted by multiplicity*likelihood
                        #
                        lkl_mean, _ = np.histogram(
                            info.chain[:, info.native_index+2],
                            bins=info.bin_edges,
                            normed=False,
                            weights=np.exp(
                                conf.min_minus_lkl-info.chain[:, 1])*info.chain[:, 0])
                        lkl_mean /= lkl_mean.max()
                        # 1D mean likelihood normalised to P_max=1 (second step)
                        #
                        # returns a histogram still normalised to one, but with a ten times finer sampling;
                        # >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
                        # >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
                        # >> if the interpolation module is not installed, this step keeps the same posterior
                        #
                        interp_lkl_mean, interp_grid = cubic_interpolation(
                            info, lkl_mean, info.bincenters)
                        # 1D mean likelihood normalised to P_max=1 (third step, used only for plotting)
                        #
                        # apply gaussian smoothing
                        #
                        # smooth
                        smoothed_interp_lkl_mean = scipy.ndimage.filters.gaussian_filter(interp_lkl_mean,sigma)
                        # re-normalised
                        smoothed_interp_lkl_mean = smoothed_interp_lkl_mean/smoothed_interp_lkl_mean.max()
                        # Execute some customisation scripts for the 1d plots
                        if (info.custom1d != []):
                            for elem in info.custom1d:
                                execfile('plot_files/'+elem)
                        ########################################################
                        # plot 1D mean likelihood in diagonal of triangle plot #
                        ########################################################
                        if conf.plot_2d:
                            # raw mean likelihoods:
                            #ax2d.plot(info.bincenter, lkl_mean,
                            #          ls='--', lw=conf.line_width,
                            #          color = info.MP_color_cycle[info.id][1],
                            #          alpha = info.alphas[info.id])
                            # smoothed and interpolated mean likelihoods:
                            ax2d.plot(interp_grid, smoothed_interp_lkl_mean,
                                      ls='--', lw=conf.line_width,
                                      color = info.MP_color_cycle[info.id][1],
                                      alpha = info.alphas[info.id])
                        ########################################################
                        # plot 1D mean likelihood in 1D plot                   #
                        ########################################################
                        if conf.plot:
                            # raw mean likelihoods:
                            #ax1d.plot(info.bincenters, lkl_mean,
                            #          ls='--', lw=conf.line_width,
                            #          color = info.MP_color_cycle[info.id][1],
                            #          alpha = info.alphas[info.id])
                            # smoothed and interpolated mean likelihoods:
                            ax1d.plot(interp_grid, smoothed_interp_lkl_mean,
                                      ls='--', lw=conf.line_width,
                                      color = info.MP_color_cycle[info.id][1],
                                      alpha = info.alphas[info.id])
                    except:
                        print 'could not find likelihood contour for ',
                        print info.ref_parameters[info.native_index]
        if conf.subplot is True:
            if conf.plot_2d:
                extent2d = ax2d.get_window_extent().transformed(
                    fig2d.dpi_scale_trans.inverted())
                fig2d.savefig(os.path.join(
                    conf.folder, 'plots', file_name+'.'+conf.extension),
                    bbox_inches=extent2d.expanded(1.1, 1.4))
            if conf.plot:
                extent1d = ax1d.get_window_extent().transformed(
                    fig1d.dpi_scale_trans.inverted())
                fig1d.savefig(os.path.join(
                    conf.folder, 'plots', file_name+'.'+conf.extension),
                    bbox_inches=extent1d.expanded(1.1, 1.4))
            # Store the function in a file
            for info in information_instances:
                if not info.ignore_param:
                    hist_file_name = os.path.join(
                        info.folder, 'plots',
                        info.basename+'_%s.hist' % (
                            standard_name))
                    write_histogram(hist_file_name,
                                    info.interp_grid, info.interp_hist)
        # Now do the rest of the triangle plot
        if conf.plot_2d:
            for second_index in xrange(index):
                second_name = plotted_parameters[second_index]
                for info in information_instances:
                    if not info.ignore_param:
                        try:
                            info.native_second_index = info.ref_names.index(
                                plotted_parameters[second_index])
                            info.has_second_param = True
                            second_standard_name = info.backup_names[
                                info.native_second_index]
                        except ValueError:
                            info.has_second_param = False
                    else:
                        info.has_second_param = False
                ax2dsub = fig2d.add_subplot(
                    len(plotted_parameters),
                    len(plotted_parameters),
                    (index)*len(plotted_parameters)+second_index+1)
                for info in information_instances:
                    if info.has_second_param:
                        ax2dsub.axis([info.x_range[info.native_second_index][0],
                                      info.x_range[info.native_second_index][1],
                                      info.x_range[info.native_index][0],
                                      info.x_range[info.native_index][1]])
                        # 2D likelihood (first step)
                        #
                        # simply the histogram from the chains, with few bins only
                        #
                        info.n, info.xedges, info.yedges = np.histogram2d(
                            info.chain[:, info.native_index+2],
                            info.chain[:, info.native_second_index+2],
                            weights=info.chain[:, 0],
                            bins=(info.bins, info.bins),
                            normed=False)
                        info.extent = [
                            info.x_range[info.native_second_index][0],
                            info.x_range[info.native_second_index][1],
                            info.x_range[info.native_index][0],
                            info.x_range[info.native_index][1]]
                        info.x_centers = 0.5*(info.xedges[1:]+info.xedges[:-1])
                        info.y_centers = 0.5*(info.yedges[1:]+info.yedges[:-1])
                        # 2D likelihood (second step)
                        #
                        # like for 1D, interpolate to get a finer grid
                        # TODO: we should not only interpolate between bin centers, but also extrapolate between side bin centers and bin edges
                        #
                        interp_y_centers = scipy.ndimage.zoom(info.y_centers,info.interpolation_smoothing, mode='reflect')
                        interp_x_centers = scipy.ndimage.zoom(info.x_centers,info.interpolation_smoothing, mode='reflect')
                        interp_likelihood = scipy.ndimage.zoom(info.n,info.interpolation_smoothing, mode='reflect')
                        # 2D likelihood (third step)
                        #
                        # gaussian smoothing
                        #
                        sigma = info.interpolation_smoothing*info.gaussian_smoothing
                        interp_smoothed_likelihood = scipy.ndimage.filters.gaussian_filter(interp_likelihood,[sigma,sigma], mode='reflect')
                        # Execute some customisation scripts for the 2d contour plots
                        if (info.custom2d != []):
                            for elem in info.custom2d:
                                execfile('plot_files/'+elem)
                        # plotting contours, using the ctr_level method (from Karim
                        # Benabed). Note that only the 1 and 2 sigma contours are
                        # displayed (due to the line with info.levels[:2])
                        try:
                            ###########################
                            # plot 2D filled contours #
                            ###########################
                            if not info.contours_only:
                                contours = ax2dsub.contourf(
                                    interp_y_centers,
                                    interp_x_centers,
                                    interp_smoothed_likelihood,
                                    extent=info.extent,
                                    levels=ctr_level(
                                        interp_smoothed_likelihood,
                                        info.levels[:2]),
                                    zorder=4,
                                    colors = info.MP_color_cycle[info.id],
                                    alpha=info.alphas[info.id])
                                # now add a thin darker line
                                # around the 95% contour
                                ax2dsub.contour(
                                    interp_y_centers,
                                    interp_x_centers,
                                    interp_smoothed_likelihood,
                                    extent=info.extent,
                                    levels=ctr_level(
                                        interp_smoothed_likelihood,
                                        info.levels[1:2]),
                                    zorder=4,
                                    colors = info.MP_color_cycle[info.id][1],
                                    alpha = info.alphas[info.id],
                                    linewidths=1)
                            ###########################
                            # plot 2D contours        #
                            ###########################
                            if info.contours_only:
                                contours = ax2dsub.contour(
                                    interp_y_centers,
                                    interp_x_centers,
                                    interp_smoothed_likelihood,
                                    extent=info.extent, levels=ctr_level(
                                        interp_smoothed_likelihood,
                                        info.levels[:2]),
                                    zorder=4,
                                    colors = info.MP_color_cycle[info.id],
                                    alpha = info.alphas[info.id],
                                    linewidths=info.line_width)
                        except Warning:
                            warnings.warn(
                                "The routine could not find the contour of the " +
                                "'%s-%s' 2d-plot" % (
                                    info.plotted_parameters[info.native_index],
                                    info.plotted_parameters[info.native_second_index]))
                        ax2dsub.set_xticks(info.ticks[info.native_second_index])
                        ax2dsub.set_yticks(info.ticks[info.native_index])
                        ax2dsub.tick_params('both',direction='inout',top=True,bottom=True,left=True,right=True)
                        if index == len(plotted_parameters)-1:
                            ax2dsub.set_xticklabels(
                                ['%.{0}g'.format(info.decimal) % s for s in
                                 info.ticks[info.native_second_index]],
                                fontsize=info.ticksize)
                            if conf.legend_style == 'sides':
                                ax2dsub.set_xlabel(
                                    info.tex_names[info.native_second_index],
                                    fontsize=info.fontsize)
                        else:
                            ax2dsub.set_xticklabels([''])
                        ax2dsub.set_yticks(info.ticks[info.native_index])
                        if second_index == 0:
                            ax2dsub.set_yticklabels(
                                ['%.{0}g'.format(info.decimal) % s for s in
                                 info.ticks[info.native_index]],
                                fontsize=info.ticksize)
                        else:
                            ax2dsub.set_yticklabels([''])
                        if conf.legend_style == 'sides':
                            if second_index == 0:
                                ax2dsub.set_ylabel(
                                    info.tex_names[info.native_index],
                                    fontsize=info.fontsize)
                if conf.subplot is True:
                    # Store the individual 2d plots.
                    if conf.plot_2d:
                        area = ax2dsub.get_window_extent().transformed(
                            fig2d.dpi_scale_trans.inverted())
                        # Pad the saved area by 10% in the x-direction and 20% in
                        # the y-direction
                        fig2d.savefig(os.path.join(
                            conf.folder, 'plots',
                            file_name+'_2d_%s-%s.%s' % (
                                standard_name, second_standard_name,
                                conf.extension)),
                            bbox_inches=area.expanded(1.4, 1.4))
                    # store the coordinates of the points for further
                    # plotting.
                    store_contour_coordinates(
                        conf, standard_name, second_standard_name, contours)
                for info in information_instances:
                    if not info.ignore_param and info.has_second_param:
                        info.hist_file_name = os.path.join(
                            info.folder, 'plots',
                            '{0}_2d_{1}-{2}.hist'.format(
                                info.basename,
                                standard_name,
                                second_standard_name))
                        write_histogram_2d(
                            info.hist_file_name, info.x_centers, info.y_centers,
                            info.extent, info.n)
    print '-----------------------------------------------'
    if conf.plot:
        print '--> Saving figures to .{0} files'.format(info.extension)
        plot_name = '-vs-'.join([os.path.split(elem.folder)[-1]
                                 for elem in information_instances])
        if conf.plot_2d:
            # Legend of triangle plot
            if ((conf.plot_legend_2d == None) and (len(legends) > 1)) or (conf.plot_legend_2d == True):
                # Create a virtual subplot in the top right corner,
                # just to be able to anchor the legend nicely
                ax2d = fig2d.add_subplot(
                    len(plotted_parameters),
                    len(plotted_parameters),
                    len(plotted_parameters),
                    )
                ax2d.axis('off')
                try:
                    ax2d.legend(legends, legend_names,
                                loc='upper right',
                                borderaxespad=0.,
                                fontsize=info.legendsize)
                except TypeError:
                    # older matplotlib versions take the font size through
                    # the `prop` dictionary instead of a keyword argument
                    ax2d.legend(legends, legend_names,
                                loc='upper right',
                                borderaxespad=0.,
                                prop={'fontsize': info.legendsize})
            fig2d.subplots_adjust(wspace=0, hspace=0)
            fig2d.savefig(
                os.path.join(
                    conf.folder, 'plots', '{0}_triangle.{1}'.format(
                        plot_name, info.extension)),
                bbox_inches='tight')
        # Legend of 1D plot
        if conf.plot:
            if ((conf.plot_legend_1d == None) and (len(legends) > 1)) or (conf.plot_legend_1d == True):
                # no space left: add legend to the right
                if len(plotted_parameters)<num_columns*num_lines:
                    fig1d.legend(legends, legend_names,
                                 loc= ((num_columns-0.9)/num_columns,0.1/num_columns),
                                 fontsize=info.legendsize)
                # space left in lower right part: add legend there
                else:
                    fig1d.legend(legends, legend_names,
                                 loc= 'center right',
                                 bbox_to_anchor = (1.2,0.5),
                                 fontsize=info.legendsize)
            fig1d.tight_layout()
            fig1d.savefig(
                os.path.join(
                    conf.folder, 'plots', '{0}_1d.{1}'.format(
                        plot_name, info.extension)),
                bbox_inches='tight')
def ctr_level(histogram2d, lvl, infinite=False):
    """
    Compute contour level values for the 2d posterior plots.

    Given a 2d histogram and a list of requested confidence fractions,
    return the histogram heights whose super-level sets enclose each
    fraction of the total mass (method by Karim Benabed).
    """
    # Flatten to a sorted 1d float copy of the bin heights
    flat = np.sort(histogram2d.flatten()*1.)
    # Cumulative mass, accumulated from the highest bin downwards,
    # normalised so the last entry equals one
    cumulative = np.cumsum(flat[::-1])
    cumulative /= cumulative[-1]
    # For every requested fraction, find how many top bins are needed
    positions = np.searchsorted(cumulative, lvl)[::-1]
    levels = [0]+[flat[-idx] for idx in positions]+[flat.max()]
    if infinite:
        return levels
    return levels[1:]
def minimum_credible_intervals(info):
    """
    Extract minimum credible intervals (method from Jan Haman) FIXME

    For each requested confidence level in ``info.levels``, a "water level"
    is lowered through the 1d histogram ``info.hist`` by bisection until the
    mass enclosed above the level matches the requested fraction; the
    crossing points give the interval bounds. The returned bounds are
    expressed relative to the parameter mean
    (``info.mean[info.native_index]``). Fails (returns NaN bounds) for
    multimodal histograms, where the region above the water level is not a
    single contiguous interval.
    """
    histogram = info.hist
    bincenters = info.bincenters
    levels = info.levels
    bounds = np.zeros((len(levels), 2))
    j = 0
    # uniform bin spacing is assumed throughout (np.histogram guarantees it)
    delta = bincenters[1]-bincenters[0]
    # linearly extrapolated histogram heights at the outer bin edges,
    # clipped at zero
    left_edge = max(histogram[0] - 0.5*(histogram[1]-histogram[0]), 0.)
    right_edge = max(histogram[-1] + 0.5*(histogram[-1]-histogram[-2]), 0.)
    failed = False
    for level in levels:
        # total mass under the histogram (trapezoidal, including the
        # extrapolated edge triangles)
        norm = float(
            (np.sum(histogram)-0.5*(histogram[0]+histogram[-1]))*delta)
        norm += 0.25*(left_edge+histogram[0])*delta
        norm += 0.25*(right_edge+histogram[-1])*delta
        # bisection brackets for the water level
        water_level_up = np.max(histogram)*1.0
        water_level_down = np.min(histogram)*1.0
        top = 0.
        iterations = 0
        # bisect until the enclosed fraction matches `level` to 1e-4
        while (abs((top/norm)-level) > 0.0001) and not failed:
            top = 0.
            water_level = (water_level_up + water_level_down)/2.
            #ontop = [elem for elem in histogram if elem > water_level]
            indices = [i for i in range(len(histogram))
                       if histogram[i] > water_level]
            # check for multimodal posteriors
            if ((indices[-1]-indices[0]+1) != len(indices)):
                warnings.warn(
                    "could not derive minimum credible intervals " +
                    "for this multimodal posterior")
                warnings.warn(
                    "please try running longer chains or reducing " +
                    "the number of bins with --bins BINS (default: 20)")
                failed = True
                break
            # mass of the bins strictly above the water level
            top = (np.sum(histogram[indices]) -
                   0.5*(histogram[indices[0]]+histogram[indices[-1]]))*(delta)
            # left: add the partial trapezoid between the water-level
            # crossing and the first bin above the level
            if indices[0] > 0:
                top += (0.5*(water_level+histogram[indices[0]]) *
                        delta*(histogram[indices[0]]-water_level) /
                        (histogram[indices[0]]-histogram[indices[0]-1]))
            else:
                if (left_edge > water_level):
                    top += 0.25*(left_edge+histogram[indices[0]])*delta
                else:
                    top += (0.25*(water_level + histogram[indices[0]]) *
                            delta*(histogram[indices[0]]-water_level) /
                            (histogram[indices[0]]-left_edge))
            # right: same partial contribution on the other side
            if indices[-1] < (len(histogram)-1):
                top += (0.5*(water_level + histogram[indices[-1]]) *
                        delta*(histogram[indices[-1]]-water_level) /
                        (histogram[indices[-1]]-histogram[indices[-1]+1]))
            else:
                if (right_edge > water_level):
                    top += 0.25*(right_edge+histogram[indices[-1]])*delta
                else:
                    top += (0.25*(water_level + histogram[indices[-1]]) *
                            delta * (histogram[indices[-1]]-water_level) /
                            (histogram[indices[-1]]-right_edge))
            # too much mass enclosed -> raise the level, otherwise lower it
            if top/norm >= level:
                water_level_down = water_level
            else:
                water_level_up = water_level
            # safeguard, just in case
            iterations += 1
            if (iterations > 1000):
                warnings.warn(
                    "the loop to check for sigma deviations was " +
                    "taking too long to converge")
                failed = True
                break
        # min: interpolate the position of the left crossing point
        if failed:
            bounds[j][0] = np.nan
        elif indices[0] > 0:
            bounds[j][0] = bincenters[indices[0]] - delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-histogram[indices[0]-1])
        else:
            if (left_edge > water_level):
                bounds[j][0] = bincenters[0]-0.5*delta
            else:
                bounds[j][0] = bincenters[indices[0]] - 0.5*delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-left_edge)
        # max: interpolate the position of the right crossing point
        if failed:
            bounds[j][1] = np.nan
        elif indices[-1] < (len(histogram)-1):
            bounds[j][1] = bincenters[indices[-1]] + delta*(histogram[indices[-1]]-water_level)/(histogram[indices[-1]]-histogram[indices[-1]+1])
        else:
            if (right_edge > water_level):
                bounds[j][1] = bincenters[-1]+0.5*delta
            else:
                bounds[j][1] = bincenters[indices[-1]] + \
                    0.5*delta*(histogram[indices[-1]]-water_level) / \
                    (histogram[indices[-1]]-right_edge)
        j += 1
    # express the bounds relative to the parameter mean
    for elem in bounds:
        for j in (0, 1):
            elem[j] -= info.mean[info.native_index]
    return bounds
def write_h(info_file, indices, name, string, quantity, modifiers=None):
    """
    Write one horizontal, tab-separated line of output to an open file.

    The line starts with `name`, then one cell per entry of `indices`,
    each formatted by applying the %-style format `string` to the
    corresponding element of `quantity`.
    """
    header = '\n ' + name + '\t: '
    cells = [string % quantity[idx] + '\t' for idx in indices]
    info_file.write(header + ''.join(cells))
def cubic_interpolation(info, hist, bincenters):
    """
    Return a ten-times-finer, renormalised version of a 1d histogram.

    Small routine to accommodate the absence of the interpolate module.
    Depending on ``info.posterior_smoothing``, either a polynomial fit in
    log-likelihood (>= 2, default), or a linear (== 0) / cubic (== 1)
    inter/extra-polation is used. On any failure, the raw histogram is
    returned unchanged.

    Returns
    -------
    interp_hist, interp_grid :
        the refined histogram (normalised to a maximum of 1) and the grid
        it is sampled on — or the input ``hist, bincenters`` on failure.
    """
    # we start from a try because if anything goes wrong, we want to return the raw histogram rather than nothing
    try:
        # test that all elements are strictly positive, otherwise we could not take the log, and we must switch to the robust method
        for i,elem in enumerate(hist):
            if elem == 0.:
                hist[i] = 1.e-99
            elif elem <0:
                print hist[i]
                # NOTE(review): `exception` is undefined, so this raises a
                # NameError which the enclosing bare `except` catches,
                # falling back to the raw histogram — presumably intended.
                raise exception()
        # One of our methods (using polyfit) does assume that the input histogram has a maximum value of 1.
        # If in a future version this is not guaranteed anymore, we should renormalise it here.
        # This is important for computing weights and thresholds.
        # The threshold below which the likelihood will be
        # approximated as zero is hard-coded here (could become an
        # input parameter but that would not clearly be useful).
        threshold = 1.e-3
        # prepare the interpolation on log(Like):
        ln_hist = np.log(hist)
        # define a finer grid on a wider range (assuming that the following method is fine both for inter- and extra-polation)
        left = max(info.boundaries[info.native_index][0],bincenters[0]-2.5*(bincenters[1]-bincenters[0]))
        right = min(info.boundaries[info.native_index][1],bincenters[-1]+2.5*(bincenters[-1]-bincenters[-2]))
        interp_grid = np.linspace(left, right, (len(bincenters)+4)*10+1)
        ####################################
        # polynomial fit method (default): #
        ####################################
        if info.posterior_smoothing >= 2:
            # the points in the histogram with a very low likelihood (i.e. hist[i]<<1, hist is normalised to a maximum of one)
            # have a lot of Poisson noise and are unreliable. However, if we do nothing, they may dominate the outcome of the fitted polynomial.
            # Hence we can:
            # 1) give them less weight (weight = sqrt(hist) seems to work well)
            # 2) cut them at some threshold value and base the fit only on higher points
            # 3) both
            # the one working best seems to be 2). We also wrote 1) below, but commented out.
            # method 1):
            #f = np.poly1d(np.polyfit(bincenters,ln_hist,info.posterior_smoothing,w=np.sqrt(hist)))
            #interp_hist = f(interp_grid)
            # method 2):
            # find index values such that hist is negligible everywhere excepted in hist[sub_indices[0]], hist[sub_indices[-1]]
            sub_indices = [i for i,elem in enumerate(hist) if elem > threshold]
            # The interpolation is done precisely in this range: hist[sub_indices[0]] < x < hist[sub_indices[-1]]
            g = np.poly1d(np.polyfit(bincenters[sub_indices],ln_hist[sub_indices],info.posterior_smoothing)) #,w=np.sqrt(hist[sub_indices])))
            # The extrapolation is done in a range including one more bin on each side, excepted when the boundary is hit
            extrapolation_range_left = [info.boundaries[info.native_index][0] if sub_indices[0] == 0 else bincenters[sub_indices[0]-1]]
            extrapolation_range_right = [info.boundaries[info.native_index][1] if sub_indices[-1] == len(hist)-1 else bincenters[sub_indices[-1]+1]]
            # outside of this range, log(L) is brutally set to a negligible value, log(1.e-10)
            interp_hist = [g(elem) if (elem > extrapolation_range_left and elem < extrapolation_range_right) else np.log(1.e-10) for elem in interp_grid]
        elif info.posterior_smoothing<0:
            raise io_mp.AnalyzeError(
                "You passed --posterior-smoothing %d, this value is not understood"%info.posterior_smoothing)
        ############################################################
        # other methods:                                           #
        # - linear inter/extra-polation if posterior_smoothing = 0 #
        # - cubic inter/extra-polation if posterior_smoothing = 1  #
        ############################################################
        else:
            # try first inter/extra-polation
            try:
                # prepare to interpolate and extrapolate:
                if info.posterior_smoothing == 0:
                    f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear', fill_value='extrapolate')
                else:
                    f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic', fill_value='extrapolate')
                interp_hist = f(interp_grid)
            # failure probably caused by old scipy not having the fill_value='extrapolate' argument. Then, only interpolate.
            except:
                # define a finer grid but not a wider one
                left = max(info.boundaries[info.native_index][0],bincenters[0])
                right = min(info.boundaries[info.native_index][1],bincenters[-1])
                interp_grid = np.linspace(left, right, len(bincenters)*10+1)
                # prepare to interpolate only:
                if info.posterior_smoothing == 0:
                    f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear')
                else:
                    f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic')
                interp_hist = f(interp_grid)
        # final steps used by all methods
        # go back from ln_Like to Like
        interp_hist = np.exp(interp_hist)
        # re-normalise the interpolated curve
        interp_hist = interp_hist / interp_hist.max()
        return interp_hist, interp_grid
    except:
        # we will end up here if anything went wrong before
        # do nothing (raw histogram)
        warnings.warn(
            "The 1D posterior could not be processed normally, probably" +
            "due to incomplete or obsolete numpy and/or scipy versions." +
            "So the raw histograms will be plotted.")
        return hist, bincenters
def write_histogram(hist_file_name, x_centers, hist):
"""
Store the posterior distribution to a file
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# 1d posterior distribution\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# Histogram\n")
hist_file.write(", ".join(
[str(elem) for elem in hist])+"\n")
print 'wrote ', hist_file_name
def read_histogram(histogram_path):
    """
    Recover a stored 1d posterior distribution.

    Parameters
    ----------
    histogram_path : str
        path to a file previously produced by :func:`write_histogram`

    Returns
    -------
    x_centers, hist : numpy arrays
        bin centers and the corresponding histogram values
    """
    with open(histogram_path, 'r') as hist_file:
        for line in hist_file:
            if line:
                if line.find("# x_centers") != -1:
                    # the data sits on the line following each header;
                    # next() works on Python 2.6+ and Python 3, unlike the
                    # Python-2-only file method .next() used previously
                    x_centers = [float(elem) for elem in
                                 next(hist_file).split(",")]
                elif line.find("# Histogram") != -1:
                    hist = [float(elem) for elem in
                            next(hist_file).split(",")]
    x_centers = np.array(x_centers)
    hist = np.array(hist)

    return x_centers, hist
def write_histogram_2d(hist_file_name, x_centers, y_centers, extent, hist):
    """
    Store the 2d histogram information to a file, to plot it later.

    The layout (section headers followed by comma-separated values, one
    line per histogram row) is the one expected back by read_histogram_2d.
    """
    def _csv_line(values):
        # one comma-separated line per sequence of values
        return ", ".join([str(elem) for elem in values]) + "\n"

    with open(hist_file_name, 'w') as hist_file:
        hist_file.write("# Interpolated histogram\n")
        hist_file.write("\n# x_centers\n")
        hist_file.write(_csv_line(x_centers))
        hist_file.write("\n# y_centers\n")
        hist_file.write(_csv_line(y_centers))
        hist_file.write("\n# Extent\n")
        hist_file.write(_csv_line(extent))
        hist_file.write("\n# Histogram\n")
        for row in hist:
            hist_file.write(_csv_line(row))
def read_histogram_2d(histogram_path):
    """
    Read the histogram information that was stored in a file.

    To use it, call something like this:

    .. code::

        x_centers, y_centers, extent, hist = read_histogram_2d_from_file(path)
        fig, ax = plt.subplots()
        ax.contourf(
            y_centers, x_centers, hist, extent=extent,
            levels=ctr_level(hist, [0.68, 0.95]),
            zorder=5, cmap=plt.cm.autumn_r)
        plt.show()

    Returns
    -------
    x_centers, y_centers, extent, hist : numpy arrays
        bin centers along both axes, the plot extent, and the 2d histogram
        (one row per x_center)
    """
    with open(histogram_path, 'r') as hist_file:
        # number of histogram rows to read, set by the x_centers section
        length = 0
        for line in hist_file:
            if line:
                # the data sits on the line(s) following each header;
                # next() works on Python 2.6+ and Python 3, unlike the
                # Python-2-only file method .next() used previously
                if line.find("# x_centers") != -1:
                    x_centers = [float(elem) for elem in
                                 next(hist_file).split(",")]
                    length = len(x_centers)
                elif line.find("# y_centers") != -1:
                    y_centers = [float(elem) for elem in
                                 next(hist_file).split(",")]
                elif line.find("# Extent") != -1:
                    extent = [float(elem) for elem in
                              next(hist_file).split(",")]
                elif line.find("# Histogram") != -1:
                    hist = []
                    for index in range(length):
                        hist.append([float(elem) for elem in
                                     next(hist_file).split(",")])
    x_centers = np.array(x_centers)
    y_centers = np.array(y_centers)
    extent = np.array(extent)
    hist = np.array(hist)

    return x_centers, y_centers, extent, hist
def clean_conversion(module_name, tag, folder):
    """
    Execute the methods "convert" from the different sampling algorithms

    Returns True if a conversion was performed, False otherwise.
    (Previously the function silently fell off the end and returned None
    when the wrapper module was not installed or when `folder` was not a
    directory; it now returns an explicit False in those cases, which is
    backward compatible since None is falsy.)
    """
    subfolder_name = tag + "_subfolder"
    try:
        module = importlib.import_module(module_name)
        subfolder = getattr(module, subfolder_name)
    except ImportError:
        # The module is not installed, the conversion can not take place
        return False
    if not os.path.isdir(folder):
        return False
    # Remove any potential trailing slash
    folder = os.path.join(
        *[elem for elem in folder.split(os.path.sep) if elem])
    if folder.split(os.path.sep)[-1] == subfolder:
        try:
            getattr(module, 'from_%s_output_to_chains' % tag)(folder)
        except IOError:
            raise io_mp.AnalyzeError(
                "You asked to analyze a %s folder which " % tag +
                "seems to come from an unfinished run, or to be empty " +
                "or corrupt. Please make sure the run went smoothly " +
                "enough.")
        warnings.warn(
            "The content of the %s subfolder has been " % tag +
            "translated for Monte Python. Please run an "
            "analysis of the entire folder now.")
        return True
    return False
def separate_files(files):
    """
    Separate the input files in folder

    Given all input arguments to the command line files entry, group them
    in a list of lists by the folder they belong to. The number of
    identified folders determines the number of Information instances to
    create.
    """
    def owning_folder(path):
        # Existing files are grouped by their directory; anything else
        # (typically a folder argument) stands for itself.
        return os.path.dirname(path) if os.path.isfile(path) else path

    grouped = []
    current_group = [files[0]]
    current_folder = owning_folder(files[0])
    for element in files[1:]:
        folder = owning_folder(element)
        if folder == current_folder:
            current_group.append(element)
        else:
            current_folder = folder
            grouped.append(current_group)
            current_group = [element]
    grouped.append(current_group)
    return grouped
def recover_folder_and_files(files):
    """
    Distinguish the cases when analyze is called with files or folder

    Note that this takes place chronologically after the function
    `separate_files`
    """
    # A chain file must contain all of these substrings to be recognised
    # as a proper chain...
    substrings = ['.txt', '__']
    # ... and must be at least this many bytes, to skip empty chains
    limit = 10

    def is_chain(folder, name):
        full_path = os.path.join(folder, name)
        return (not os.path.isdir(full_path)
                and not os.path.getsize(full_path) < limit
                and all(token in name for token in substrings))

    if os.path.isdir(files[0]):
        # A folder was given: grab every chain file inside it
        folder = os.path.normpath(files[0])
        files = [os.path.join(folder, elem) for elem in os.listdir(folder)
                 if is_chain(folder, elem)]
    else:
        # Chain files were given: recover the folder from the first one
        if not os.path.exists(files[0]):
            raise io_mp.AnalyzeError(
                "You provided a non-existant folder/file to analyze")
        folder = os.path.relpath(
            os.path.dirname(os.path.realpath(files[0])), os.path.curdir)
        # Keep only the requested files that look like proper chains
        requested = np.copy(files)
        files = [os.path.join(folder, elem) for elem in os.listdir(folder)
                 if os.path.join(folder, elem) in requested
                 and is_chain(folder, elem)]
    basename = os.path.basename(folder)
    return folder, files, basename
def extract_array(line):
    """
    Return the array on the RHS of the line

    Each element is stripped of surrounding quotes, then converted to
    int when possible, else to float, else kept as a string.

    >>> extract_array("toto = ['one', 'two']\n")
    ['one', 'two']
    >>> extract_array('toto = ["one", 0.2]\n')
    ['one', 0.2]
    """
    def coerce(token):
        # Try the narrowest numeric type first, fall back to the string
        for cast in (int, float):
            try:
                return cast(token)
            except ValueError:
                pass
        return token

    # Recover RHS of the equal sign, remove surrounding spaces and the
    # array brackets
    rhs = line.split('=')[-1].strip()
    rhs = rhs.strip(']').lstrip('[')
    return [coerce(token.strip().strip('"').strip("'"))
            for token in rhs.split(',')]
def extract_dict(line):
    """
    Return the key and value of the dictionary element contained in line

    >>> extract_dict("something['toto'] = [0, 1, 2, -2, 'cosmo']")
    'toto', [0, 1, 2, -2, 'cosmo']
    """
    # The right-hand side is parsed exactly like a plain array
    values = extract_array(line)
    # The key is the quoted token inside the brackets of the LHS
    left_side = line.split('=')[0].strip()
    key = left_side.split('[')[-1].strip(']').strip('"').strip("'")
    return key, values
def extract_parameter_names(info):
    """
    Reading the log.param, store in the Information instance the names

    Fills `info.ref_names` (internal names), `info.tex_names` (pretty
    names for plots), `info.boundaries`, `info.scales` (diagonal
    rescaling matrix), `info.backup_names` (original names, matching the
    covariance matrix), `info.number_parameters` and
    `info.plotted_parameters`.
    """
    backup_names = []
    plotted_parameters = []
    boundaries = []
    ref_names = []
    tex_names = []
    scales = []
    with open(info.param_path, 'r') as param:
        for line in param:
            # Any line holding a comment sign is skipped entirely
            if line.find('#') == -1:
                if line.find('data.experiments') != -1:
                    info.experiments = extract_array(line)
                if line.find('data.parameters') != -1:
                    name, array = extract_dict(line)
                    original = name
                    # Rename the names according the .extra file (opt).
                    # Plain `in` works on python 2 and 3 alike, unlike
                    # the removed dict.iterkeys().
                    if name in info.to_change:
                        name = info.to_change[name]
                    # If the name corresponds to a varying parameter (fourth
                    # entry in the initial array being non-zero, or a derived
                    # parameter (could be designed as fixed, it does not make
                    # any difference)), then continue the process of analyzing.
                    if array[3] != 0 or array[5] == 'derived':
                        # The real name is always kept, to have still the class
                        # names in the covmat
                        backup_names.append(original)
                        # With the list "to_plot", we can potentially restrict
                        # the variables plotted. If it is empty, though, simply
                        # all parameters will be plotted.
                        if info.to_plot == []:
                            plotted_parameters.append(name)
                        else:
                            if name in info.to_plot:
                                plotted_parameters.append(name)
                        # Append to the boundaries array; 'None' and the
                        # sentinel -1 both mean "unbounded"
                        boundaries.append([
                            None if elem == 'None' or (isinstance(elem, int)
                                                       and elem == -1)
                            else elem for elem in array[1:3]])
                        ref_names.append(name)
                        # Take care of the scales
                        scale = array[4]
                        rescale = 1.
                        if name in info.new_scales:
                            scale = info.new_scales[name]
                            rescale = info.new_scales[name]/array[4]
                        scales.append(rescale)
                        # Given the scale, decide for the pretty tex name
                        number = 1./scale
                        tex_names.append(
                            io_mp.get_tex_name(name, number=number))
    scales = np.diag(scales)
    info.ref_names = ref_names
    info.tex_names = tex_names
    info.boundaries = boundaries
    info.backup_names = backup_names
    info.scales = scales
    # Beware, the following two numbers are different. The first is the total
    # number of parameters stored in the chain, whereas the second is for
    # plotting purpose only.
    info.number_parameters = len(ref_names)
    info.plotted_parameters = plotted_parameters
def find_maximum_of_likelihood(info):
    """
    Finding the global maximum of likelihood

    Each chain contributes the minimum of its '- log likelihood' column;
    the smallest of those is stored in `info.min_minus_lkl`, so that one
    common reference is used instead of each chain's own maximum.
    """
    chain_minima = []
    for chain_file in info.files:
        # Column 1 of a chain holds the '- log likelihood'; comment
        # lines are excluded. (This could potentially be faster with
        # pandas, but is already quite fast.)
        with open(chain_file, 'r') as one_chain:
            minus_lkl = np.array(
                [float(line.split()[1].strip())
                 for line in ifilterfalse(iscomment, one_chain)])
        # Empty chains are simply skipped at this stage
        if minus_lkl.size:
            # beware, it is the min because we are talking about
            # '- log likelihood'
            chain_minima.append(minus_lkl.min())
    # Selecting only the true maximum.
    try:
        info.min_minus_lkl = min(chain_minima)
    except ValueError:
        raise io_mp.AnalyzeError(
            "No decently sized chain was found in the desired folder. " +
            "Please wait to have more accepted point before trying " +
            "to analyze it.")
def remove_bad_points(info):
    """
    Create an array with all the points from the chains, after removing non-markovian, burn-in and fixed fraction

    Returns the list `spam` holding one numpy array per sufficiently long
    chain, where each row is [multiplicity, -log(likelihood), params...].
    Also accumulates `info.steps` / `info.accepted_steps` and writes a
    per-chain summary to the log file at `info.log_path`.
    """
    # spam will brutally contain all the chains with sufficient number of
    # points, after the burn-in was removed.
    spam = list()
    # Recover the longest file name, for pleasing display
    max_name_length = max([len(e) for e in info.files])
    # Total number of steps done:
    steps = 0
    accepted_steps = 0
    # Open the log file
    log = open(info.log_path, 'w')
    for index, chain_file in enumerate(info.files):
        # To improve presentation, and print only once the full path of the
        # analyzed folder, we recover the length of the path name, and
        # create an empty complementary string of this length
        total_length = 18+max_name_length
        empty_length = 18+len(os.path.dirname(chain_file))+1
        basename = os.path.basename(chain_file)
        # (python 2 print statements built through exec so the field width
        # can be injected; the trailing comma suppresses the newline)
        if index == 0:
            exec "print '--> Scanning file %-{0}s' % chain_file,".format(
                max_name_length)
        else:
            exec "print '%{0}s%-{1}s' % ('', basename),".format(
                empty_length, total_length-empty_length)
        # cheese will brutally contain everything in the chain chain_file being
        # scanned
        #
        # This would read the chains including comment lines:
        #cheese = (np.array([[float(elem) for elem in line.split()]
        #                    for line in open(chain_file, 'r')]))
        #
        # This read the chains excluding comment lines:
        with open(chain_file, 'r') as f:
            cheese = (np.array([[float(elem) for elem in line.split()]
                                for line in ifilterfalse(iscomment,f)]))
        # If the file contains a broken line with a different number of
        # elements, the previous array generation might fail, and will not have
        # the correct shape. Hence the following command will fail. To avoid
        # that, the error is caught.
        try:
            local_min_minus_lkl = cheese[:, 1].min()
        except IndexError:
            raise io_mp.AnalyzeError(
                "Error while scanning %s." % chain_file +
                " This file most probably contains "
                "an incomplete line, rendering the analysis impossible. "
                "I think that the following line(s) is(are) wrong:\n %s" % (
                    '\n '.join(
                        ['-> %s' % line for line in
                         open(chain_file, 'r') if
                         len(line.split()) != len(info.backup_names)+2])))
        # NOTE(review): this counts every line of the file, comment lines
        # included -- confirm whether accepted steps should exclude them
        line_count = float(sum(1 for line in open(chain_file, 'r')))
        # Logging the information obtained until now.
        number_of_steps = cheese[:, 0].sum()
        log.write("%s\t " % os.path.basename(chain_file))
        log.write(" Number of steps:%d\t" % number_of_steps)
        log.write(" Steps accepted:%d\t" % line_count)
        log.write(" acc = %.2g\t" % (float(line_count)/number_of_steps))
        log.write("min(-loglike) = %.2f\n" % local_min_minus_lkl)
        steps += number_of_steps
        accepted_steps += line_count
        # check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
        try:
            # command_line.update is defined when called by the mcmc loop
            info.update
        except:
            # in case it was not defined (i.e. when analyze() is called directly by user), set it to False
            info.update = 0
        # Removing non-markovian part, burn-in, and fraction= (1 - keep-fraction)
        start = 0
        markovian=0
        try:
            # Read all comments in chains about times when proposal was updated
            # The last of these comments gives the number of lines to be skipped in the files
            if info.markovian and not info.update:
                with open(chain_file, 'r') as f:
                    for line in ifilter(iscomment,f):
                        start = int(line.split()[2])
                markovian = start
            # Remove burn-in, defined as all points until the likelhood reaches min_minus_lkl+LOG_LKL_CUTOFF
            while cheese[start, 1] > info.min_minus_lkl+LOG_LKL_CUTOFF:
                start += 1
            burnin = start-markovian
            # Remove fixed fraction as requested by user (usually not useful if non-markovian is also removed)
            if info.keep_fraction < 1:
                start = start + int((1.-info.keep_fraction)*(line_count - start))
            # (python 2 print statements; trailing commas keep everything
            # on one output line)
            print ": Removed",
            if info.markovian:
                print "%d non-markovian points," % markovian,
            print "%d points of burn-in," % burnin,
            if info.keep_fraction < 1:
                print "and first %.0f percent," % (100.*(1-info.keep_fraction)),
            print "keep %d steps" % (line_count-start)
        except IndexError:
            # The while loop above ran past the end of the chain: every
            # point is above the likelihood cut, nothing can be kept
            print ': Removed everything: chain not converged'
        # ham contains cheese without the burn-in, if there are any points
        # left (more than 5)
        if np.shape(cheese)[0] > start+5:
            ham = np.copy(cheese[int(start)::])
            # Deal with single file case
            if len(info.files) == 1:
                warnings.warn("Convergence computed for a single file")
                # Split the lone chain in three interleaved sub-chains so
                # the convergence diagnostics still have several samples
                bacon = np.copy(cheese[::3, :])
                egg = np.copy(cheese[1::3, :])
                sausage = np.copy(cheese[2::3, :])
                spam.append(bacon)
                spam.append(egg)
                spam.append(sausage)
                continue
            # Adding resulting table to spam
            spam.append(ham)
    # Test the length of the list
    if len(spam) == 0:
        raise io_mp.AnalyzeError(
            "No decently sized chain was found. " +
            "Please wait a bit to analyze this folder")
    # Applying now new rules for scales, if the name is contained in the
    # referenced names
    for name in info.new_scales.iterkeys():
        try:
            index = info.ref_names.index(name)
            for i in xrange(len(spam)):
                spam[i][:, index+2] *= 1./info.scales[index, index]
        except ValueError:
            # there is nothing to do if the name is not contained in ref_names
            pass
    info.steps = steps
    info.accepted_steps = accepted_steps
    return spam
def compute_mean(mean, spam, total):
    """
    Fill `mean` with the multiplicity-weighted mean of every parameter

    Parameters
    ----------
    mean : 2d array
        Output array of shape (len(spam)+1, number of parameters): row 0
        receives the global mean over all chains, row j+1 the mean of
        chain j alone. Must be zero-initialised by the caller.
    spam : list of 2d arrays
        One array per chain; column 0 holds the multiplicity of each
        point, columns 2 and beyond the parameter values.
    total : 1d array
        total[0] is the sum of multiplicities over all chains, total[j+1]
        the sum for chain j alone.
    """
    # range instead of the python-2-only xrange (identical behaviour)
    for i in range(np.shape(mean)[1]):
        for j in range(len(spam)):
            submean = np.sum(spam[j][:, 0]*spam[j][:, i+2])
            mean[j+1, i] = submean / total[j+1]
            mean[0, i] += submean
        mean[0, i] /= total[0]
def compute_variance(var, mean, spam, total):
    """
    Fill `var` with the multiplicity-weighted variance of every parameter

    Parameters
    ----------
    var : 2d array
        Output array of shape (len(spam)+1, number of parameters): row 0
        receives the global variance (around the global mean), row j+1
        the variance of chain j (around its own mean). Both use the
        unbiased (N-1) normalisation. Must be zero-initialised.
    mean : 2d array
        Means as filled by :func:`compute_mean`.
    spam : list of 2d arrays
        One array per chain; column 0 is the multiplicity, columns 2 and
        beyond the parameter values.
    total : 1d array
        total[0] is the global sum of multiplicities, total[j+1] that of
        chain j.
    """
    # range instead of the python-2-only xrange (identical behaviour)
    for i in range(np.shape(var)[1]):
        for j in range(len(spam)):
            var[0, i] += np.sum(
                spam[j][:, 0]*(spam[j][:, i+2]-mean[0, i])**2)
            var[j+1, i] = np.sum(
                spam[j][:, 0]*(spam[j][:, i+2]-mean[j+1, i])**2) / \
                (total[j+1]-1)
        var[0, i] /= (total[0]-1)
def compute_covariance_matrix(info):
    """
    Compute the weighted covariance matrix of the plotted parameters

    Uses `info.chain` (column 0: multiplicity, columns 2 and beyond:
    parameter values), `info.mean` and `info.total`. The scale factors
    are removed through `info.scales` so the returned matrix describes
    the true parameter covariance.
    """
    covar = np.zeros((len(info.ref_names), len(info.ref_names)))
    # range instead of the python-2-only xrange (identical behaviour);
    # the matrix is symmetric, so only the upper triangle is computed
    # and then mirrored
    for i in range(len(info.ref_names)):
        for j in range(i, len(info.ref_names)):
            covar[i, j] = (
                info.chain[:, 0]*(
                    (info.chain[:, i+2]-info.mean[i]) *
                    (info.chain[:, j+2]-info.mean[j]))).sum()
            if i != j:
                covar[j, i] = covar[i, j]
    covar /= info.total
    # Removing scale factors in order to store true parameter covariance
    covar = np.dot(info.scales.T, np.dot(covar, info.scales))
    return covar
def adjust_ticks(param, information_instances):
    """
    Harmonise the x-range and ticks of one parameter across several runs

    When more than one Information instance plots `param`, each comes
    with its own range and tick positions: replace them all with a
    common range covering every instance, and a fresh set of evenly
    spaced ticks spanning all existing ones.
    """
    if len(information_instances) == 1:
        return
    # Collect the range and ticks of every instance plotting this param
    x_ranges = []
    ticks = []
    for info in information_instances:
        if not info.ignore_param:
            x_ranges.append(info.x_range[info.native_index])
            ticks.append(info.ticks[info.native_index])
    # The new x_range and ticks should min/max all the existing ones
    new_x_range = np.array(
        [min(e[0] for e in x_ranges), max(e[1] for e in x_ranges)])
    tick_low = min(e[0] for e in ticks)
    tick_high = max(e[-1] for e in ticks)
    new_ticks = np.linspace(tick_low, tick_high, info.ticknumber)
    # Push the common values back into every concerned instance
    for info in information_instances:
        if not info.ignore_param:
            info.x_range[info.native_index] = new_x_range
            info.ticks[info.native_index] = new_ticks
def store_contour_coordinates(info, name1, name2, contours):
    """
    Write the 2d contour coordinates for (name1, name2) to a data file

    The wider contour (second entry of `info.levels`, drawn from
    `contours.collections[0]`) is written first, followed by the inner
    one. Each level is introduced by a comment line; within a level,
    paths are separated by blank lines and only the outer boundary of
    each path is kept.
    """
    file_name = os.path.join(
        info.folder, 'plots', '{0}_2d_{1}-{2}.dat'.format(
            info.basename, name1, name2))
    with open(file_name, 'w') as plot_file:
        # The two blocks below used to be duplicated code; they only
        # differed by the level written in the header.
        _write_contour_level(plot_file, info.levels[1],
                             contours.collections[0])
        _write_contour_level(plot_file, info.levels[0],
                             contours.collections[1])


def _write_contour_level(plot_file, level, collection):
    """Write every path of one confidence level to the open file"""
    plot_file.write(
        '# contour for confidence level {0}\n'.format(level))
    for elem in collection.get_paths():
        points = elem.vertices
        for k in range(np.shape(points)[0]):
            plot_file.write("%.8g\t %.8g\n" % (
                points[k, 0], points[k, 1]))
            # stop as soon as the path closes on itself, to not include
            # the inner contours
            if k != 0:
                if all(points[k] == points[0]):
                    plot_file.write("\n")
                    break
        plot_file.write("\n\n")
def iscomment(s):
    """
    Tell whether `s` is a comment line of a MontePython chain file

    Comment lines are exactly those whose first character is '#'.
    """
    return s[:1] == '#'
class Information(object):
    """
    Hold all information for analyzing runs

    One instance is created per analyzed folder: it aggregates the
    command-line options, the parameter names and scales read from the
    log.param, the chains themselves, and the plot customisations.
    """
    # Counting the number of instances, to choose the color map
    _ids = count(0)
    # Flag checking the absence or presence of the interp1d function
    has_interpolate_module = False
    # Actual pairs of colors used by MP.
    # For each pair, the first color is for the 95% contour,
    # and the second for the 68% contour + the 1d probability.
    # Note that, as with the other customisation options, you can specify new
    # values for this in the extra plot_file.
    MP_color = {
        'Red':['#E37C80','#CE121F'],
        'Blue':['#7A98F6','#1157EF'],
        'Green':['#88B27A','#297C09'],
        'Orange':['#F3BE82','#ED920F'],
        'Grey':['#ABABAB','#737373'],
        'Purple':['#B87294','#88004C']
    }
    # order used when several directories are analysed
    MP_color_cycle = [
        MP_color['Red'],
        MP_color['Blue'],
        MP_color['Green'],
        MP_color['Orange'],
        MP_color['Grey'],
        MP_color['Purple']
    ]
    # in the same order, list of transparency levels
    alphas = [0.9, 0.9, 0.9, 0.9, 0.9, 0.9]

    def __init__(self, command_line, other=None):
        """
        The following initialization creates the three tables that can be
        customized in an extra plot_file (see :mod:`parser_mp`).

        Parameters
        ----------
        command_line : Namespace
            it contains the initialised command line arguments
        other : optional
            not used by this initializer; kept for interface stability
        """
        self.to_change = {}
        """
        Dictionary whose keys are the old parameter names, and values are the
        new ones. For instance :code:`{'beta_plus_lambda':'beta+lambda'}`
        """
        self.to_plot = []
        """
        Array of names of parameters to plot. If left empty, all will be
        plotted.

        .. warning::
            If you changed a parameter name with :attr:`to_change`, you need to
            give the new name to this array
        """
        self.new_scales = {}
        """
        Dictionary that redefines some scales. The keys will be the parameter
        name, and the value its scale.
        """
        # Assign a unique id to this instance
        # (.next() is the python 2 spelling of next() on the count iterator)
        self.id = self._ids.next()
        # Defining the sigma contours (1, 2 and 3-sigma)
        self.levels = np.array([68.26, 95.4, 99.7])/100.
        # Follows a bunch of initialisation to provide default members
        self.ref_names, self.backup_names = [], []
        self.scales, self.plotted_parameters = [], []
        self.spam = []
        # Store directly all information from the command_line object into this
        # instance, except the protected members (begin and end with __)
        for elem in dir(command_line):
            if elem.find('__') == -1:
                setattr(self, elem, getattr(command_line, elem))
        # initialise the legend flags
        self.plot_legend_1d = None
        self.plot_legend_2d = None
        # initialize the legend size to be the same as fontsize, but can be
        # altered in the extra file
        # (self.fontsize was copied from command_line by the loop above)
        self.legendsize = self.fontsize
        self.legendnames = []
        # initialize the customisation script flags
        self.custom1d = []
        self.custom2d = []
        # initialise the dictionary enforcing custom axis limits
        self.force_limits = {}
        # Read a potential file describing changes to be done for the parameter
        # names, and number of parameters plotted (can be let empty, all will
        # then be plotted), but also the style of the plot. Note that this
        # overrides the command line options
        # (execfile is python-2 only: it runs the user's plot file with
        # 'info' and 'plt' in scope)
        if command_line.optional_plot_file:
            plot_file_vars = {'info': self,'plt': plt}
            execfile(command_line.optional_plot_file, plot_file_vars)
        # check and store keep_fraction
        if command_line.keep_fraction<=0 or command_line.keep_fraction>1:
            raise io_mp.AnalyzeError("after --keep-fraction you should pass a float >0 and <=1")
        self.keep_fraction = command_line.keep_fraction

    def remap_parameters(self, spam):
        """
        Perform substitutions of parameters for analyzing

        .. note::

            for arbitrary combinations of parameters, the prior will not
            necessarily be flat.
        """
        if hasattr(self, 'redefine'):
            # (dict.iteritems is the python 2 spelling of items())
            for key, value in self.redefine.iteritems():
                # Check that the key was an original name
                if key in self.backup_names:
                    print ' /|\ Transforming', key, 'into', value
                    # We recover the indices of the key
                    index_to_change = self.backup_names.index(key)+2
                    print('/_o_\ The new variable will be called ' +
                          self.ref_names[self.backup_names.index(key)])
                    # Recover all indices of all variables present in the
                    # remapping
                    variable_names = [elem for elem in self.backup_names if
                                      value.find(elem) != -1]
                    indices = [self.backup_names.index(name)+2 for name in
                               variable_names]
                    # Now loop over all files in spam
                    for i in xrange(len(spam)):
                        # Assign variables to their values
                        # (exec evaluates the user-provided expression, so
                        # `value` must be a valid python expression)
                        for index, name in zip(indices, variable_names):
                            exec("%s = spam[i][:, %i]" % (name, index))
                        # Assign to the desired index the combination
                        exec("spam[i][:, %i] = %s" % (index_to_change, value))

    def define_ticks(self):
        """
        Define the tick positions and x-ranges for every parameter

        Ticks are evenly spaced inside the explored interval (shrunk by
        10% on each side); when the exploration hit a prior boundary, at
        bin precision, the corresponding tick and range edge are snapped
        onto that boundary so the meaningful number is displayed.
        """
        self.max_values = self.chain[:, 2:].max(axis=0)
        self.min_values = self.chain[:, 2:].min(axis=0)
        self.span = (self.max_values-self.min_values)
        # Define the place of ticks, given the number of ticks desired, stored
        # in conf.ticknumber
        self.ticks = np.array(
            [np.linspace(self.min_values[i]+self.span[i]*0.1,
                         self.max_values[i]-self.span[i]*0.1,
                         self.ticknumber) for i in range(len(self.span))])
        # Define the x range (ticks start not exactly at the range boundary to
        # avoid display issues)
        self.x_range = np.array((self.min_values, self.max_values)).T
        # In case the exploration hit a boundary (as defined in the parameter
        # file), at the level of precision defined by the number of bins, the
        # ticks and x_range should be altered in order to display this
        # meaningful number instead.
        for i in range(np.shape(self.ticks)[0]):
            x_range = self.x_range[i]
            bounds = self.boundaries[i]
            # Left boundary
            if bounds[0] is not None:
                if abs(x_range[0]-bounds[0]) < self.span[i]/self.bins:
                    self.ticks[i][0] = bounds[0]
                    self.x_range[i][0] = bounds[0]
            # Right boundary
            if bounds[-1] is not None:
                if abs(x_range[-1]-bounds[-1]) < self.span[i]/self.bins:
                    self.ticks[i][-1] = bounds[-1]
                    self.x_range[i][-1] = bounds[-1]

    def write_information_files(self):
        """
        Write the .h_info, .v_info and .tex summary files for this run
        """
        # Store in info_names only the tex_names that were plotted, for this
        # instance, and in indices the corresponding list of indices. It also
        # removes the $ signs, for clarity
        self.info_names = [
            name for index, name in enumerate(self.tex_names) if
            self.ref_names[index] in self.plotted_parameters]
        self.indices = [self.tex_names.index(name) for name in self.info_names]
        self.tex_names = [name for index, name in enumerate(self.tex_names) if
                          self.ref_names[index] in self.plotted_parameters]
        self.info_names = [name.replace('$', '') for name in self.info_names]
        # Define the bestfit array
        # (sorted_indices[0] points at the highest-likelihood sample)
        self.bestfit = np.zeros(len(self.ref_names))
        for i in xrange(len(self.ref_names)):
            self.bestfit[i] = self.chain[self.sorted_indices[0], :][2+i]
        # Write down to the .h_info file all necessary information
        self.write_h_info()
        self.write_v_info()
        self.write_tex()

    def write_h_info(self):
        """
        Write the horizontal summary table (one column per parameter)
        """
        with open(self.h_info_path, 'w') as h_info:
            h_info.write(' param names\t: ')
            for name in self.info_names:
                h_info.write("%-14s" % name)
            write_h(h_info, self.indices, 'R-1 values', '% .6f', self.R)
            write_h(h_info, self.indices, 'Best Fit ', '% .6e', self.bestfit)
            write_h(h_info, self.indices, 'mean ', '% .6e', self.mean)
            write_h(h_info, self.indices, 'sigma ', '% .6e',
                    (self.bounds[:, 0, 1]-self.bounds[:, 0, 0])/2.)
            h_info.write('\n')
            # offsets of the credible intervals around the mean
            write_h(h_info, self.indices, '1-sigma - ', '% .6e',
                    self.bounds[:, 0, 0])
            write_h(h_info, self.indices, '1-sigma + ', '% .6e',
                    self.bounds[:, 0, 1])
            write_h(h_info, self.indices, '2-sigma - ', '% .6e',
                    self.bounds[:, 1, 0])
            write_h(h_info, self.indices, '2-sigma + ', '% .6e',
                    self.bounds[:, 1, 1])
            write_h(h_info, self.indices, '3-sigma - ', '% .6e',
                    self.bounds[:, 2, 0])
            write_h(h_info, self.indices, '3-sigma + ', '% .6e',
                    self.bounds[:, 2, 1])
            # bounds (absolute positions: mean plus the offsets above)
            h_info.write('\n')
            write_h(h_info, self.indices, '1-sigma > ', '% .6e',
                    self.mean+self.bounds[:, 0, 0])
            write_h(h_info, self.indices, '1-sigma < ', '% .6e',
                    self.mean+self.bounds[:, 0, 1])
            write_h(h_info, self.indices, '2-sigma > ', '% .6e',
                    self.mean+self.bounds[:, 1, 0])
            write_h(h_info, self.indices, '2-sigma < ', '% .6e',
                    self.mean+self.bounds[:, 1, 1])
            write_h(h_info, self.indices, '3-sigma > ', '% .6e',
                    self.mean+self.bounds[:, 2, 0])
            write_h(h_info, self.indices, '3-sigma < ', '% .6e',
                    self.mean+self.bounds[:, 2, 1])

    def write_v_info(self):
        """Write vertical info file (one row per parameter)"""
        with open(self.v_info_path, 'w') as v_info:
            v_info.write('%-15s\t: %-11s' % ('param names', 'R-1'))
            v_info.write(' '.join(['%-11s' % elem for elem in [
                'Best fit', 'mean', 'sigma', '1-sigma -', '1-sigma +',
                '2-sigma -', '2-sigma +', '1-sigma >', '1-sigma <',
                '2-sigma >', '2-sigma <']]))
            for index, name in zip(self.indices, self.info_names):
                v_info.write('\n%-15s\t: % .4e' % (name, self.R[index]))
                v_info.write(' '.join(['% .4e' % elem for elem in [
                    self.bestfit[index], self.mean[index],
                    (self.bounds[index, 0, 1]-self.bounds[index, 0, 0])/2.,
                    self.bounds[index, 0, 0], self.bounds[index, 0, 1],
                    self.bounds[index, 1, 0], self.bounds[index, 1, 1],
                    self.mean[index]+self.bounds[index, 0, 0],
                    self.mean[index]+self.bounds[index, 0, 1],
                    self.mean[index]+self.bounds[index, 1, 0],
                    self.mean[index]+self.bounds[index, 1, 1]]]))

    def write_tex(self):
        """Write a tex table containing the main results """
        with open(self.tex_path, 'w') as tex:
            tex.write("\\begin{tabular}{|l|c|c|c|c|} \n \\hline \n")
            tex.write("Param & best-fit & mean$\pm\sigma$ ")
            tex.write("& 95\% lower & 95\% upper \\\\ \\hline \n")
            for index, name in zip(self.indices, self.tex_names):
                tex.write("%s &" % name)
                tex.write("$%.4g$ & $%.4g_{%.2g}^{+%.2g}$ " % (
                    self.bestfit[index], self.mean[index],
                    self.bounds[index, 0, 0], self.bounds[index, 0, 1]))
                tex.write("& $%.4g$ & $%.4g$ \\\\ \n" % (
                    self.mean[index]+self.bounds[index, 1, 0],
                    self.mean[index]+self.bounds[index, 1, 1]))
            tex.write("\\hline \n \\end{tabular} \\\\ \n")
            tex.write("$-\ln{\cal L}_\mathrm{min} =%.6g$, " % (
                self.min_minus_lkl))
            tex.write("minimum $\chi^2=%.4g$ \\\\ \n" % (
                self.min_minus_lkl*2.))
| baudren/montepython_public | montepython/analyze.py | Python | mit | 93,542 | [
"Gaussian"
] | 9b0ace6d2080c8a8a9cbb78a0b1aadb7a507975ba1f4cd2dd24ba172a1c21f6e |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os.path
from commoncode.testcase import FileBasedTesting
from cluecode_assert_utils import check_detection
class TestYears(FileBasedTesting):
    """
    Check that copyright years are correctly detected in assorted inputs.
    """
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def _assert_years(self, test_path, expected):
        # Resolve the fixture under the data directory and compare the
        # detected years against the expected list.
        test_file = self.get_test_loc(test_path)
        check_detection(expected, test_file, what='years')

    def test_years_hostpad(self):
        self._assert_years(
            'years/years_hostpad-hostapd_cli_c.c',
            [u'2004-2005', u'2004-2005'])

    def test_years_ifrename(self):
        self._assert_years(
            'years/years_ifrename-ifrename_c.c', [u'2004'])

    def test_years_in_c(self):
        self._assert_years(
            'years/years_in_c-c.c',
            [u'2005', u'2004', u'2003', u'2002', u'2001',
             u'2000', u'1999', u'1998', u'1997', u'1996'])

    def test_years_in_copyright(self):
        self._assert_years(
            'years/years_in_copyright-COPYRIGHT_madwifi.madwifi',
            [u'2002-2006'])

    def test_years_in_h(self):
        self._assert_years('years/years_in_h-ah_h.h', [u'2002-2006'])

    def test_years_in_license(self):
        self._assert_years(
            'years/years_in_license-COPYING_gpl.gpl', [u'1989, 1991'])

    def test_years_in_readme(self):
        self._assert_years('years/years_in_readme-README', [u'2002-2006'])

    def test_years_in_txt(self):
        self._assert_years(
            'years/years_in_txt.txt',
            [u'2005', u'2004', u'2003', u'2002', u'2001',
             u'2000', u'1999', u'1998', u'1997', u'1996'])

    def test_years_in_uuencode_binary(self):
        self._assert_years(
            'years/years_in_uuencode_binary-mips_be_elf_hal_o_uu.uu',
            [u'2002-2006'])
| yasharmaster/scancode-toolkit | tests/cluecode/test_years.py | Python | apache-2.0 | 4,079 | [
"VisIt"
] | 816ffcc976afc0be70073dd884b42ac4bff489a8c9a1c7a19007bd4b8fe2a4e7 |
#!/usr/bin/env python
"""
Single-point energy check for a GROMACS system through OpenMM.

Loads topol.top / GMX_R.pdb with ParmEd, builds a cutoff-free OpenMM
system on the Reference platform, and prints the per-term energy
decomposition plus the total.
"""
from __future__ import division, print_function

from collections import defaultdict
import sys

# OpenMM Imports
import simtk.openmm as mm
import simtk.openmm.app as app
import parmed as pmd
# ParmEd Imports
from parmed import load_file, unit as u
from parmed.charmm import CharmmParameterSet
from parmed.openmm import StateDataReporter, energy_decomposition_system

#params = CharmmParameterSet('parmed_par_oplsaam.inp')
# Load the GROMACS topology together with the PDB coordinates
top = load_file('topol.top', xyz='GMX_R.pdb')
#ala5_gas = load_file('GP.psf')
#ala5_crds = load_file('plt.pdb')

# Create the OpenMM system
print('Creating OpenMM System')
system = top.createSystem(nonbondedMethod=app.NoCutoff,
                          nonbondedCutoff=1000.0*u.angstroms)

# Create the integrator to do Langevin dynamics
integrator = mm.LangevinIntegrator(
    300*u.kelvin,        # Temperature of heat bath
    1.0/u.picoseconds,   # Friction coefficient
    2.0*u.femtoseconds,  # Time step
)

# Define the platform to use; CUDA, OpenCL, CPU, or Reference. Or do not specify
# the platform to use the default (fastest) platform
platform = mm.Platform.getPlatformByName('Reference')
sim = app.Simulation(top.topology, system, integrator, platform)
sim.context.setPositions(top.positions)

# NOTE: despite the message below, no minimisation is performed here --
# only a single-point energy decomposition of the loaded structure.
print('Minimizing energy')
struct = pmd.load_file('GMX_R.pdb')
ecomps = pmd.openmm.energy_decomposition_system(struct, system)

# Iterate the (name, energy) pairs directly instead of indexing with
# range(len(...)), accumulating the total on the way
tot_ene = 0.0
for comp in ecomps:
    tot_ene += comp[1]
    print(comp[0], comp[1])
print('Total-energy %6.6f' % tot_ene)
| leelasd/OPLS-AAM_for_Gromacs | GMX_TEST/DIP/R/test_OMM_GMX.py | Python | mit | 1,580 | [
"CHARMM",
"OpenMM"
] | 891d90a3fe8fbe9cf2098c72b54054732a3f87a7cf09e85c8bb00668f0163e47 |
from numpy.random import normal
from scipy.signal import gausspulse

from gwpy.timeseries import TimeSeries

# Generate a `TimeSeries` containing Gaussian noise sampled at 4096 Hz,
# centred on GPS time 0, with a sine-Gaussian pulse ('glitch') at
# 500 Hz:
# 16384 samples = 4 s of noise; epoch=-2 centres the span on GPS 0
noise = TimeSeries(normal(loc=1, size=4096*4), sample_rate=4096, epoch=-2)
# The pulse peaks at t=0 and is scaled (*4) to stand out above the noise
glitch = TimeSeries(gausspulse(noise.times.value, fc=500) * 4, sample_rate=4096)
data = noise + glitch

# Compute and plot the Q-transform of these data:
q = data.q_transform()
plot = q.plot()
# Zoom onto 0.4 s around the glitch and re-reference times to GPS 0
plot.set_xlim(-.2, .2)
plot.set_epoch(0)
plot.show()
| gwpy/gwpy.github.io | docs/0.8.1/api/gwpy-timeseries-TimeSeries-7.py | Python | gpl-3.0 | 579 | [
"Gaussian"
] | 9d0dcf500880207910619d43d5ccbf57948ed23962bbbc9060488de5c2e90985 |
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class PubChemFingerprint(MolecularFeaturizer):
    """PubChem Fingerprint.

    The PubChem fingerprint is an 881 bit structural key,
    which is used by PubChem for similarity searching.
    Please confirm the details in [1]_.

    References
    ----------
    .. [1] ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.pdf

    Notes
    -----
    This class requires RDKit and PubChemPy to be installed.
    PubChemPy uses the PubChem REST API to fetch the fingerprint, so
    internet access is needed.
    """

    def __init__(self):
        """Initialize this featurizer.

        Raises
        ------
        ImportError
            If RDKit or PubChemPy is not installed.
        """
        try:
            from rdkit import Chem  # noqa
            import pubchempy as pcp  # noqa
        except ModuleNotFoundError as e:
            # Both RDKit and PubChemPy are imported in the same try block,
            # so the error message must name both (the original only
            # mentioned PubChemPy). Chain the original exception for context.
            raise ImportError(
                "This class requires RDKit and PubChemPy to be installed."
            ) from e
        self.get_pubchem_compounds = pcp.get_compounds

    def _featurize(self, mol: RDKitMol) -> np.ndarray:
        """
        Calculate PubChem fingerprint.

        Parameters
        ----------
        mol: rdkit.Chem.rdchem.Mol
            RDKit Mol object

        Returns
        -------
        np.ndarray
            1D array of PubChem fingerprint bits for `mol`. The length is 881.
        """
        from rdkit import Chem
        smiles = Chem.MolToSmiles(mol)
        # Query PubChem by SMILES; the CACTVS fingerprint is a string of
        # '0'/'1' characters which we convert to an integer bit array.
        pubchem_compound = self.get_pubchem_compounds(smiles, 'smiles')[0]
        feature = [int(bit) for bit in pubchem_compound.cactvs_fingerprint]
        return np.asarray(feature)
| lilleswing/deepchem | deepchem/feat/molecule_featurizers/pubchem_fingerprint.py | Python | mit | 1,454 | [
"RDKit"
] | 7a276dde275b80765d60522570cf2a053e54d196819eaf8bf4613307509e5f83 |
#!/usr/bin/env python
# Copyright (C) 2012 Tianyang Li
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
"""
test uniformity of reads distribution on contigs using the
exact multinomial test
-b bowtie2 results
-l read length (assumed to be all equal)
-c assembled contigs
-t number of trials to use in test
"""
import sys
import getopt
import rpy2.robjects as robj
from rpy2.robjects.packages import importr
from Bio import SeqIO
import HTSeq
import datetime
def count_bowtie2_align(bowtie2, db_len):
    """Tally alignment start positions from a bowtie2 SAM file into db_len.

    db_len maps contig name -> [contig length, per-base start counts,
    total read count]; only aligned reads whose start position lies
    within the contig are counted.
    """
    for aln in HTSeq.SAM_Reader(bowtie2):
        if not aln.aligned:
            continue
        contig = db_len[aln.iv.chrom]
        start = aln.iv.start
        if 0 <= start < contig[0]:
            contig[1][start] += 1
            contig[2] += 1
def get_fasta_len(fasta_file):
    """Map each FASTA record id to bookkeeping state for read counting.

    Returns a dict of
    record id -> [sequence length (int), per-base read-start counts (list),
                  total reads count (int)].
    """
    fasta_len = {}
    for record in SeqIO.parse(fasta_file, 'fasta'):
        seq_len = len(record.seq)
        fasta_len[str(record.id)] = [seq_len, [0] * seq_len, 0]
    return fasta_len
def main(args):
    """Parse command-line options, count read starts per contig, and run the
    exact multinomial test (Monte Carlo, via R's EMT package) on every contig
    with more than 2 reads and length > read_len - 1.

    Options: -b bowtie2 SAM, -l read length, -c contigs FASTA,
    -t Monte Carlo trials, -o output file.
    """
    bowtie2, read_len, contigs, trial, fout = None, None, None, None, None
    try:
        opts, args = getopt.getopt(args, 'b:l:c:t:o:')
    except getopt.GetoptError as err:
        print >> sys.stderr, str(err)
        sys.exit(2)
    for o, a in opts:
        if o == '-b':
            bowtie2 = a
        if o == '-l':
            read_len = int(a)
        if o == '-c':
            contigs = a
        if o == '-t':
            trial = int(a)
        if o == '-o':
            fout = a
    # Validate -o as well: previously a missing -o slipped past this check
    # and caused a confusing failure at open(None) below. Also use `is None`
    # rather than `== None`.
    if (bowtie2 is None or read_len is None or contigs is None
            or trial is None or fout is None):
        print >> sys.stderr, "Missing options!"
        sys.exit(2)
    contigs_len = get_fasta_len(contigs)
    count_bowtie2_align(bowtie2, contigs_len)
    EMT = importr('EMT')
    mt = EMT.multinomial_test
    fout = open(fout, 'w')
    # enumerate(..., 1) replaces the original zip(items, range(1, n + 1)).
    for i, contig_entry in enumerate(contigs_len.items(), 1):
        if contig_entry[1][2] > 2 and contig_entry[1][0] > read_len - 1:
            print >> sys.stderr, "#%d: length: %d, reads: %d, time: %s" % (i, contig_entry[1][0], contig_entry[1][2], datetime.datetime.now())
            # Only positions where a full read fits are valid start sites,
            # hence the [:-(read_len - 1)] truncation and the uniform
            # expected probability over (length - read_len + 1) sites.
            pv = mt(robj.IntVector(contig_entry[1][1][:-(read_len - 1)]), robj.FloatVector([1 / float(contig_entry[1][0] - read_len + 1)] * (contig_entry[1][0] - read_len + 1)), MonteCarlo=True, ntrial=trial)
            # contig name, contig length, reads count, p-value
            fout.write("%s %d %d %f\n" % (contig_entry[0], contig_entry[1][0], contig_entry[1][2], pv[-1][0]))
    fout.close()
if __name__ == '__main__':
    # Pass only the options; argv[0] is the script name.
    main(sys.argv[1:])
| tianyang-li/meta-transcriptome | uniform-test/contig_bowtie2_uniform_test_multinomial.py | Python | gpl-3.0 | 3,255 | [
"HTSeq"
] | 5844bf537ba62da0ea94c7933e8cb73973b3b832349f6f072141a2e54390a089 |
"""
Test course update
"""
from uuid import uuid4
from bok_choy.web_app_test import WebAppTest
from regression.pages.studio.course_info_studio import CourseUpdatesPageExtended
from regression.tests.helpers.api_clients import LmsLoginApi
from regression.tests.helpers.utils import get_course_info
class CourseUpdateTest(WebAppTest):
    """Exercise creation, editing and deletion of course updates."""

    def setUp(self):
        super().setUp()
        LmsLoginApi().authenticate(self.browser)
        self.course_info = get_course_info()
        self.course_update_page = CourseUpdatesPageExtended(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_update_page.visit()
        # CSS selector for the rendered body of each course-update post.
        self.course_update_content_selector = (
            '#course-update-list li .post-preview .update-contents')
        self.course_update_text = f'New update:{uuid4().hex}'

    def create_course_update(self):
        """Create a course update and assert it was saved."""
        page = self.course_update_page
        page.open_new_update_form()
        page.write_update_and_save(self.course_update_text)
        self.assertEqual(
            page.q(css=self.course_update_content_selector)[0].text,
            self.course_update_text
        )

    def test_course_update(self):
        """A course update can be created, edited, then deleted."""
        page = self.course_update_page
        # Start from a clean slate.
        page.delete_all_course_updates()
        self.create_course_update()
        # Edit the update and verify the new text is rendered.
        edited_text = f'Edited update:{uuid4().hex}'
        page.edit_course_update(edited_text)
        self.assertEqual(
            page.q(css=self.course_update_content_selector)[0].text,
            edited_text
        )
        # Delete the update. Absence of any update implies success; if one
        # is still rendered, its contents must differ from the created text.
        page.delete_course_update()
        remaining = page.q(css=self.course_update_content_selector)
        if remaining.present:
            self.assertNotEqual(remaining[0].text, self.course_update_text)
class CourseHandoutTest(WebAppTest):
    """Exercise editing of the course handout."""

    def setUp(self):
        super().setUp()
        LmsLoginApi().authenticate(self.browser)
        self.course_info = get_course_info()
        self.course_update_page = CourseUpdatesPageExtended(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_update_page.visit()

    def test_edit_course_handout(self):
        """The course handout can be edited and then cleared again."""
        page = self.course_update_page
        handout_text = f'New handout content:{uuid4().hex}'
        # Edit the handout and verify the saved content is rendered.
        page.edit_course_handout(handout_text)
        self.assertEqual(
            page.q(css='.handouts-content')[0].text,
            handout_text
        )
        # Discard the content and verify the handout is empty.
        page.edit_course_handout("")
        self.assertEqual(
            page.q(css='.handouts-content')[0].text,
            ""
        )
| edx/edx-e2e-tests | regression/tests/studio/test_course_update.py | Python | agpl-3.0 | 4,097 | [
"VisIt"
] | 874041e943ddd47d45a14e4c97e8e59852b18007fdc98347350db02e461e72db |
from LOTlib.Miscellaneous import Infinity, normlogpdf
from math import isnan
class GaussianLikelihood(object):
    """Likelihood mixin scoring a single datum under a normal distribution.

    Wraps normlogpdf so that a nan score is mapped to -Infinity.
    """
    def compute_single_likelihood(self, datum):
        """Evaluate self on datum.input and score the result against
        datum.output with standard deviation datum.ll_sd."""
        score = normlogpdf(self(*datum.input), datum.output, datum.ll_sd)
        return -Infinity if isnan(score) else score
class MultidimensionalGaussianLikelihood(object):
    """Likelihood mixin for vector-valued outputs.

    Each component is scored independently under a normal and the
    log-likelihoods summed; a nan total is mapped to -Infinity.
    """
    def compute_single_likelihood(self, datum):
        """Sum per-component normal log-densities of self(*datum.input)
        against the corresponding entries of datum.output."""
        predicted = self(*datum.input)
        total = sum(normlogpdf(p, observed, datum.ll_sd)
                    for p, observed in zip(predicted, datum.output))
        return -Infinity if isnan(total) else total
"Gaussian"
] | 16e80e66fcadd6ad98cc0d65a3d49070f2fd58f33043ac2e63b5b18fb566574c |
# Copyright (C) 2017,2018,2019,2020 Luiz Felippe S. Rodrigues <luizfelippesr@alumni.usp.br>
#
# This file is part of GalMag.
#
# GalMag is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalMag is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalMag. If not, see <http://www.gnu.org/licenses/>.
#
"""
Contains a class for computing a selection of observables
"""
from B_generators.B_generator import B_generator
import electron_profiles as prof
import numpy as np
import util
class Observables(B_generator):
    """
    Synchrotron emission and Faraday rotation observables

    Class properties compute and return a range of observables computed along
    the axis specified in the initialization. Results are computed lazily and
    memoized in an internal cache dictionary.

    Parameters
    ----------
    B_field : B_field
        The magnetic field based on which the observables should be computed.
        At the moment, only B_field constructed using a uniform cartesian grid
        is supported
    direction : str
        The coordinate axis parallel to the line-of-sight (i.e. the axis along
        which the integrations are performed).
        Valid values: 'x'/'edge-on', 'y' or 'z'/'face-on'.
    obs_electron_density_function : func
        A function which receives three coordinate arrays r_spherical, theta, phi
        and returns the electron density
    obs_cosmic_ray_function : func
        A function which receives three coordinate arrays r_spherical, theta, phi
        and returns the cosmic ray density
    obs_wavelength_m : float
        The wavelength used in the synchrotron emission calculations.
        Default: 5 cm
    obs_emissivivty_normalization : float
        Needs to be adjusted
    """
    # NOTE(review): `default_parameters={}` is a mutable default argument;
    # it appears to be passed through unmodified, but confirm the superclass
    # never mutates it.
    def __init__(self, B_field, default_parameters={},
                 dtype=np.float64, direction=None, **kwargs):

        if B_field.grid.grid_type != 'cartesian':
            raise NotImplementedError('At the moment, only cartesian grids are supported.')

        self.B_field = B_field
        resolution = B_field.resolution.copy()
        self.direction = direction

        # Selects the line-of-sight axis; caches the field component parallel
        # to it (_Bp) and the coordinate values along it (_depths).
        if direction == 'x' or direction == 'edge-on':
            self._integration_axis = 0
            self._Bp = self.B_field.x
            self._depths = self.B_field.grid.x[:,0,0]
        elif direction == 'y':
            self._integration_axis = 1
            self._Bp = self.B_field.y
            self._depths = self.B_field.grid.y[0,:,0]
        elif direction == 'z' or direction == 'face-on':
            self._integration_axis = 2
            self._Bp = self.B_field.z
            self._depths = self.B_field.grid.z[0,0,:]
        else:
            raise NotImplementedError('Only "x", "y" and "z" directions are currently supported."')

        # Uniform grid: the step along the LoS axis is constant.
        self._ddepth = np.abs(self._depths[0]-self._depths[1])

        # Observables are 2D maps: collapse the LoS axis to length 1.
        resolution[self._integration_axis] = 1

        super(Observables, self).__init__(box=B_field.box,
                                          resolution=resolution,
                                          default_parameters=default_parameters,
                                          dtype=dtype)
        # Reads in the parameters
        self.parameters = self._parse_parameters(kwargs)
        # Cached quantities dictionary
        self._cache = {}

    @property
    def _builtin_parameter_defaults(self):
        """Default values of the observables parameters."""
        builtin_defaults = {
            'obs_electron_density_function': prof.simple_ne, # n_e
            'obs_electron_at_reference_radius': 1, # n_e0 [cm^-3]
            'obs_cosmic_ray_function': prof.constant_ncr, # n_{cr} [cm^-3]
            'obs_wavelength_m': 5e-2, # 5 cm
            'obs_gamma': 1.0, # cosmic-ray spectral index
            'obs_emissivivty_normalization': 1, # This needs to be updated
        }
        return builtin_defaults

    def get_B_field(self):
        """B_field based on which this Observables object was constructed"""
        return self.B_field

    @property
    def synchrotron_emissivity(self):
        r"""
        Synchrotron emissivity along a coordinate axis

        .. math::
            \epsilon = C\, n_{cr} B_\perp^{(\gamma+1)/2)} \lambda^{(\gamma-1)/2}

        Returns
        -------
        3D-d2o
            Syncrotron emmissivity, along a coordinate axis
        """
        if 'synchrotron_emissivity' not in self._cache:
            lamb = self.parameters['obs_wavelength_m']
            gamma = self.parameters['obs_gamma']
            norm = self.parameters['obs_emissivivty_normalization']
            self._cache['synchrotron_emissivity'] = \
                self._compute_synchrotron_emissivity(lamb, gamma, norm)
        return self._cache['synchrotron_emissivity']

    def _compute_synchrotron_emissivity(self, lamb, gamma, norm):
        r"""
        Helper for synchrotron_emissivity

        Parameters
        ----------
        lamb : float
            wavelength
        gamma : float
            spectral index of the cosmic ray energy distribution
        norm : float
            normalization of the emissivity

        Returns
        -------
        synchrotron emissivity (along a given axis)
        """
        # B_perp^2 is the sum of the squares of the two field components
        # perpendicular to the line of sight.
        if self.direction == 'x':
            Bperp2 = self.B_field.y**2 + self.B_field.z**2
        elif self.direction == 'y':
            Bperp2 = self.B_field.x**2 + self.B_field.z**2
        elif self.direction == 'z':
            # Bperp^2 = Bx^2 + By^2
            Bperp2 = self.B_field.x**2 + self.B_field.y**2

        ncr = self.parameters['obs_cosmic_ray_function'](
            self.B_field.grid.r_spherical,
            self.B_field.grid.theta,
            self.B_field.grid.phi)

        # NOTE(review): the `norm` argument is currently unused here —
        # confirm whether the emissivity should be multiplied by it.
        return ncr * Bperp2**((gamma+1)/4) * lamb**((gamma-1)/2)

    @property
    def intrinsic_polarization_degree(self):
        r"""
        Intrinsic degree of polarization

        .. math::
            p_0 = \frac{\gamma+1}{\gamma + 7/3}
        """
        if 'intrinsic_polarization_degree' not in self._cache:
            gamma = self.parameters['obs_gamma']
            self._cache['intrinsic_polarization_degree']=(gamma+1.0)/(gamma+7./3.)
        return self._cache['intrinsic_polarization_degree']

    @property
    def intrinsic_polarization_angle(self):
        r"""
        Intrinsic polarization angle

        .. math::
            \psi_0 = \frac{\pi}{2} + \tan^{-1}\left(\frac{B_y}{B_x}\right)
        """
        if 'intrinsic_polarization_angle' not in self._cache:
            # (B1, B2) are the two sky-plane components for the chosen LoS.
            if self.direction == 'x':
                B1 = self.B_field.z
                B2 = self.B_field.y
            elif self.direction == 'y':
                B1 = self.B_field.x
                B2 = self.B_field.z
            elif self.direction == 'z':
                B1 = self.B_field.y
                B2 = self.B_field.x
            else:
                raise ValueError

            psi0 = np.pi/2.0 + util.arctan2(B1,B2)
            # Wrap angles greater than pi back into the (-pi, pi] range.
            psi0[psi0>np.pi] = psi0[psi0>np.pi]-2.*np.pi

            self._cache['intrinsic_polarization_angle'] = psi0
        return self._cache['intrinsic_polarization_angle']

    @property
    def electron_density(self):
        r"""
        Thermal electron density evaluated on this grid used for calculations.

        This is set through the parameter obs_electron_density_function,
        chosen during initialization.
        """
        if 'electron_density' not in self._cache:
            # Gets local grid (aka beginning of d2o gymnastics)
            local_r_sph_grid = self.B_field.grid.r_spherical.get_local_data()
            local_theta_grid = self.B_field.grid.theta.get_local_data()
            local_phi_grid = self.B_field.grid.phi.get_local_data()

            ne0 = self.parameters['obs_electron_at_reference_radius']
            # Evaluate obs_electron_density_function on the local grid
            local_ne = self.parameters['obs_electron_density_function'](
                local_r_sph_grid,
                local_theta_grid,
                local_phi_grid,
                ne0=ne0)
            # Initializes global array and set local data into a d2o
            global_ne = self.B_field.grid.get_prototype(dtype=self.dtype)
            global_ne.set_local_data(local_ne, copy=False)

            self._cache['electron_density'] = global_ne
        return self._cache['electron_density']

    @property
    def psi(self):
        r"""
        Polarization angle of radiation emitted at a given depth Faraday
        rotated over the line of sight.

        .. math::
            \psi(z) = \psi_0(z)
            + 0.812\,{\rm rad}\left(\frac{\lambda}{1\,\rm m}\right)^2 \int_z^\infty
            \left(\frac{n_e(z')}{1\,\rm cm^{-3}}\right)
            \left(\frac{B_\parallel(z')}{1\,\mu\rm G}\right)
            \frac{{\rm d} z'}{1\,\rm pc}
        """
        if 'psi' not in self._cache:
            lamb = self.parameters['obs_wavelength_m']
            ne = self.electron_density
            self._cache['psi'] = self._compute_psi(lamb, ne)
        return self._cache['psi']

    def _compute_psi(self, lamb, ne, from_bottom=False):
        """Computes the Faraday rotated polarization angle of radiation emitted
        at each depth

        Parameters
        ----------
        lamb : float
            Wavelength used for the computation (in meters)
        ne : 3D d2o
            Array containing the electron density in the galaxy
        from_bottom : bool
            Whether the observation is done "from bottom" (integration starting
            from negative values) or "from top" (positive). Default: False
        """
        Bp = self._Bp
        # Depth step times 1000 — presumably converting the grid unit (kpc?)
        # to pc so the 0.812 rad m^-2 coefficient applies. TODO confirm units.
        ddepth = self._ddepth * 1000
        axis = [slice(None),slice(None),slice(None)]
        ax_n = self._integration_axis
        slices = [slice(None),slice(None),slice(None)]
        psi = self.intrinsic_polarization_angle.copy()
        for i, depth in enumerate(self._depths):
            axis[ax_n] = slice(i,i+1) # e.g. for z, this will select psi[:,:,i]
            # The observer can be at the top or bottom
            if not from_bottom:
                # Will integrate from 0 to i; e.g. for z, Bp[:,:,0:i]
                slices[ax_n] = slice(0,i)
            else:
                # Will integrate from i to the end
                # NOTE(review): slice(i,-1) excludes the final cell along the
                # axis — confirm whether slice(i, None) was intended.
                slices[ax_n] = slice(i,-1)
            integrand = ne[slices] * Bp[slices] * ddepth
            integral = integrand.sum(axis=ax_n)
            # Adjust the axes and uses full data (to make sure to avoid mistakes)
            integral = np.expand_dims(integral.get_full_data(), ax_n)
            # psi(z) = psi0(z) + 0.812\lambda^2 \int_-\infty^z ne(z') Bpara(z') dz'
            psi[axis] += 0.812*lamb**2 * integral
        return psi

    @property
    def Stokes_I(self):
        r"""
        Stokes parameter I (total emmission)

        .. math::
            I(\lambda) = \int_{-\infty}^\infty \epsilon(w,\lambda) dw

        where :math:`w` is the specified coordinate axis.
        """
        if 'Stokes_I' not in self._cache:
            self._cache['Stokes_I'] = self._compute_Stokes('I')
        return self._cache['Stokes_I']

    @property
    def Stokes_Q(self):
        r"""
        Stokes parameter Q

        .. math::
            Q(\lambda) = \int_{-\infty}^\infty \epsilon(w,\lambda) p_0(w) \cos[2 \psi(w)] dw

        where :math:`w` is the specified coordinate axis.
        """
        if 'Stokes_Q' not in self._cache:
            self._cache['Stokes_Q'] = self._compute_Stokes('Q')
        return self._cache['Stokes_Q']

    @property
    def Stokes_U(self):
        r"""
        Stokes parameter U

        .. math::
            U(\lambda) = \int_{-\infty}^\infty \epsilon(w,\lambda) p_0(w) \sin[2 \psi(w)] dw

        where :math:`w` is the specified coordinate axis.
        """
        if 'Stokes_U' not in self._cache:
            self._cache['Stokes_U'] = self._compute_Stokes('U')
        return self._cache['Stokes_U']

    def _compute_Stokes(self, parameter):
        r"""
        Computes Stokes parameters I, Q, U

        .. math::
            I(\lambda) = \int_{-\infty}^\infty \epsilon(w,\lambda) dw

        .. math::
            Q(\lambda) = \int_{-\infty}^\infty \epsilon(w,\lambda) p_0(w) \cos[2 \psi(w)] dw

        .. math::
            U(\lambda) = \int_{-\infty}^\infty \epsilon(w,\lambda) p_0(w) \sin[2 \psi(w)] dw

        where :math:`w` is the specified coordinate axis.

        Parameters
        ----------
        parameter : str
            Either 'I', 'Q' or 'U'

        Returns
        -------
        The specified Stokes parameter
        """
        emissivity = self.synchrotron_emissivity
        # Computes the integrand
        if parameter == 'I':
            integrand = emissivity * self._ddepth
        elif parameter == 'Q':
            p0 = self.intrinsic_polarization_degree
            cos2psi = util.distribute_function(np.cos, 2.0*self.psi)
            integrand = emissivity * p0 * cos2psi * self._ddepth
        elif parameter == 'U':
            p0 = self.intrinsic_polarization_degree
            sin2psi = util.distribute_function(np.sin, 2.0*self.psi)
            integrand = emissivity * p0 * sin2psi * self._ddepth
        else:
            # Unknown Stokes parameter requested.
            raise ValueError
        # Sums/integrates along the specified axis and returns
        return integrand.sum(axis=self._integration_axis)

    @property
    def observed_polarization_angle(self):
        r"""
        Observed integrated polarization angle

        .. math::
            \Psi = \frac{1}{2} \arctan\left(\frac{U}{Q}\right)
        """
        if 'observed_polarization_angle' not in self._cache:
            angle = 0.5 * util.arctan2(self.Stokes_U,self.Stokes_Q)
            self._cache['observed_polarization_angle'] = angle
        return self._cache['observed_polarization_angle']

    @property
    def polarized_intensity(self):
        r"""
        Polarized intensity

        .. math::
            P = \sqrt{Q^2 + U^2}
        """
        if 'polarized_intensity' not in self._cache:
            P = (self.Stokes_U**2 + self.Stokes_Q**2 )**0.5
            self._cache['polarized_intensity'] = P
        return self._cache['polarized_intensity']

    @property
    def rotation_measure(self):
        r"""
        Rotation measure

        .. math::
            RM = (0.812\,{\rm rad\,m}^{-2}) \int \frac{n_e(z)}{1\,\rm cm^{-3}}
            \frac{B_z}{1\,\mu\rm G}
            \frac{{\rm d} z}{1\,\rm pc}
        """
        if 'RM' not in self._cache:
            ne = self.electron_density
            self._cache['RM'] = self._compute_RM(ne)
        return self._cache['RM']

    def _compute_RM(self, ne, from_bottom=False):
        """
        Computes the Faraday rotation measure

        Parameters
        ----------
        ne : 3D d2o
            Array containing the electron density in the galaxy (in cm^{-3})
        from_bottom : bool
            Present for symmetry with _compute_psi; currently unused here.
        """
        Bp = self._Bp
        ddepth = self._ddepth * 1000 # Converts from the grid unit (kpc?) to pc — TODO confirm
        ax_n = self._integration_axis
        integrand = ne * Bp * ddepth
        return 0.812*integrand.sum(axis=ax_n)
| luizfelippesr/galmag | galmag/Observables.py | Python | gpl-3.0 | 15,729 | [
"Galaxy"
] | 6c8100294789bec90627808a21af17c2322f9718f81d3800212f93867aa77c60 |
#!/usr/bin/env python
# encoding: utf-8
#
# bpt.py
#
# Created by José Sánchez-Gallego on 19 Jan 2017.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import warnings
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
from marvin.core.exceptions import MarvinDeprecationWarning, MarvinError
from marvin.utils.plot import bind_to_figure
# NOTE(review): this is spelled __ALL__ (upper case), which Python does not
# treat specially; the name that controls `from module import *` is __all__.
__ALL__ = ('get_snr', 'kewley_sf_nii', 'kewley_sf_sii', 'kewley_sf_oi',
           'kewley_comp_nii', 'kewley_agn_sii', 'kewley_agn_oi',
           'bpt_kewley06')
def get_snr(snr_min, emission_line, default=3):
    """Return the minimum SNR to apply to ``emission_line``.

    A scalar ``snr_min`` applies to every emission line. A dict ``snr_min``
    is looked up by emission-line key, falling back to ``default`` when the
    line is not present.
    """
    if not isinstance(snr_min, dict):
        return snr_min
    return snr_min.get(emission_line, default)
def get_masked(maps, emline, snr=1):
    """Return the masked Gaussian-flux map for ``emline``.

    In addition to the DAP mask, masks spaxels with non-positive flux,
    SNR below ``snr``, or zero inverse variance.
    """
    line = maps['emline_gflux_' + emline]
    masked = line.masked
    # Non-positive fluxes would break the log-ratio diagnostics.
    masked.mask |= (masked.data <= 0)
    # Enforce the signal-to-noise cutoff and drop zero-ivar spaxels.
    masked.mask |= (line.snr < snr)
    masked.mask |= (line.ivar == 0)
    return masked
def _get_kewley06_axes(use_oi=True):
    """Creates custom axes for displaying Kewley06 plots.

    Returns the figure, an ImageGrid with the 2 or 3 classification panels
    (NII, SII and optionally OI diagnostics), and a single axis for the
    spatial (galaxy) map of classified spaxels.
    """
    fig = plt.figure(None, (8.5, 10))
    fig.clf()
    plt.subplots_adjust(top=0.99, bottom=0.08, hspace=0.01)

    # The axes for the three classification plots
    grid_bpt = ImageGrid(fig, 211,
                         nrows_ncols=(1, 3) if use_oi else (1, 2),
                         direction='row',
                         axes_pad=0.1,
                         add_all=True,
                         label_mode='L',
                         share_all=False)

    # The axes for the galaxy display
    gal_bpt = ImageGrid(fig, 212, nrows_ncols=(1, 1))

    # Plots the classification boundary lines (1e4 points keep the curved
    # demarcations smooth; the x-ranges stop before each curve's asymptote).
    xx_sf_nii = np.linspace(-1.281, 0.045, int(1e4))
    xx_sf_sii = np.linspace(-2, 0.315, int(1e4))
    xx_sf_oi = np.linspace(-2.5, -0.7, int(1e4))

    xx_comp_nii = np.linspace(-2, 0.4, int(1e4))

    # The Seyfert/LINER boundaries are straight lines: two points suffice.
    xx_agn_sii = np.array([-0.308, 1.0])
    xx_agn_oi = np.array([-1.12, 0.5])

    grid_bpt[0].plot(xx_sf_nii, kewley_sf_nii(xx_sf_nii), 'k--', zorder=90)
    grid_bpt[1].plot(xx_sf_sii, kewley_sf_sii(xx_sf_sii), 'r-', zorder=90)
    if use_oi:
        grid_bpt[2].plot(xx_sf_oi, kewley_sf_oi(xx_sf_oi), 'r-', zorder=90)

    grid_bpt[0].plot(xx_comp_nii, kewley_comp_nii(xx_comp_nii), 'r-', zorder=90)

    grid_bpt[1].plot(xx_agn_sii, kewley_agn_sii(xx_agn_sii), 'b-', zorder=80)
    if use_oi:
        grid_bpt[2].plot(xx_agn_oi, kewley_agn_oi(xx_agn_oi), 'b-', zorder=80)

    # Adds captions labelling each classification region
    grid_bpt[0].text(-1, -0.5, 'SF', ha='center', fontsize=12, zorder=100, color='c')
    grid_bpt[0].text(0.5, 0.5, 'AGN', ha='left', fontsize=12, zorder=100)
    grid_bpt[0].text(-0.08, -1.2, 'Comp', ha='left', fontsize=12, zorder=100, color='g')

    grid_bpt[1].text(-1.2, -0.5, 'SF', ha='center', fontsize=12, zorder=100)
    grid_bpt[1].text(-1, 1.2, 'Seyfert', ha='left', fontsize=12, zorder=100, color='r')
    grid_bpt[1].text(0.3, -1, 'LINER', ha='left', fontsize=12, zorder=100, color='m')

    if use_oi:
        grid_bpt[2].text(-2, -0.5, 'SF', ha='center', fontsize=12, zorder=100)
        grid_bpt[2].text(-1.5, 1, 'Seyfert', ha='left', fontsize=12, zorder=100)
        grid_bpt[2].text(-0.1, -1, 'LINER', ha='right', fontsize=12, zorder=100)

    # Sets the ticks, ticklabels, and other details
    xtick_limits = ((-2, 1), (-1.5, 1), (-2.5, 0.5))
    axes = [0, 1, 2] if use_oi else [0, 1]
    for ii in axes:
        grid_bpt[ii].get_xaxis().set_tick_params(direction='in')
        grid_bpt[ii].get_yaxis().set_tick_params(direction='in')
        grid_bpt[ii].set_xticks(np.arange(xtick_limits[ii][0], xtick_limits[ii][1] + 0.5, 0.5))
        grid_bpt[ii].set_xticks(np.arange(xtick_limits[ii][0],
                                          xtick_limits[ii][1] + 0.1, 0.1), minor=True)
        grid_bpt[ii].set_yticks(np.arange(-1.5, 2.0, 0.5))
        grid_bpt[ii].set_yticks(np.arange(-1.5, 1.6, 0.1), minor=True)
        grid_bpt[ii].grid(which='minor', alpha=0.2)
        grid_bpt[ii].grid(which='major', alpha=0.5)
        grid_bpt[ii].set_xlim(xtick_limits[ii][0], xtick_limits[ii][1])
        grid_bpt[ii].set_ylim(-1.5, 1.6)
        # With the OI panel present, extend the y-range a bit.
        if use_oi:
            grid_bpt[ii].set_ylim(-1.5, 1.8)
        grid_bpt[ii].spines['top'].set_visible(True)
        # Hide the last x ticklabel of all panels but the rightmost, so that
        # adjacent panels' labels do not overlap.
        if ii in [0, 1]:
            if not use_oi and ii == 1:
                continue
            grid_bpt[ii].get_xticklabels()[-1].set_visible(False)

    grid_bpt[0].set_ylabel(r'log([OIII]/H$\beta$)')
    grid_bpt[0].set_xlabel(r'log([NII]/H$\alpha$)')
    grid_bpt[1].set_xlabel(r'log([SII]/H$\alpha$)')
    if use_oi:
        grid_bpt[2].set_xlabel(r'log([OI]/H$\alpha$)')

    gal_bpt[0].grid(False)

    return fig, grid_bpt, gal_bpt[0]
def kewley_sf_nii(log_nii_ha):
    """Kewley+06 star-forming demarcation in the [NII]/Ha diagram."""
    return 1.3 + 0.61 / (log_nii_ha - 0.05)
def kewley_sf_sii(log_sii_ha):
    """Kewley+06 star-forming demarcation in the [SII]/Ha diagram."""
    return 1.3 + 0.72 / (log_sii_ha - 0.32)
def kewley_sf_oi(log_oi_ha):
    """Kewley+06 star-forming demarcation in the [OI]/Ha diagram."""
    return 1.33 + 0.73 / (log_oi_ha + 0.59)
def kewley_comp_nii(log_nii_ha):
    """Kewley+06 composite demarcation in the [NII]/Ha diagram."""
    return 1.19 + 0.61 / (log_nii_ha - 0.47)
def kewley_agn_sii(log_sii_ha):
    """Kewley+06 Seyfert/LINER dividing line in the [SII]/Ha diagram."""
    return 0.76 + 1.89 * log_sii_ha
def kewley_agn_oi(log_oi_ha):
    """Kewley+06 Seyfert/LINER dividing line in the [OI]/Ha diagram."""
    return 1.30 + 1.18 * log_oi_ha
def bpt_kewley06(maps, snr_min=3, return_figure=True, use_oi=True, **kwargs):
"""Returns a classification of ionisation regions, as defined in Kewley+06.
Makes use of the classification system defined by
`Kewley et al. (2006) <https://ui.adsabs.harvard.edu/#abs/2006MNRAS.372..961K/abstract>`_
to return classification masks for different ionisation mechanisms. If ``return_figure=True``,
produces and returns a matplotlib figure with the classification plots (based on
Kewley+06 Fig. 4) and the 2D spatial distribution of classified spaxels (i.e., a map of the
galaxy in which each spaxel is colour-coded based on its emission mechanism).
While it is possible to call this function directly, its normal use will be via the
:func:`~marvin.tools.maps.Maps.get_bpt` method.
Parameters:
maps (a Marvin :class:`~marvin.tools.maps.Maps` object)
The Marvin Maps object that contains the emission line maps to be used to determine
the BPT classification.
snr_min (float or dict):
The signal-to-noise cutoff value for the emission lines used to generate the BPT
diagram. If ``snr_min`` is a single value, that signal-to-noise will be used for all
the lines. Alternatively, a dictionary of signal-to-noise values, with the
emission line channels as keys, can be used.
E.g., ``snr_min={'ha': 5, 'nii': 3, 'oi': 1}``. If some values are not provided,
they will default to ``SNR>=3``. Note that the value ``sii`` will be applied to both
``[SII 6718]`` and ``[SII 6732]``.
return_figure (bool):
If ``True``, it also returns the matplotlib figure_ of the BPT diagram plot,
which can be used to modify the style of the plot.
use_oi (bool):
If ``True``, uses the OI diagnostic diagram for spaxel classification.
Returns:
bpt_return:
``bpt_kewley06`` returns a dictionary of dictionaries of classification masks.
The classification masks (not to be confused with bitmasks) are boolean arrays with the
same shape as the Maps or Cube (without the spectral dimension) that can be used
to select spaxels belonging to a certain excitation process (e.g., star forming).
The returned dictionary has the following keys: ``'sf'`` (star forming), ``'comp'``
(composite), ``'agn'``, ``'seyfert'``, ``'liner'``, ``'invalid'``
(spaxels that are masked out at the DAP level), and ``'ambiguous'`` (good spaxels that
do not fall in any classification or fall in more than one). Each key provides access
to a new dictionary with keys ``'nii'`` (for the constraints in the diagram NII/Halpha
vs OIII/Hbeta), ``'sii'`` (SII/Halpha vs OIII/Hbeta), ``'oi'`` (OI/Halpha vs
OIII/Hbeta; only if ``use_oi=True``), and ``'global'``, which applies all the previous
constraints at once. The ``'ambiguous'`` mask only contains the ``'global'``
subclassification, while the ``'comp'`` dictionary only contains ``'nii'``.
``'nii'`` is not available for ``'seyfert'`` and ``'liner'``. All the global masks are
unique (a spaxel can only belong to one of them) with the exception of ``'agn'``, which
intersects with ``'seyfert'`` and ``'liner'``. Additionally, if ``return_figure=True``,
``bpt_kewley06`` will also return the matplotlib figure for the generated plot, and a
list of axes for each one of the subplots.
Example:
>>> maps_8485_1901 = Maps(plateifu='8485-1901')
>>> bpt_masks, fig, axes = bpt_kewley06(maps_8485_1901)
Gets the global mask for star forming spaxels
>>> sf = bpt_masks['sf']['global']
Gets the seyfert mask based only on the SII/Halpha vs OIII/Hbeta diagnostics
>>> seyfert_sii = bpt_masks['seyfert']['sii']
"""
if 'snr' in kwargs:
warnings.warn('snr is deprecated. Use snr_min instead. '
'snr will be removed in a future version of marvin',
MarvinDeprecationWarning)
snr_min = kwargs.pop('snr')
if len(kwargs.keys()) > 0:
raise MarvinError('unknown keyword {0}'.format(list(kwargs.keys())[0]))
# Gets the necessary emission line maps
oiii = get_masked(maps, 'oiii_5008', snr=get_snr(snr_min, 'oiii'))
nii = get_masked(maps, 'nii_6585', snr=get_snr(snr_min, 'nii'))
ha = get_masked(maps, 'ha_6564', snr=get_snr(snr_min, 'ha'))
hb = get_masked(maps, 'hb_4862', snr=get_snr(snr_min, 'hb'))
oi = get_masked(maps, 'oi_6302', snr=get_snr(snr_min, 'oi'))
sii_6718 = get_masked(maps, 'sii_6718', snr=get_snr(snr_min, 'sii'))
sii_6732 = get_masked(maps, 'sii_6732', snr=get_snr(snr_min, 'sii'))
sii = sii_6718 + sii_6732
# Calculate masked logarithms
log_oiii_hb = np.ma.log10(oiii / hb)
log_nii_ha = np.ma.log10(nii / ha)
log_sii_ha = np.ma.log10(sii / ha)
log_oi_ha = np.ma.log10(oi / ha)
# Calculates masks for each emission mechanism according to the paper boundaries.
# The log_nii_ha < 0.05, log_sii_ha < 0.32, etc are necessary because the classification lines
# diverge and we only want the region before the asymptota.
sf_mask_nii = ((log_oiii_hb < kewley_sf_nii(log_nii_ha)) & (log_nii_ha < 0.05)).filled(False)
sf_mask_sii = ((log_oiii_hb < kewley_sf_sii(log_sii_ha)) & (log_sii_ha < 0.32)).filled(False)
sf_mask_oi = ((log_oiii_hb < kewley_sf_oi(log_oi_ha)) & (log_oi_ha < -0.59)).filled(False)
sf_mask = sf_mask_nii & sf_mask_sii & sf_mask_oi if use_oi else sf_mask_nii & sf_mask_sii
comp_mask = ((log_oiii_hb > kewley_sf_nii(log_nii_ha)) & (log_nii_ha < 0.05)).filled(False) & \
((log_oiii_hb < kewley_comp_nii(log_nii_ha)) & (log_nii_ha < 0.465)).filled(False)
comp_mask &= (sf_mask_sii & sf_mask_oi) if use_oi else sf_mask_sii
agn_mask_nii = ((log_oiii_hb > kewley_comp_nii(log_nii_ha)) |
(log_nii_ha > 0.465)).filled(False)
agn_mask_sii = ((log_oiii_hb > kewley_sf_sii(log_sii_ha)) |
(log_sii_ha > 0.32)).filled(False)
agn_mask_oi = ((log_oiii_hb > kewley_sf_oi(log_oi_ha)) |
(log_oi_ha > -0.59)).filled(False)
agn_mask = agn_mask_nii & agn_mask_sii & agn_mask_oi if use_oi else agn_mask_nii & agn_mask_sii
seyfert_mask_sii = agn_mask & (kewley_agn_sii(log_sii_ha) < log_oiii_hb).filled(False)
seyfert_mask_oi = agn_mask & (kewley_agn_oi(log_oi_ha) < log_oiii_hb).filled(False)
seyfert_mask = seyfert_mask_sii & seyfert_mask_oi if use_oi else seyfert_mask_sii
liner_mask_sii = agn_mask & (kewley_agn_sii(log_sii_ha) > log_oiii_hb).filled(False)
liner_mask_oi = agn_mask & (kewley_agn_oi(log_oi_ha) > log_oiii_hb).filled(False)
liner_mask = liner_mask_sii & liner_mask_oi if use_oi else liner_mask_sii
# The invalid mask is the combination of spaxels that are invalid in all of the emission maps
invalid_mask_nii = ha.mask | oiii.mask | nii.mask | hb.mask
invalid_mask_sii = ha.mask | oiii.mask | sii.mask | hb.mask
invalid_mask_oi = ha.mask | oiii.mask | oi.mask | hb.mask
invalid_mask = ha.mask | oiii.mask | nii.mask | hb.mask | sii.mask
if use_oi:
invalid_mask |= oi.mask
# The ambiguous mask are spaxels that are not invalid but don't fall into any of the
# emission mechanism classifications.
ambiguous_mask = ~(sf_mask | comp_mask | seyfert_mask | liner_mask) & ~invalid_mask
sf_classification = {'global': sf_mask,
'nii': sf_mask_nii,
'sii': sf_mask_sii}
comp_classification = {'global': comp_mask,
'nii': comp_mask}
agn_classification = {'global': agn_mask,
'nii': agn_mask_nii,
'sii': agn_mask_sii}
seyfert_classification = {'global': seyfert_mask,
'sii': seyfert_mask_sii}
liner_classification = {'global': liner_mask,
'sii': liner_mask_sii}
invalid_classification = {'global': invalid_mask,
'nii': invalid_mask_nii,
'sii': invalid_mask_sii}
ambiguous_classification = {'global': ambiguous_mask}
if use_oi:
sf_classification['oi'] = sf_mask_oi
agn_classification['oi'] = agn_mask_oi
seyfert_classification['oi'] = seyfert_mask_oi
liner_classification['oi'] = liner_mask_oi
invalid_classification['oi'] = invalid_mask_oi
bpt_return_classification = {'sf': sf_classification,
'comp': comp_classification,
'agn': agn_classification,
'seyfert': seyfert_classification,
'liner': liner_classification,
'invalid': invalid_classification,
'ambiguous': ambiguous_classification}
if not return_figure:
return bpt_return_classification
# Does all the plotting
with plt.style.context('seaborn-darkgrid'):
fig, grid_bpt, gal_bpt = _get_kewley06_axes(use_oi=use_oi)
sf_kwargs = {'marker': 's', 's': 12, 'color': 'c', 'zorder': 50, 'alpha': 0.7, 'lw': 0.0,
'label': 'Star-forming'}
sf_handler = grid_bpt[0].scatter(log_nii_ha[sf_mask], log_oiii_hb[sf_mask], **sf_kwargs)
grid_bpt[1].scatter(log_sii_ha[sf_mask], log_oiii_hb[sf_mask], **sf_kwargs)
comp_kwargs = {'marker': 's', 's': 12, 'color': 'g', 'zorder': 45, 'alpha': 0.7, 'lw': 0.0,
'label': 'Composite'}
comp_handler = grid_bpt[0].scatter(log_nii_ha[comp_mask], log_oiii_hb[comp_mask],
**comp_kwargs)
grid_bpt[1].scatter(log_sii_ha[comp_mask], log_oiii_hb[comp_mask], **comp_kwargs)
seyfert_kwargs = {'marker': 's', 's': 12, 'color': 'r', 'zorder': 40, 'alpha': 0.7, 'lw': 0.0,
'label': 'Seyfert'}
seyfert_handler = grid_bpt[0].scatter(log_nii_ha[seyfert_mask], log_oiii_hb[seyfert_mask],
**seyfert_kwargs)
grid_bpt[1].scatter(log_sii_ha[seyfert_mask], log_oiii_hb[seyfert_mask], **seyfert_kwargs)
liner_kwargs = {'marker': 's', 's': 12, 'color': 'm', 'zorder': 35, 'alpha': 0.7, 'lw': 0.0,
'label': 'LINER'}
liner_handler = grid_bpt[0].scatter(log_nii_ha[liner_mask], log_oiii_hb[liner_mask],
**liner_kwargs)
grid_bpt[1].scatter(log_sii_ha[liner_mask], log_oiii_hb[liner_mask], **liner_kwargs)
amb_kwargs = {'marker': 's', 's': 12, 'color': '0.6', 'zorder': 30, 'alpha': 0.7, 'lw': 0.0,
'label': 'Ambiguous '}
amb_handler = grid_bpt[0].scatter(log_nii_ha[ambiguous_mask], log_oiii_hb[ambiguous_mask],
**amb_kwargs)
grid_bpt[1].scatter(log_sii_ha[ambiguous_mask], log_oiii_hb[ambiguous_mask], **amb_kwargs)
if use_oi:
grid_bpt[2].scatter(log_oi_ha[sf_mask], log_oiii_hb[sf_mask], **sf_kwargs)
grid_bpt[2].scatter(log_oi_ha[comp_mask], log_oiii_hb[comp_mask], **comp_kwargs)
grid_bpt[2].scatter(log_oi_ha[seyfert_mask], log_oiii_hb[seyfert_mask], **seyfert_kwargs)
grid_bpt[2].scatter(log_oi_ha[liner_mask], log_oiii_hb[liner_mask], **liner_kwargs)
grid_bpt[2].scatter(log_oi_ha[ambiguous_mask], log_oiii_hb[ambiguous_mask], **amb_kwargs)
# Creates the legend
grid_bpt[0].legend([sf_handler, comp_handler, seyfert_handler, liner_handler, amb_handler],
['Star-forming', 'Composite', 'Seyfert', 'LINER', 'Ambiguous'], ncol=2,
loc='upper left', frameon=True, labelspacing=0.1, columnspacing=0.1,
handletextpad=0.1, fontsize=9)
# Creates a RGB image of the galaxy, and sets the colours of the spaxels to match the
# classification masks
gal_rgb = np.zeros((ha.shape[0], ha.shape[1], 3), dtype=np.uint8)
for ii in [1, 2]: # Cyan
gal_rgb[:, :, ii][sf_mask] = 255
gal_rgb[:, :, 1][comp_mask] = 128 # Green
gal_rgb[:, :, 0][seyfert_mask] = 255 # Red
# Magenta
gal_rgb[:, :, 0][liner_mask] = 255
gal_rgb[:, :, 2][liner_mask] = 255
for ii in [0, 1, 2]:
gal_rgb[:, :, ii][invalid_mask] = 255 # White
gal_rgb[:, :, ii][ambiguous_mask] = 169 # Grey
# Shows the image.
gal_bpt.imshow(gal_rgb, origin='lower', aspect='auto', interpolation='nearest')
gal_bpt.set_xlim(0, ha.shape[1] - 1)
gal_bpt.set_ylim(0, ha.shape[0] - 1)
gal_bpt.set_xlabel('x [spaxels]')
gal_bpt.set_ylabel('y [spaxels]')
axes = grid_bpt.axes_all + [gal_bpt]
# Adds custom method to create figure
for ax in axes:
setattr(ax.__class__, 'bind_to_figure', _bind_to_figure)
return (bpt_return_classification, fig, axes)
def _bind_to_figure(self, fig=None):
    """Clone this axes into a (possibly new) figure.

    Thin wrapper around ``marvin.utils.plot.utils.bind_to_figure`` that
    restores the y-axis label when the copied axes comes back without one.
    """
    copied = bind_to_figure(self, fig=fig)
    first_axes = copied.axes[0]
    if not first_axes.get_ylabel():
        first_axes.set_ylabel('log([OIII]/H$\\beta$)')
    return copied
| albireox/marvin | python/marvin/utils/dap/bpt.py | Python | bsd-3-clause | 19,622 | [
"Galaxy"
] | 07f6b80cb0cf3ffa3079e8a8d47195ccb8f3743b7d7113eeb2d76bb435c2d2d5 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class.
@@Gaussian
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Gaussian(object):
  """The Normal (Gaussian) distribution with mean mu and stddev sigma.

  The PDF of this distribution is:
    f(x) = sqrt(1/(2*pi*sigma^2)) exp(-(x-mu)^2/(2*sigma^2))
  """

  def __init__(self, mu, sigma):
    """Construct a new Gaussian distribution with mean mu and stddev sigma.

    Args:
      mu: Scalar tensor, the mean of the distribution.
      sigma: Scalar tensor, the standard deviation of the distribution.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
    self._mu = ops.convert_to_tensor(mu)
    self._sigma = ops.convert_to_tensor(sigma)
    # Compare dtypes on the *converted* tensors: plain Python numbers passed
    # for mu/sigma have no .dtype attribute, so checking the raw inputs (as
    # before) raised AttributeError instead of the documented TypeError.
    if self._mu.dtype != self._sigma.dtype:
      raise TypeError("Expected same dtype for mu, sigma but got: %s vs. %s"
                      % (self._mu.dtype, self._sigma.dtype))

  @property
  def dtype(self):
    """The dtype of the distribution's parameters."""
    return self._mu.dtype

  @property
  def shape(self):
    """Event shape; this is a scalar distribution."""
    return constant_op.constant([])  # Scalar

  @property
  def mu(self):
    """The mean of the distribution."""
    return self._mu

  @property
  def sigma(self):
    """The standard deviation of the distribution."""
    return self._sigma

  def log_pdf(self, x):
    """Log likelihood of observations in x under Gaussian with mu and sigma.

    Args:
      x: 1-D, a vector of observations.

    Returns:
      log_lik: 1-D, a vector of log likelihoods of `x` under the model.
    """
    # log N(x; mu, sigma) = -0.5*log(2*pi) - log(sigma) - (x-mu)^2/(2*sigma^2)
    return (-0.5*math.log(2 * math.pi) - math_ops.log(self._sigma)
            -0.5*math_ops.square((x - self._mu) / self._sigma))

  def cdf(self, x):
    """CDF of observations in x under Gaussian with mu and sigma.

    Args:
      x: 1-D, a vector of observations.

    Returns:
      cdf: 1-D, a vector of CDFs of `x` under the model.
    """
    # Phi(x) = 0.5 * (1 + erf((x - mu) / (sigma * sqrt(2))))
    return (0.5 + 0.5*math_ops.erf(
        1.0/(math.sqrt(2.0) * self._sigma)*(x - self._mu)))

  def log_cdf(self, x):
    """Log of the CDF of observations x under Gaussian with mu and sigma."""
    return math_ops.log(self.cdf(x))

  def pdf(self, x):
    """The PDF for observations x.

    Args:
      x: 1-D, a vector of observations.

    Returns:
      pdf: 1-D, a vector of pdf values of `x` under the model.
    """
    return math_ops.exp(self.log_pdf(x))

  def sample(self, n, seed=None):
    """Sample `n` observations from this Distribution.

    Args:
      n: Scalar int `Tensor`, the number of observations to sample.
      seed: Python integer, the random seed.

    Returns:
      samples: A vector of samples with shape `[n]`.
    """
    return random_ops.random_normal(
        shape=array_ops.expand_dims(n, 0), mean=self._mu,
        stddev=self._sigma, dtype=self._mu.dtype, seed=seed)
| panmari/tensorflow | tensorflow/contrib/distributions/python/ops/gaussian.py | Python | apache-2.0 | 3,593 | [
"Gaussian"
] | e8355fc65cdb9c61257a2ce14401b7855872a909f7dffa6f2b00e4085a804414 |
"""
This module contains code for storing the output of tabular HMMer parses, in
analogy to the BLAST record class of BioPython. Idea is to hold on to all of the
possible attributes of a HMMer result so that they can be iterated over after
"""
class HMMerRecord:
    """Container for the results of one tabular HMMer run.

    Mirrors the idea of BioPython's BLAST record class: hold every parsed
    hit description so callers can iterate over them after parsing.
    """

    def __init__(self, hmmer_type):
        # Which HMMer program produced this result; recorded for callers.
        self.hmmer_type = hmmer_type
        # Accumulated description objects, in parse order.
        self.descriptions = []

    def add_description(self, desc):
        """Append one parsed description to the record (convenience helper)."""
        self.descriptions.append(desc)
class ProtDescr:
    """One target description line from a tabular protein HMMer parse.

    Numeric columns arrive as strings from the parser and are converted on
    construction; ``title`` mimics BioPython's BLAST record titles.
    """

    def __init__(self, target_name, target_accession, query_name, query_accession,
            evalue, score, bias, dom_evalue, dom_score, dom_bias, exp, reg, clu,
            ov, env, dom, rep, inc, desc):
        self.target_name = target_name
        self.target_accession = target_accession
        self.query_name = query_name
        self.query_accession = query_accession
        # Full-sequence and best-single-domain statistics are floats.
        for attr, raw in (('e', evalue), ('score', score), ('bias', bias),
                ('dom_evalue', dom_evalue), ('dom_score', dom_score),
                ('dom_bias', dom_bias), ('exp', exp)):
            setattr(self, attr, float(raw))
        # Domain-number estimation columns are integer counts.
        for attr, raw in (('reg', reg), ('clu', clu), ('ov', ov),
                ('env', env), ('dom', dom), ('rep', rep), ('inc', inc)):
            setattr(self, attr, int(raw))
        self.desc = desc
        # HMMer writes an absent description as '-' (not simply '').
        if desc == '-':
            self.title = self.target_name  # all of the title is in the name
        else:
            self.title = self.target_name + ' ' + desc  # stay consistent with BLAST
class NuclDescr:
    """One target description line from a tabular nucleotide HMMer parse.

    All columns are stored verbatim, exactly as the strings handed in by
    the parser (no numeric conversion is performed here).
    """

    def __init__(self, target_name, target_accession, query_name, query_accession,
            hmm_from, hmm_to, ali_from, ali_to, env_from, env_to, seq_length,
            strand, evalue, score, bias, desc):
        values = locals()
        # Store every constructor argument under an attribute of the same name.
        for field in ('target_name', 'target_accession', 'query_name',
                'query_accession', 'hmm_from', 'hmm_to', 'ali_from', 'ali_to',
                'env_from', 'env_to', 'seq_length', 'strand', 'evalue',
                'score', 'bias', 'desc'):
            setattr(self, field, values[field])
| chris-klinger/Goat | searches/hmmer/hmmer_record.py | Python | gpl-3.0 | 2,362 | [
"BLAST",
"Biopython"
] | 14e13727cdd19ffbe32bfe502c9f43a0f61976fb0afe594e9021f2bed9e8c183 |
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import moose
# Total simulated time in seconds, shared by both solver runs in main().
runtime = 120.0
def makeModel():
    """Build the harmonic-oscillator chemical model.

    Two pools (p, v) get their rates of change directly from Function
    objects implementing p' = x0 - offset1 and v' = -k*(x0 - offset2),
    and Table2 recorders under /model/graphs capture both pools' n values.
    """
    # create container for model
    model = moose.Neutral( 'model' )
    harmonic = moose.CubeMesh( '/model/harmonic' )
    harmonic.volume = 1e-15
    # NOTE(review): the 'lotka' compartment is created but never used below.
    lotka = moose.CubeMesh( '/model/lotka' )
    lotka.volume = 1e-15
    # create molecules and reactions
    p = moose.Pool( '/model/harmonic/p' )
    v = moose.Pool( '/model/harmonic/v' )
    # Function objects that supply dp/dt and dv/dt directly (see main()).
    pdot = moose.Function( '/model/harmonic/p/func' )
    vdot = moose.Function( '/model/harmonic/v/func' )
    # Parameters
    # Offsets keep the concentrations positive: MOOSE chemical systems
    # cannot have negative concentrations (see main()'s docstring).
    offset1 = 1.0
    offset2 = 1.0
    k = 0.1
    p.nInit = offset1
    v.nInit = offset2 + 0.1
    # Each rate function takes a single input, x0.
    pdot.x.num = 1
    vdot.x.num = 1
    pdot.expr = "x0 - " + str( offset1 )
    vdot.expr = "-" + str( k ) + " * (x0 - " + str( offset2 ) + ")"
    # connect them up for reactions
    # p feeds vdot's input and v feeds pdot's input, closing the loop.
    moose.connect( p, 'nOut', vdot.x[0], 'input' )
    moose.connect( v, 'nOut', pdot.x[0], 'input' )
    moose.connect( vdot, 'valueOut', v, 'increment' )
    moose.connect( pdot, 'valueOut', p, 'increment' )
    # Create the output tables
    graphs = moose.Neutral( '/model/graphs' )
    pplot = moose.Table2 ( '/model/graphs/p' )
    vplot = moose.Table2 ( '/model/graphs/v' )
    # connect up the tables
    moose.connect( pplot, 'requestOut', p, 'getN' );
    moose.connect( vplot, 'requestOut', v, 'getN' );
def main():
    """
    funcRateHarmonicOsc illustrates the use of function objects to
    directly define the rates of change of pool concentration. This
    example shows how to set up a simple harmonic oscillator system
    of differential equations using the script. In normal use one would
    prefer to use SBML.

    The equations are ::

        p' = v - offset1
        v' = -k(p - offset2)

    where the rates for Pools p and v are computed using Functions.
    Note the use of offsets. This is because MOOSE chemical
    systems cannot have negative concentrations.

    The model is set up to run using default Exponential Euler
    integration, and then using the GSL deterministic solver.
    """
    makeModel()
    # First pass: default Exponential Euler integration with dt = 0.01 on
    # clocks 11-17 and dt = 0.1 on clock 18.
    for i in range( 11, 18 ):
        moose.setClock( i, 0.01 )
    moose.setClock( 18, 0.1 )
    moose.reinit()
    moose.start( runtime ) # Run the model
    # Iterate through all plots, dump their contents to data.plot.
    for x in moose.wildcardFind( '/model/graphs/#' ):
        #x.xplot( 'scriptKineticModel.plot', x.name )
        t = numpy.arange( 0, x.vector.size, 1 ) * x.dt # sec
        pylab.plot( t, x.vector, label=x.name )
    pylab.suptitle( "Integration using ee" )
    pylab.legend()
    pylab.figure()
    # Second pass: attach a deterministic GSL solver (Ksolve + Stoich) to
    # the harmonic compartment and rerun with a coarser dt of 0.1.
    compt = moose.element( '/model/harmonic' )
    ksolve = moose.Ksolve( '/model/harmonic/ksolve' )
    stoich = moose.Stoich( '/model/harmonic/stoich' )
    stoich.compartment = compt
    stoich.ksolve = ksolve
    stoich.path = '/model/harmonic/##'
    for i in range( 11, 18 ):
        moose.setClock( i, 0.1 )
    moose.reinit()
    moose.start( runtime ) # Run the model
    for x in moose.wildcardFind( '/model/graphs/#' ):
        t = numpy.arange( 0, x.vector.size, 1 ) * x.dt # sec
        pylab.plot( t, x.vector, label=x.name )
    pylab.suptitle( "Integration using gsl" )
    pylab.legend()
    pylab.show()
    # NOTE(review): quit() terminates the interpreter as soon as the plot
    # windows are closed; nothing after main() returns will ever run.
    quit()
# Run the 'main' if this script is executed standalone.
# (Importing this file as a module only defines makeModel/main; nothing runs.)
if __name__ == '__main__':
    main()
| dilawar/moose-full | moose-examples/snippets/funcRateHarmonicOsc.py | Python | gpl-2.0 | 3,873 | [
"MOOSE"
] | 81b5e190974bba55155f78aec6ba7782da5ddb76433fafbc9e633a10845edbf5 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os, sys
import subprocess
import json
import unittest
from FactorySystem import Parser
import pyhit
def find_app():
    """
    Find the executable to use, respecting MOOSE_DIR and METHOD.

    Falls back to asking git for the repository root when MOOSE_DIR is not
    set; exits with an error message if neither source works.

    Returns:
        Full path to the moose_test executable for the current METHOD
        (defaulting to 'opt').
    """
    moose_dir = os.environ.get("MOOSE_DIR")
    if not moose_dir:
        # Use a list argv (no shell) for safety and portability.
        p = subprocess.Popen(['git', 'rev-parse', '--show-cdup'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.wait()
        if p.returncode == 0:
            # communicate() returns bytes on Python 3: decode and strip the
            # trailing newline before joining, otherwise os.path.join raises
            # TypeError mixing str and bytes.
            git_dir = p.communicate()[0].decode("utf-8").strip()
            moose_dir = os.path.abspath(os.path.join(os.getcwd(), git_dir))
        else:
            print("Could not find top level moose directory. Please set the MOOSE_DIR environment variable.")
            sys.exit(1)
    app_name = os.path.join(moose_dir, "test", "moose_test-%s" % os.environ.get("METHOD", "opt"))
    return app_name
def run_app(args=None):
    """
    Run the app and return its output (stdout and stderr merged).

    Exits the process if the app cannot be started or finishes with a
    non-zero exit code.

    Args:
        args: optional list of extra command line arguments. The caller's
            list is never modified.

    Returns:
        The utf-8 decoded output of the run.
    """
    app_name = find_app()
    # Build a fresh argv instead of mutating the caller's list; the old
    # 'args=[]' default was mutated in place, so repeated default calls
    # accumulated the app name and options.
    # "-options_left 0" is used to stop the debug version of PETSc from
    # printing out WARNING messages that sometime confuse the json parser.
    args = [app_name, "-options_left", "0"] + list(args or [])
    cmd_line = ' '.join(args)
    try:
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except OSError as e:
        print("Problem running '%s'\nError: %s" % (cmd_line, e))
        sys.exit(1)
    stdout_data = proc.communicate()[0].decode("utf-8")
    if proc.returncode != 0:
        print("Failed with exit code %s" % proc.returncode)
        sys.exit(proc.returncode)
    return stdout_data
class TestHITBase(unittest.TestCase):
    """Shared helpers for tests that inspect the app's ``--dump`` HIT output."""

    def getBlockSections(self, node):
        """Map each child section's name to the child node itself."""
        return {child.name: child for child in node}

    def getBlockParams(self, node):
        """Return the node's (name, value) parameters as a plain dict."""
        return {k: v for k, v in node.params()}

    def getInputFileFormat(self, extra=None):
        """
        Does a dump (plus any 'extra' command line arguments) and uses the
        GetPotParser to parse the output.

        Fails the test if the dump markers are missing, the dump is empty,
        or duplicate blocks are found; otherwise returns the parsed root.
        """
        # A None default avoids the shared mutable-default-argument pitfall
        # that the previous 'extra=[]' signature introduced.
        extra = [] if extra is None else extra
        args = ["--disable-refcount-printing", "--dump"] + extra
        output = run_app(args)
        self.assertIn("### START DUMP DATA ###\n", output)
        self.assertIn("### END DUMP DATA ###\n", output)
        # Keep only the text between the start/end dump markers.
        output = output.split('### START DUMP DATA ###\n')[1]
        output = output.split('### END DUMP DATA ###')[0]
        self.assertNotEqual(len(output), 0)
        root = pyhit.parse(output)
        errors = list(Parser.checkDuplicates(root))
        self.assertEqual(errors, [])
        return root
class TestInputFileFormat(TestHITBase):
    def testInputFileFormat(self):
        """
        Sanity checks on the dumped syntax tree: a handful of well-known
        blocks, parameters and type sections must appear where expected.
        """
        root = self.getInputFileFormat()
        top = self.getBlockSections(root)
        self.assertIn("Executioner", top)
        self.assertIn("BCs", top)

        bcs = self.getBlockSections(top["BCs"])
        self.assertIn("Periodic", bcs)
        self.assertIn("*", bcs)
        star_node = bcs["*"]
        star_params = self.getBlockParams(star_node)
        star_sections = self.getBlockSections(star_node)
        self.assertIn("active", star_params)
        self.assertIn("<types>", star_sections)
        star_types = self.getBlockSections(star_sections["<types>"])
        self.assertIn("<DirichletBC>", star_types)

        periodic = self.getBlockSections(bcs["Periodic"])
        self.assertEqual(len(periodic.keys()), 1)
        self.assertIn("*", periodic)
        self.assertNotIn("<types>", bcs)

        executioner = self.getBlockSections(top["Executioner"])
        self.assertIn("<types>", executioner)
        exe_types = self.getBlockSections(executioner["<types>"])
        self.assertIn("<Transient>", exe_types)

        # Preconditioning has a Preconditioning/*/* syntax which is unusual
        self.assertIn("Preconditioning", top)
        pc = self.getBlockSections(top["Preconditioning"])
        pc_star = self.getBlockSections(pc["*"])
        pc_star_star = self.getBlockSections(pc_star["*"])
        pc_types = self.getBlockSections(pc_star_star["<types>"])
        split_params = self.getBlockParams(pc_types["<Split>"])
        self.assertIn("splitting_type", split_params)
        self.assertIn("petsc_options", split_params)

        # Make sure the default dump has test objects
        self.assertIn("ApplyInputParametersTest", top)
class TestInputFileFormatSearch(TestHITBase):
    def testInputFileFormatSearch(self):
        """
        A parameter search ('--dump initial_steps') must return only the
        Adaptivity block, containing exactly that one parameter.
        """
        root = self.getInputFileFormat(["initial_steps"])
        top = self.getBlockSections(root)
        self.assertNotIn("Executioner", top)
        self.assertNotIn("BCs", top)
        self.assertIn("Adaptivity", top)
        self.assertEqual(len(top.keys()), 1)
        params = self.getBlockParams(top["Adaptivity"])
        self.assertIn("initial_steps", params)
        self.assertEqual(len(params.keys()), 1)
# Allow running this file directly: execute all tests with verbose output.
if __name__ == '__main__':
    unittest.main(__name__, verbosity=2)
| nuclear-wizard/moose | test/tests/outputs/format/test_hit_output.py | Python | lgpl-2.1 | 5,797 | [
"MOOSE"
] | 94d55a2fcf2cbd766baeb9e7893a1792adbc137ff3b78f5276c0e0731c504db9 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
OpenBabel interface module, which opens up access to the hundreds of file
formats supported by OpenBabel. Requires openbabel with python bindings to be
installed. Please consult the
`openbabel documentation <http://openbabel.org/wiki/Main_Page>`_.
"""
import warnings
import copy
from pymatgen.core.structure import Molecule
from pymatgen.analysis.graphs import MoleculeGraph
from monty.dev import requires
# openbabel is an optional dependency: when the import fails, 'ob' is set to
# None and the @requires decorator on BabelMolAdaptor.__init__ raises a
# helpful error at call time.
# NOTE(review): on import failure 'pb' is left undefined, so any code path
# that reaches a 'pb.' reference without openbabel installed would raise
# NameError rather than the friendlier @requires message — confirm intended.
try:
    from openbabel import openbabel as ob
    from openbabel import pybel as pb
except Exception:
    ob = None
# Module metadata.
__author__ = "Shyue Ping Ong, Qi Wang"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 28, 2012"
class BabelMolAdaptor:
    """
    Adaptor serves as a bridge between OpenBabel's Molecule and pymatgen's
    Molecule.
    """

    @requires(ob,
              "BabelMolAdaptor requires openbabel to be installed with "
              "Python bindings. Please get it at http://openbabel.org "
              "(version >=3.0.0).")
    def __init__(self, mol):
        """
        Initializes with pymatgen Molecule or OpenBabel's OBMol.

        Args:
            mol: pymatgen's Molecule or OpenBabel OBMol

        Raises:
            ValueError: if mol is a disordered pymatgen Molecule.
            TypeError: if mol is neither a pymatgen Molecule nor an OBMol.
        """
        if isinstance(mol, Molecule):
            if not mol.is_ordered:
                raise ValueError("OpenBabel Molecule only supports ordered "
                                 "molecules.")

            # For some reason, manually adding atoms does not seem to create
            # the correct OBMol representation to do things like force field
            # optimization. So we go through the indirect route of creating
            # an XYZ file and reading in that file.
            obmol = ob.OBMol()
            obmol.BeginModify()
            for site in mol:
                coords = [c for c in site.coords]
                atomno = site.specie.Z
                obatom = ob.OBAtom()
                obatom.thisown = 0
                obatom.SetAtomicNum(atomno)
                obatom.SetVector(*coords)
                obmol.AddAtom(obatom)
                del obatom
            obmol.ConnectTheDots()
            obmol.PerceiveBondOrders()
            obmol.SetTotalSpinMultiplicity(mol.spin_multiplicity)
            obmol.SetTotalCharge(int(mol.charge))
            obmol.Center()
            obmol.EndModify()
            self._obmol = obmol
        elif isinstance(mol, ob.OBMol):
            self._obmol = mol
        else:
            # Previously an unsupported type was silently ignored, leaving
            # the adaptor without an OBMol; fail loudly instead.
            raise TypeError("Unsupported input type %s; expected pymatgen "
                            "Molecule or openbabel OBMol." % type(mol))

    @property
    def pymatgen_mol(self):
        """
        Returns pymatgen Molecule object.
        """
        sp = []
        coords = []
        for atom in ob.OBMolAtomIter(self._obmol):
            sp.append(atom.GetAtomicNum())
            coords.append([atom.GetX(), atom.GetY(), atom.GetZ()])
        return Molecule(sp, coords)

    @property
    def openbabel_mol(self):
        """
        Returns OpenBabel's OBMol.
        """
        return self._obmol

    def localopt(self, forcefield='mmff94', steps=500):
        """
        A wrapper to pybel's localopt method to optimize a Molecule.

        Args:
            forcefield: Default is mmff94. Options are 'gaff', 'ghemical',
                'mmff94', 'mmff94s', and 'uff'.
            steps: Default is 500.
        """
        pbmol = pb.Molecule(self._obmol)
        pbmol.localopt(forcefield=forcefield, steps=steps)
        self._obmol = pbmol.OBMol

    def make3d(self, forcefield="mmff94", steps=50):
        """
        A wrapper to pybel's make3D method generate a 3D structure from a
        2D or 0D structure.

        The 3D structure is made very quickly using a combination of rules
        (e.g. sp3 atoms should have four bonds arranged in a tetrahedron) and
        ring templates (e.g. cyclohexane is shaped like a chair). Once 3D
        coordinates are generated, hydrogens are added and a quick local
        optimization is carried out as default.

        The generated 3D structure can have clashes or have high energy
        structures due to some strain. Please consider to use the conformer
        search or geometry optimization to further optimize the structure.

        Args:
            forcefield: Default is mmff94. Options are 'gaff', 'ghemical',
                'mmff94', 'mmff94s', and 'uff'.
            steps: Default is 50.
        """
        pbmol = pb.Molecule(self._obmol)
        pbmol.make3D(forcefield=forcefield, steps=steps)
        self._obmol = pbmol.OBMol

    def add_hydrogen(self):
        """
        Add hydrogens (make all hydrogen explicit).
        """
        self._obmol.AddHydrogens()

    def remove_bond(self, idx1, idx2):
        """
        Remove a bond from an openbabel molecule.

        Args:
            idx1: The atom index of one of the atoms participating the in bond
            idx2: The atom index of the other atom participating in the bond
        """
        # NOTE(review): deletes while iterating OBMolBondIter; appears to be
        # accepted by openbabel's iterator, but confirm against its docs.
        for obbond in ob.OBMolBondIter(self._obmol):
            if (obbond.GetBeginAtomIdx() == idx1 and obbond.GetEndAtomIdx() == idx2) or (
                    obbond.GetBeginAtomIdx() == idx2 and obbond.GetEndAtomIdx() == idx1):
                self._obmol.DeleteBond(obbond)

    def rotor_conformer(self, *rotor_args, algo="WeightedRotorSearch",
                        forcefield="mmff94"):
        """
        Conformer search based on several Rotor Search algorithms of openbabel.
        If the input molecule is not 3D, make3d will be called (generate 3D
        structure, add hydrogen, a quick localopt). All hydrogen atoms need
        to be made explicit.

        Args:
            rotor_args: pass args to Rotor Search in openbabel.
                for "WeightedRotorSearch": (conformers, geomSteps,
                sampleRingBonds-default False)
                for "SystematicRotorSearch": (geomSteps-default 2500,
                sampleRingBonds-default False)
                for "RandomRotorSearch": (conformers, geomSteps-default 2500,
                sampleRingBonds-default False)
            algo (str): Default is "WeightedRotorSearch". Options are
                "SystematicRotorSearch", "RandomRotorSearch", and
                "WeightedRotorSearch".
            forcefield (str): Default is mmff94. Options are 'gaff', 'ghemical',
                'mmff94', 'mmff94s', and 'uff'.
        """
        if self._obmol.GetDimension() != 3:
            self.make3d()
        else:
            self.add_hydrogen()

        ff = ob.OBForceField_FindType(forcefield)
        if ff == 0:
            warnings.warn("This input forcefield {} is not supported "
                          "in openbabel. The forcefield will be reset as "
                          "default 'mmff94' for now.".format(forcefield))
            ff = ob.OBForceField_FindType("mmff94")

        try:
            rotor_search = getattr(ff, algo)
        except AttributeError:
            warnings.warn("This input conformer search algorithm {} is not "
                          "supported in openbabel. Options are "
                          "'SystematicRotorSearch', 'RandomRotorSearch' "
                          "and 'WeightedRotorSearch'. "
                          "The algorithm will be reset as default "
                          "'WeightedRotorSearch' for now.".format(algo))
            rotor_search = ff.WeightedRotorSearch
        rotor_search(*rotor_args)
        ff.GetConformers(self._obmol)

    def gen3d_conformer(self):
        """
        A combined method to first generate 3D structures from 0D or 2D
        structures and then find the minimum energy conformer:

        1. Use OBBuilder to create a 3D structure using rules and ring templates
        2. Do 250 steps of a steepest descent geometry optimization with the
           MMFF94 forcefield
        3. Do 200 iterations of a Weighted Rotor conformational search
           (optimizing each conformer with 25 steps of a steepest descent)
        4. Do 250 steps of a conjugate gradient geometry optimization.

        Warning from openbabel docs:
        For many applications where 100s if not 1000s of molecules need to be
        processed, gen3d is rather SLOW. Sometimes this function can cause a
        segmentation fault.
        A future version of Open Babel will provide options for slow/medium/fast
        3D structure generation which will involve different compromises
        between speed and finding the global energy minimum.
        """
        gen3d = ob.OBOp.FindType("Gen3D")
        gen3d.Do(self._obmol)

    def confab_conformers(self, forcefield="mmff94", freeze_atoms=None,
                          rmsd_cutoff=0.5, energy_cutoff=50.0,
                          conf_cutoff=100000, verbose=False):
        """
        Conformer generation based on Confab to generate all diverse low-energy
        conformers for molecules. This is different from rotor_conformer or
        gen3d_conformer as it aims to not simply to find a low energy
        conformation but to generate several different conformations.

        Args:
            forcefield (str): Default is mmff94. Options are 'gaff', 'ghemical',
                'mmff94', 'mmff94s', and 'uff'.
            freeze_atoms ([int]): index of atoms to be freezed when performing
                conformer search, default is None.
            rmsd_cutoff (float): rmsd_cufoff, default is 0.5 Angstrom.
            energy_cutoff (float): energy_cutoff, default is 50.0 kcal/mol.
            conf_cutoff (float): max number of conformers to test,
                default is 1 million.
            verbose (bool): whether to display information on torsions found,
                default is False.

        Returns:
            (list): list of pymatgen Molecule objects for generated conformers.
        """
        if self._obmol.GetDimension() != 3:
            self.make3d()
        else:
            self.add_hydrogen()

        ff = ob.OBForceField_FindType(forcefield)
        if ff == 0:
            print("Could not find forcefield {} in openbabel, the forcefield "
                  "will be reset as default 'mmff94'".format(forcefield))
            ff = ob.OBForceField_FindType("mmff94")

        if freeze_atoms:
            print('{} atoms will be freezed'.format(len(freeze_atoms)))
            constraints = ob.OBFFConstraints()

            for atom in ob.OBMolAtomIter(self._obmol):
                atom_id = atom.GetIndex() + 1
                # Bug fix: this previously tested the *builtin* 'id' function
                # ("if id in freeze_atoms"), which is never an int, so no
                # constraint was ever added and freeze_atoms was ignored.
                if atom_id in freeze_atoms:
                    constraints.AddAtomConstraint(atom_id)
            ff.SetConstraints(constraints)

        # Confab conformer generation
        ff.DiverseConfGen(rmsd_cutoff, conf_cutoff, energy_cutoff,
                          verbose)
        ff.GetConformers(self._obmol)

        # Number of conformers generated by Confab conformer generation
        conformer_num = self._obmol.NumConformers()

        conformers = []
        for i in range(conformer_num):
            self._obmol.SetConformer(i)
            conformer = copy.deepcopy(BabelMolAdaptor(self._obmol).pymatgen_mol)
            conformers.append(conformer)
        self._obmol.SetConformer(0)
        return conformers

    @property
    def pybel_mol(self):
        """
        Returns Pybel's Molecule object.
        """
        return pb.Molecule(self._obmol)

    def write_file(self, filename, file_format="xyz"):
        """
        Uses OpenBabel to output all supported formats.

        Args:
            filename: Filename of file to output
            file_format: String specifying any OpenBabel supported formats.
        """
        mol = pb.Molecule(self._obmol)
        return mol.write(file_format, filename, overwrite=True)

    @staticmethod
    def from_file(filename, file_format="xyz", return_all_molecules=False):
        """
        Uses OpenBabel to read a molecule from a file in all supported formats.

        Args:
            filename: Filename of input file
            file_format: String specifying any OpenBabel supported formats.
            return_all_molecules: If ``True``, will return a list of
                ``BabelMolAdaptor`` instances, one for each molecule found in
                the file. If ``False``, will return only the first molecule.

        Returns:
            BabelMolAdaptor object or list thereof
        """
        mols = pb.readfile(str(file_format), str(filename))
        if return_all_molecules:
            return [BabelMolAdaptor(mol.OBMol) for mol in mols]
        return BabelMolAdaptor(next(mols).OBMol)

    @staticmethod
    def from_molecule_graph(mol):
        """
        Read a molecule from a pymatgen MoleculeGraph object.

        Args:
            mol: pymatgen MoleculeGraph object.

        Returns:
            BabelMolAdaptor object

        Raises:
            TypeError: if mol is not a MoleculeGraph.
        """
        if not isinstance(mol, MoleculeGraph):
            # Previously a non-MoleculeGraph input silently returned None.
            raise TypeError("Expected a pymatgen MoleculeGraph, got %s."
                            % type(mol))
        return BabelMolAdaptor(mol.molecule)

    @staticmethod
    def from_string(string_data, file_format="xyz"):
        """
        Uses OpenBabel to read a molecule from a string in all supported
        formats.

        Args:
            string_data: String containing molecule data.
            file_format: String specifying any OpenBabel supported formats.

        Returns:
            BabelMolAdaptor object
        """
        mols = pb.readstring(str(file_format), str(string_data))
        return BabelMolAdaptor(mols.OBMol)
| gVallverdu/pymatgen | pymatgen/io/babel.py | Python | mit | 13,465 | [
"Open Babel",
"Pybel",
"pymatgen"
] | e6a6ea07e9883e3c1b1f9fcd7d0eed02af48b49894f12b03be1772d620d73b4d |
#
# libtcod 1.5.1 python wrapper
# Copyright (c) 2008,2009,2010 Jice & Mingos
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Jice or Mingos may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JICE AND MINGOS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JICE OR MINGOS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import ctypes
import struct
from ctypes import *
# Shim for very old interpreters that lack ctypes.c_bool.
if not hasattr(ctypes, "c_bool"): # for Python < 2.6
    c_bool = c_uint8
try: #import NumPy if available
    import numpy
    numpy_available = True
except ImportError:
    numpy_available = False
# Platform flags; exactly one is set to True below based on sys.platform.
# NOTE(review): HAIKU is assigned True in its branch but is never
# pre-initialized to False here like the others — on non-Haiku platforms
# the name HAIKU does not exist at all. Confirm whether callers test it.
LINUX=False
MAC=False
MINGW=False
MSVC=False
# Load the native libtcod shared library; paths are relative to the
# current working directory at import time.
if sys.platform.find('linux') != -1:
    _lib = ctypes.cdll['./tools/libtcod/libtcod.so']
    LINUX=True
elif sys.platform.find('darwin') != -1:
    _lib = ctypes.cdll['./tools/libtcod/libtcod.dylib']
    MAC = True
elif sys.platform.find('haiku') != -1:
    _lib = ctypes.cdll['./tools/libtcod/libtcod.so']
    HAIKU = True
else:
    # Windows: try the MinGW build first, fall back to the MSVC build.
    try:
        _lib = ctypes.cdll['./tools/libtcod/libtcod-mingw.dll']
        MINGW=True
    except WindowsError:
        _lib = ctypes.cdll['./tools/libtcod/libtcod-VS.dll']
        MSVC=True
    # On Windows, ctypes doesn't work well with function returning structs,
    # so we have to use the _wrapper functions instead
    _lib.TCOD_color_multiply = _lib.TCOD_color_multiply_wrapper
    _lib.TCOD_color_add = _lib.TCOD_color_add_wrapper
    _lib.TCOD_color_multiply_scalar = _lib.TCOD_color_multiply_scalar_wrapper
    _lib.TCOD_color_subtract = _lib.TCOD_color_subtract_wrapper
    _lib.TCOD_color_lerp = _lib.TCOD_color_lerp_wrapper
    _lib.TCOD_console_get_default_background = _lib.TCOD_console_get_default_background_wrapper
    _lib.TCOD_console_get_default_foreground = _lib.TCOD_console_get_default_foreground_wrapper
    _lib.TCOD_console_get_char_background = _lib.TCOD_console_get_char_background_wrapper
    _lib.TCOD_console_get_char_foreground = _lib.TCOD_console_get_char_foreground_wrapper
    _lib.TCOD_console_get_fading_color = _lib.TCOD_console_get_fading_color_wrapper
    _lib.TCOD_image_get_pixel = _lib.TCOD_image_get_pixel_wrapper
    _lib.TCOD_image_get_mipmap_pixel = _lib.TCOD_image_get_mipmap_pixel_wrapper
    _lib.TCOD_parser_get_color_property = _lib.TCOD_parser_get_color_property_wrapper
# Library version constants (must match the bundled native library).
HEXVERSION = 0x010501
STRVERSION = "1.5.1"
TECHVERSION = 0x01050103
############################
# color module
############################
class Color(Structure):
    """RGB color triple of three unsigned bytes (mirror of TCOD_color_t).

    Supports equality, addition, subtraction and multiplication (by another
    Color or by a scalar) through the libtcod C color functions, plus
    indexing by component name ('r'/'g'/'b') or position (0/1/2).
    """
    _fields_ = [('r', c_uint8),
                ('g', c_uint8),
                ('b', c_uint8),
                ]

    def __eq__(self, other):
        # Delegate component-wise comparison to the C library.
        return _lib.TCOD_color_equals(self, other)

    def __mul__(self, other):
        # Color * Color multiplies per channel; Color * number scales all channels.
        if isinstance(other, Color):
            return _lib.TCOD_color_multiply(self, other)
        return _lib.TCOD_color_multiply_scalar(self, c_float(other))

    def __add__(self, other):
        return _lib.TCOD_color_add(self, other)

    def __sub__(self, other):
        return _lib.TCOD_color_subtract(self, other)

    def __repr__(self):
        return "Color(%d,%d,%d)" % (self.r, self.g, self.b)

    def __getitem__(self, key):
        # Accept either an attribute name ('r') or a positional index (0).
        attr = key if type(key) == str else "rgb"[key]
        return getattr(self, attr)

    def __setitem__(self, key, value):
        attr = key if type(key) == str else "rgb"[key]
        setattr(self, attr, value)

    def __iter__(self):
        # Yield components in r, g, b order so tuple(c) / unpacking work.
        for component in (self.r, self.g, self.b):
            yield component
# Should be valid on any platform, check it! Has to be done after Color is defined.
if MAC:
from cprotos import setup_protos
setup_protos(_lib)
_lib.TCOD_color_equals.restype = c_bool
_lib.TCOD_color_multiply.restype = Color
_lib.TCOD_color_multiply_scalar.restype = Color
_lib.TCOD_color_add.restype = Color
_lib.TCOD_color_subtract.restype = Color
# default colors
# grey levels
black=Color(0,0,0)
darkest_grey=Color(31,31,31)
darker_grey=Color(63,63,63)
dark_grey=Color(95,95,95)
grey=Color(127,127,127)
light_grey=Color(159,159,159)
lighter_grey=Color(191,191,191)
lightest_grey=Color(223,223,223)
darkest_gray=Color(31,31,31)
darker_gray=Color(63,63,63)
dark_gray=Color(95,95,95)
gray=Color(127,127,127)
light_gray=Color(159,159,159)
lighter_gray=Color(191,191,191)
lightest_gray=Color(223,223,223)
white=Color(255,255,255)
# sepia
darkest_sepia=Color(31,24,15)
darker_sepia=Color(63,50,31)
dark_sepia=Color(94,75,47)
sepia=Color(127,101,63)
light_sepia=Color(158,134,100)
lighter_sepia=Color(191,171,143)
lightest_sepia=Color(222,211,195)
#standard colors
red=Color(255,0,0)
flame=Color(255,63,0)
orange=Color(255,127,0)
amber=Color(255,191,0)
yellow=Color(255,255,0)
lime=Color(191,255,0)
chartreuse=Color(127,255,0)
green=Color(0,255,0)
sea=Color(0,255,127)
turquoise=Color(0,255,191)
cyan=Color(0,255,255)
sky=Color(0,191,255)
azure=Color(0,127,255)
blue=Color(0,0,255)
han=Color(63,0,255)
violet=Color(127,0,255)
purple=Color(191,0,255)
fuchsia=Color(255,0,255)
magenta=Color(255,0,191)
pink=Color(255,0,127)
crimson=Color(255,0,63)
# dark colors
dark_red=Color(191,0,0)
dark_flame=Color(191,47,0)
dark_orange=Color(191,95,0)
dark_amber=Color(191,143,0)
dark_yellow=Color(191,191,0)
dark_lime=Color(143,191,0)
dark_chartreuse=Color(95,191,0)
dark_green=Color(0,191,0)
dark_sea=Color(0,191,95)
dark_turquoise=Color(0,191,143)
dark_cyan=Color(0,191,191)
dark_sky=Color(0,143,191)
dark_azure=Color(0,95,191)
dark_blue=Color(0,0,191)
dark_han=Color(47,0,191)
dark_violet=Color(95,0,191)
dark_purple=Color(143,0,191)
dark_fuchsia=Color(191,0,191)
dark_magenta=Color(191,0,143)
dark_pink=Color(191,0,95)
dark_crimson=Color(191,0,47)
# darker colors
darker_red=Color(127,0,0)
darker_flame=Color(127,31,0)
darker_orange=Color(127,63,0)
darker_amber=Color(127,95,0)
darker_yellow=Color(127,127,0)
darker_lime=Color(95,127,0)
darker_chartreuse=Color(63,127,0)
darker_green=Color(0,127,0)
darker_sea=Color(0,127,63)
darker_turquoise=Color(0,127,95)
darker_cyan=Color(0,127,127)
darker_sky=Color(0,95,127)
darker_azure=Color(0,63,127)
darker_blue=Color(0,0,127)
darker_han=Color(31,0,127)
darker_violet=Color(63,0,127)
darker_purple=Color(95,0,127)
darker_fuchsia=Color(127,0,127)
darker_magenta=Color(127,0,95)
darker_pink=Color(127,0,63)
darker_crimson=Color(127,0,31)
# darkest colors
darkest_red=Color(63,0,0)
darkest_flame=Color(63,15,0)
darkest_orange=Color(63,31,0)
darkest_amber=Color(63,47,0)
darkest_yellow=Color(63,63,0)
darkest_lime=Color(47,63,0)
darkest_chartreuse=Color(31,63,0)
darkest_green=Color(0,63,0)
darkest_sea=Color(0,63,31)
darkest_turquoise=Color(0,63,47)
darkest_cyan=Color(0,63,63)
darkest_sky=Color(0,47,63)
darkest_azure=Color(0,31,63)
darkest_blue=Color(0,0,63)
darkest_han=Color(15,0,63)
darkest_violet=Color(31,0,63)
darkest_purple=Color(47,0,63)
darkest_fuchsia=Color(63,0,63)
darkest_magenta=Color(63,0,47)
darkest_pink=Color(63,0,31)
darkest_crimson=Color(63,0,15)
# light colors
light_red=Color(255,114,114)
light_flame=Color(255,149,114)
light_orange=Color(255,184,114)
light_amber=Color(255,219,114)
light_yellow=Color(255,255,114)
light_lime=Color(219,255,114)
light_chartreuse=Color(184,255,114)
light_green=Color(114,255,114)
light_sea=Color(114,255,184)
light_turquoise=Color(114,255,219)
light_cyan=Color(114,255,255)
light_sky=Color(114,219,255)
light_azure=Color(114,184,255)
light_blue=Color(114,114,255)
light_han=Color(149,114,255)
light_violet=Color(184,114,255)
light_purple=Color(219,114,255)
light_fuchsia=Color(255,114,255)
light_magenta=Color(255,114,219)
light_pink=Color(255,114,184)
light_crimson=Color(255,114,149)
#lighter colors
lighter_red=Color(255,165,165)
lighter_flame=Color(255,188,165)
lighter_orange=Color(255,210,165)
lighter_amber=Color(255,232,165)
lighter_yellow=Color(255,255,165)
lighter_lime=Color(232,255,165)
lighter_chartreuse=Color(210,255,165)
lighter_green=Color(165,255,165)
lighter_sea=Color(165,255,210)
lighter_turquoise=Color(165,255,232)
lighter_cyan=Color(165,255,255)
lighter_sky=Color(165,232,255)
lighter_azure=Color(165,210,255)
lighter_blue=Color(165,165,255)
lighter_han=Color(188,165,255)
lighter_violet=Color(210,165,255)
lighter_purple=Color(232,165,255)
lighter_fuchsia=Color(255,165,255)
lighter_magenta=Color(255,165,232)
lighter_pink=Color(255,165,210)
lighter_crimson=Color(255,165,188)
# lightest colors
lightest_red=Color(255,191,191)
lightest_flame=Color(255,207,191)
lightest_orange=Color(255,223,191)
lightest_amber=Color(255,239,191)
lightest_yellow=Color(255,255,191)
lightest_lime=Color(239,255,191)
lightest_chartreuse=Color(223,255,191)
lightest_green=Color(191,255,191)
lightest_sea=Color(191,255,223)
lightest_turquoise=Color(191,255,239)
lightest_cyan=Color(191,255,255)
lightest_sky=Color(191,239,255)
lightest_azure=Color(191,223,255)
lightest_blue=Color(191,191,255)
lightest_han=Color(207,191,255)
lightest_violet=Color(223,191,255)
lightest_purple=Color(239,191,255)
lightest_fuchsia=Color(255,191,255)
lightest_magenta=Color(255,191,239)
lightest_pink=Color(255,191,223)
lightest_crimson=Color(255,191,207)
# desaturated colors
desaturated_red=Color(127,63,63)
desaturated_flame=Color(127,79,63)
desaturated_orange=Color(127,95,63)
desaturated_amber=Color(127,111,63)
desaturated_yellow=Color(127,127,63)
desaturated_lime=Color(111,127,63)
desaturated_chartreuse=Color(95,127,63)
desaturated_green=Color(63,127,63)
desaturated_sea=Color(63,127,95)
desaturated_turquoise=Color(63,127,111)
desaturated_cyan=Color(63,127,127)
desaturated_sky=Color(63,111,127)
desaturated_azure=Color(63,95,127)
desaturated_blue=Color(63,63,127)
desaturated_han=Color(79,63,127)
desaturated_violet=Color(95,63,127)
desaturated_purple=Color(111,63,127)
desaturated_fuchsia=Color(127,63,127)
desaturated_magenta=Color(127,63,111)
desaturated_pink=Color(127,63,95)
desaturated_crimson=Color(127,63,79)
# metallic
brass=Color(191,151,96)
copper=Color(197,136,124)
gold=Color(229,191,0)
silver=Color(203,203,203)
# miscellaneous
celadon=Color(172,255,175)
peach=Color(255,159,127)
# color functions
_lib.TCOD_color_lerp.restype = Color
def color_lerp(c1, c2, a):
return _lib.TCOD_color_lerp(c1, c2, c_float(a))
def color_set_hsv(c, h, s, v):
_lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))
def color_get_hsv(c):
h = c_float()
s = c_float()
v = c_float()
_lib.TCOD_color_get_HSV(c, byref(h), byref(s), byref(v))
return h.value, s.value, v.value
def color_scale_HSV(c, scoef, vcoef) :
_lib.TCOD_color_scale_HSV(byref(c),c_float(scoef),c_float(vcoef))
def color_gen_map(colors, indexes):
    """Build a color gradient map.

    colors: the key colors of the gradient.
    indexes: for each key color, its position in the resulting map
        (assumed ascending; the last index fixes the map size).

    Returns a ctypes array of Color of length max(indexes) + 1, with the
    cells between key colors filled in by the C library (interpolated).
    """
    ccolors = (Color * len(colors))(*colors)
    cindexes = (c_int * len(indexes))(*indexes)
    cres = (Color * (max(indexes) + 1))()
    _lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)
    return cres
############################
# console module
############################
class Key(Structure):
    """Keyboard event state (mirror of TCOD_key_t).

    Filled in by console_wait_for_keypress / console_check_for_keypress
    and sys_check_for_event / sys_wait_for_event.
    """
    _fields_=[('vk', c_int),       # key code, one of the KEY_* constants
              ('c', c_uint8),      # printable character when vk == KEY_CHAR
              ('pressed', c_bool), # True on press, False on release
              ('lalt', c_bool),    # modifier states at event time
              ('lctrl', c_bool),
              ('ralt', c_bool),
              ('rctrl', c_bool),
              ('shift', c_bool),
              ]
class ConsoleBuffer:
    """Simple cell buffer allowing direct (fast) access to console cells.

    Cells are kept as seven flat per-channel lists (back_r/g/b, fore_r/g/b,
    char) indexed row-major by ``width * y + x``, and pushed to a real
    console in one shot by :meth:`blit` via libtcod's "fill" functions.

    Changes vs. the original: removed an unused ``n`` local in __init__ and
    an unused ``struct.Struct`` allocated on every blit() (dead code).
    """

    def __init__(self, width, height, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
        """Initialize with the given width and height.

        Fill values are optional and default to black with space characters.
        """
        self.width = width
        self.height = height
        self.clear(back_r, back_g, back_b, fore_r, fore_g, fore_b, char)

    def clear(self, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
        """Reset every cell.

        Fill values are optional and default to black with space characters.
        """
        n = self.width * self.height
        self.back_r = [back_r] * n
        self.back_g = [back_g] * n
        self.back_b = [back_b] * n
        self.fore_r = [fore_r] * n
        self.fore_g = [fore_g] * n
        self.fore_b = [fore_b] * n
        self.char = [ord(char)] * n

    def copy(self):
        """Return an independent copy of this ConsoleBuffer."""
        other = ConsoleBuffer(0, 0)
        other.width = self.width
        other.height = self.height
        other.back_r = list(self.back_r)  # make explicit copies of all lists
        other.back_g = list(self.back_g)
        other.back_b = list(self.back_b)
        other.fore_r = list(self.fore_r)
        other.fore_g = list(self.fore_g)
        other.fore_b = list(self.fore_b)
        other.char = list(self.char)
        return other

    def set_fore(self, x, y, r, g, b, char):
        """Set the character and foreground color of one cell."""
        i = self.width * y + x
        self.fore_r[i] = r
        self.fore_g[i] = g
        self.fore_b[i] = b
        self.char[i] = ord(char)

    def set_back(self, x, y, r, g, b):
        """Set the background color of one cell."""
        i = self.width * y + x
        self.back_r[i] = r
        self.back_g[i] = g
        self.back_b[i] = b

    def set(self, x, y, back_r, back_g, back_b, fore_r, fore_g, fore_b, char):
        """Set the background color, foreground color and character of one cell."""
        i = self.width * y + x
        self.back_r[i] = back_r
        self.back_g[i] = back_g
        self.back_b[i] = back_b
        self.fore_r[i] = fore_r
        self.fore_g[i] = fore_g
        self.fore_b[i] = fore_b
        self.char[i] = ord(char)

    def blit(self, dest, fill_fore=True, fill_back=True):
        """Write the whole buffer to console ``dest`` with libtcod's "fill"
        functions.

        Raises ValueError if the destination size does not match.
        """
        if (console_get_width(dest) != self.width or
            console_get_height(dest) != self.height):
            raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')
        if fill_back:
            _lib.TCOD_console_fill_background(dest, (c_int * len(self.back_r))(*self.back_r), (c_int * len(self.back_g))(*self.back_g), (c_int * len(self.back_b))(*self.back_b))
        if fill_fore:
            _lib.TCOD_console_fill_foreground(dest, (c_int * len(self.fore_r))(*self.fore_r), (c_int * len(self.fore_g))(*self.fore_g), (c_int * len(self.fore_b))(*self.fore_b))
        _lib.TCOD_console_fill_char(dest, (c_int * len(self.char))(*self.char))
_lib.TCOD_console_credits_render.restype = c_bool
_lib.TCOD_console_is_fullscreen.restype = c_bool
_lib.TCOD_console_is_window_closed.restype = c_bool
_lib.TCOD_console_get_default_background.restype = Color
_lib.TCOD_console_get_default_foreground.restype = Color
_lib.TCOD_console_get_char_background.restype = Color
_lib.TCOD_console_get_char_foreground.restype = Color
_lib.TCOD_console_get_fading_color.restype = Color
_lib.TCOD_console_is_key_pressed.restype = c_bool
# Background rendering modes (TCOD_bkgnd_flag_t): how a cell's background
# color is combined with the color being written.
BKGND_NONE = 0
BKGND_SET = 1
BKGND_MULTIPLY = 2
BKGND_LIGHTEN = 3
BKGND_DARKEN = 4
BKGND_SCREEN = 5
BKGND_COLOR_DODGE = 6
BKGND_COLOR_BURN = 7
BKGND_ADD = 8
BKGND_ADDA = 9
BKGND_BURN = 10
BKGND_OVERLAY = 11
BKGND_ALPH = 12
BKGND_DEFAULT = 13

def BKGND_ALPHA(a):
    # Encode an alpha-blend flag: the transparency level a (0.0-1.0) is
    # scaled to 0-255 and packed into the high byte of the flag value.
    level = int(a * 255)
    return BKGND_ALPH | (level << 8)

def BKGND_ADDALPHA(a):
    # Same packing as BKGND_ALPHA, but for additive alpha blending.
    level = int(a * 255)
    return BKGND_ADDA | (level << 8)
# non blocking key events types
KEY_PRESSED = 1
KEY_RELEASED = 2
# key codes
KEY_NONE = 0
KEY_ESCAPE = 1
KEY_BACKSPACE = 2
KEY_TAB = 3
KEY_ENTER = 4
KEY_SHIFT = 5
KEY_CONTROL = 6
KEY_ALT = 7
KEY_PAUSE = 8
KEY_CAPSLOCK = 9
KEY_PAGEUP = 10
KEY_PAGEDOWN = 11
KEY_END = 12
KEY_HOME = 13
KEY_UP = 14
KEY_LEFT = 15
KEY_RIGHT = 16
KEY_DOWN = 17
KEY_PRINTSCREEN = 18
KEY_INSERT = 19
KEY_DELETE = 20
KEY_LWIN = 21
KEY_RWIN = 22
KEY_APPS = 23
KEY_0 = 24
KEY_1 = 25
KEY_2 = 26
KEY_3 = 27
KEY_4 = 28
KEY_5 = 29
KEY_6 = 30
KEY_7 = 31
KEY_8 = 32
KEY_9 = 33
KEY_KP0 = 34
KEY_KP1 = 35
KEY_KP2 = 36
KEY_KP3 = 37
KEY_KP4 = 38
KEY_KP5 = 39
KEY_KP6 = 40
KEY_KP7 = 41
KEY_KP8 = 42
KEY_KP9 = 43
KEY_KPADD = 44
KEY_KPSUB = 45
KEY_KPDIV = 46
KEY_KPMUL = 47
KEY_KPDEC = 48
KEY_KPENTER = 49
KEY_F1 = 50
KEY_F2 = 51
KEY_F3 = 52
KEY_F4 = 53
KEY_F5 = 54
KEY_F6 = 55
KEY_F7 = 56
KEY_F8 = 57
KEY_F9 = 58
KEY_F10 = 59
KEY_F11 = 60
KEY_F12 = 61
KEY_NUMLOCK = 62
KEY_SCROLLLOCK = 63
KEY_SPACE = 64
KEY_CHAR = 65
# special chars
# single walls
CHAR_HLINE = 196
CHAR_VLINE = 179
CHAR_NE = 191
CHAR_NW = 218
CHAR_SE = 217
CHAR_SW = 192
CHAR_TEEW = 180
CHAR_TEEE = 195
CHAR_TEEN = 193
CHAR_TEES = 194
CHAR_CROSS = 197
# double walls
CHAR_DHLINE = 205
CHAR_DVLINE = 186
CHAR_DNE = 187
CHAR_DNW = 201
CHAR_DSE = 188
CHAR_DSW = 200
CHAR_DTEEW = 185
CHAR_DTEEE = 204
CHAR_DTEEN = 202
CHAR_DTEES = 203
CHAR_DCROSS = 206
# blocks
CHAR_BLOCK1 = 176
CHAR_BLOCK2 = 177
CHAR_BLOCK3 = 178
# arrows
CHAR_ARROW_N = 24
CHAR_ARROW_S = 25
CHAR_ARROW_E = 26
CHAR_ARROW_W = 27
# arrows without tail
CHAR_ARROW2_N = 30
CHAR_ARROW2_S = 31
CHAR_ARROW2_E = 16
CHAR_ARROW2_W = 17
# double arrows
CHAR_DARROW_H = 29
CHAR_DARROW_V = 18
# GUI stuff
CHAR_CHECKBOX_UNSET = 224
CHAR_CHECKBOX_SET = 225
CHAR_RADIO_UNSET = 9
CHAR_RADIO_SET = 10
# sub-pixel resolution kit
CHAR_SUBP_NW = 226
CHAR_SUBP_NE = 227
CHAR_SUBP_N = 228
CHAR_SUBP_SE = 229
CHAR_SUBP_DIAG = 230
CHAR_SUBP_E = 231
CHAR_SUBP_SW = 232
# misc characters
CHAR_BULLET = 7
CHAR_BULLET_INV = 8
CHAR_BULLET_SQUARE = 254
CHAR_CENT = 189
CHAR_CLUB = 5
CHAR_COPYRIGHT = 184
CHAR_CURRENCY = 207
CHAR_DIAMOND = 4
CHAR_DIVISION = 246
CHAR_EXCLAM_DOUBLE = 19
CHAR_FEMALE = 12
CHAR_FUNCTION = 159
CHAR_GRADE = 248
CHAR_HALF = 171
CHAR_HEART = 3
CHAR_LIGHT = 15
CHAR_MALE = 11
CHAR_MULTIPLICATION = 158
CHAR_NOTE = 13
CHAR_NOTE_DOUBLE = 14
CHAR_ONE_QUARTER = 172
CHAR_PILCROW = 20
CHAR_POUND = 156
CHAR_POW1 = 251
CHAR_POW2 = 253
CHAR_POW3 = 252
CHAR_RESERVED = 169
CHAR_SECTION = 21
CHAR_SMILIE = 1
CHAR_SMILIE_INV = 2
CHAR_SPADE = 6
CHAR_THREE_QUARTERS = 243
CHAR_UMLAUT = 249
CHAR_YEN = 190
# font flags
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_TYPE_GRAYSCALE = 4
FONT_LAYOUT_TCOD = 8
# color control codes
COLCTRL_1=1
COLCTRL_2=2
COLCTRL_3=3
COLCTRL_4=4
COLCTRL_5=5
COLCTRL_NUMBER=5
COLCTRL_FORE_RGB=6
COLCTRL_BACK_RGB=7
COLCTRL_STOP=8
# renderers
RENDERER_GLSL=0
RENDERER_OPENGL=1
RENDERER_SDL=2
NB_RENDERERS=3
# alignment
LEFT=0
RIGHT=1
CENTER=2
# initializing the console
def console_init_root(w, h, title, fullscreen=False, renderer=RENDERER_SDL):
_lib.TCOD_console_init_root(w, h, c_char_p(title), fullscreen, renderer)
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
def console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):
_lib.TCOD_console_set_custom_font(c_char_p(fontFile), flags, nb_char_horiz, nb_char_vertic)
def console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):
if type(asciiCode) == str or type(asciiCode) == bytes:
_lib.TCOD_console_map_ascii_code_to_font(ord(asciiCode), fontCharX,
fontCharY)
else:
_lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX,
fontCharY)
def console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,
                                    fontCharY):
    """Map nbCodes contiguous ASCII codes, starting at firstAsciiCode, to
    consecutive glyphs of the font starting at (fontCharX, fontCharY).

    firstAsciiCode may be given as a one-character str/bytes or as an int.

    Bug fix: the original tested the undefined name ``asciiCode`` (copied
    from console_map_ascii_code_to_font), raising NameError whenever
    firstAsciiCode was not a str.
    """
    if type(firstAsciiCode) == str or type(firstAsciiCode) == bytes:
        _lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes,
                                                  fontCharX, fontCharY)
    else:
        _lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes,
                                                  fontCharX, fontCharY)
def console_map_string_to_font(s, fontCharX, fontCharY):
if type(s) == bytes:
_lib.TCOD_console_map_string_to_font(s, fontCharX, fontCharY)
else:
_lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)
def console_is_fullscreen():
return _lib.TCOD_console_is_fullscreen()
def console_set_fullscreen(fullscreen):
_lib.TCOD_console_set_fullscreen(c_int(fullscreen))
def console_is_window_closed():
return _lib.TCOD_console_is_window_closed()
def console_set_window_title(title):
_lib.TCOD_console_set_window_title(c_char_p(title))
def console_credits():
_lib.TCOD_console_credits()
def console_credits_reset():
_lib.TCOD_console_credits_reset()
def console_credits_render(x, y, alpha):
return _lib.TCOD_console_credits_render(x, y, c_int(alpha))
def console_flush():
_lib.TCOD_console_flush()
# drawing on a console
def console_set_default_background(con, col):
_lib.TCOD_console_set_default_background(con, col)
def console_set_default_foreground(con, col):
_lib.TCOD_console_set_default_foreground(con, col)
def console_clear(con):
return _lib.TCOD_console_clear(con)
def console_put_char(con, x, y, c, flag=BKGND_DEFAULT):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char(con, x, y, ord(c), flag)
else:
_lib.TCOD_console_put_char(con, x, y, c, flag)
def console_put_char_ex(con, x, y, c, fore, back):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char_ex(con, x, y, ord(c), fore, back)
else:
_lib.TCOD_console_put_char_ex(con, x, y, c, fore, back)
def console_set_char_background(con, x, y, col, flag=BKGND_SET):
_lib.TCOD_console_set_char_background(con, x, y, col, flag)
def console_set_char_foreground(con, x, y, col):
_lib.TCOD_console_set_char_foreground(con, x, y, col)
def console_set_char(con, x, y, c):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_set_char(con, x, y, ord(c))
else:
_lib.TCOD_console_set_char(con, x, y, c)
def console_set_background_flag(con, flag):
_lib.TCOD_console_set_background_flag(con, c_int(flag))
def console_get_background_flag(con):
return _lib.TCOD_console_get_background_flag(con)
def console_set_alignment(con, alignment):
_lib.TCOD_console_set_alignment(con, c_int(alignment))
def console_get_alignment(con):
return _lib.TCOD_console_get_alignment(con)
def console_print(con, x, y, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print(c_void_p(con), x, y, c_char_p(fmt))
else:
_lib.TCOD_console_print_utf(c_void_p(con), x, y, fmt)
def console_print_ex(con, x, y, flag, alignment, fmt):
if type(fmt) == bytes:
_lib.TCOD_console_print_ex(c_void_p(con), x, y, flag, alignment, c_char_p(fmt))
else:
_lib.TCOD_console_print_ex_utf(c_void_p(con), x, y, flag, alignment, fmt)
def console_print_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_print_rect_ex(con, x, y, w, h, flag, alignment, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_print_rect_ex(c_void_p(con), x, y, w, h, flag, alignment, c_char_p(fmt))
else:
return _lib.TCOD_console_print_rect_ex_utf(c_void_p(con), x, y, w, h, flag, alignment, fmt)
def console_get_height_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes:
return _lib.TCOD_console_get_height_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
else:
return _lib.TCOD_console_get_height_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_rect(con, x, y, w, h, clr, flag=BKGND_DEFAULT):
_lib.TCOD_console_rect(con, x, y, w, h, c_int(clr), flag)
def console_hline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_hline( con, x, y, l, flag)
def console_vline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_vline( con, x, y, l, flag)
def console_print_frame(con, x, y, w, h, clear=True, flag=BKGND_DEFAULT, fmt=0):
_lib.TCOD_console_print_frame(c_void_p(con), x, y, w, h, c_int(clear), flag, c_char_p(fmt))
def console_set_color_control(con,fore,back) :
_lib.TCOD_console_set_color_control(con,fore,back)
def console_get_default_background(con):
return _lib.TCOD_console_get_default_background(con)
def console_get_default_foreground(con):
return _lib.TCOD_console_get_default_foreground(con)
def console_get_char_background(con, x, y):
return _lib.TCOD_console_get_char_background(con, x, y)
def console_get_char_foreground(con, x, y):
return _lib.TCOD_console_get_char_foreground(con, x, y)
def console_get_char(con, x, y):
return _lib.TCOD_console_get_char(con, x, y)
def console_set_fade(fade, fadingColor):
_lib.TCOD_console_set_fade(fade, fadingColor)
##_lib.TCOD_console_set_fade_wrapper(fade, fadingColor)
def console_get_fade():
    """Return the current fading amount (0-255) set by console_set_fade().

    Bug fix: ctypes converts integer return values straight to a Python
    int, so the original ``.value`` access always raised AttributeError.
    """
    return _lib.TCOD_console_get_fade()
def console_get_fading_color():
return _lib.TCOD_console_get_fading_color()
# handling keyboard input
def console_wait_for_keypress(flush):
k=Key()
_lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_bool(flush))
return k
def console_check_for_keypress(flags=KEY_RELEASED):
k=Key()
_lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))
return k
def console_is_key_pressed(key):
return _lib.TCOD_console_is_key_pressed(key)
def console_set_keyboard_repeat(initial_delay, interval):
_lib.TCOD_console_set_keyboard_repeat(initial_delay, interval)
def console_disable_keyboard_repeat():
_lib.TCOD_console_disable_keyboard_repeat()
# using offscreen consoles
def console_new(w, h):
return _lib.TCOD_console_new(w, h)
def console_from_file(filename):
return _lib.TCOD_console_from_file(filename)
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
def console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):
_lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))
def console_set_key_color(con, col):
_lib.TCOD_console_set_key_color(con, col)
def console_delete(con):
_lib.TCOD_console_delete(con)
# fast color filling
def console_fill_foreground(con,r,g,b) :
    """Set the foreground color of every cell of console ``con`` at once.

    r, g, b: sequences (lists or numpy arrays) of width*height color
    components each, laid out row-major.

    Raises TypeError if the three sequences differ in length.
    """
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')
    if (numpy_available and isinstance(r, numpy.ndarray) and
        isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays: pass the raw buffers through numpy's ctypes interface.
        # Bug fix: use numpy.intc (C int) rather than numpy.int_ (C long) --
        # on LP64 platforms long is 64-bit and would not match POINTER(c_int).
        r = numpy.ascontiguousarray(r, dtype=numpy.intc)
        g = numpy.ascontiguousarray(g, dtype=numpy.intc)
        b = numpy.ascontiguousarray(b, dtype=numpy.intc)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)
    _lib.TCOD_console_fill_foreground(con, cr, cg, cb)
def console_fill_background(con,r,g,b) :
    """Set the background color of every cell of console ``con`` at once.

    r, g, b: sequences (lists or numpy arrays) of width*height color
    components each, laid out row-major.

    Raises TypeError if the three sequences differ in length.
    """
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')
    if (numpy_available and isinstance(r, numpy.ndarray) and
        isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays: pass the raw buffers through numpy's ctypes interface.
        # Bug fix: use numpy.intc (C int) rather than numpy.int_ (C long) --
        # on LP64 platforms long is 64-bit and would not match POINTER(c_int).
        r = numpy.ascontiguousarray(r, dtype=numpy.intc)
        g = numpy.ascontiguousarray(g, dtype=numpy.intc)
        b = numpy.ascontiguousarray(b, dtype=numpy.intc)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)
    _lib.TCOD_console_fill_background(con, cr, cg, cb)
def console_fill_char(con,arr) :
    """Set the character of every cell of console ``con`` at once.

    arr: a sequence (list or numpy array) of width*height character codes,
    laid out row-major.
    """
    if (numpy_available and isinstance(arr, numpy.ndarray) ):
        # numpy array: pass the raw buffer through numpy's ctypes interface.
        # Bug fix: numpy.intc (C int) keeps the element size in sync with
        # c_int on all platforms, unlike numpy.int_ (C long, 64-bit on LP64).
        arr = numpy.ascontiguousarray(arr, dtype=numpy.intc)
        carr = arr.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using the struct module
        carr = struct.pack('%di' % len(arr), *arr)
    _lib.TCOD_console_fill_char(con, carr)
def console_load_asc(con, filename) :
_lib.TCOD_console_load_asc(con,filename)
def console_save_asc(con, filename) :
_lib.TCOD_console_save_asc(con,filename)
def console_load_apf(con, filename) :
_lib.TCOD_console_load_apf(con,filename)
def console_save_apf(con, filename) :
_lib.TCOD_console_save_apf(con,filename)
############################
# sys module
############################
_lib.TCOD_sys_get_last_frame_length.restype = c_float
_lib.TCOD_sys_elapsed_seconds.restype = c_float
# high precision time functions
def sys_set_fps(fps):
_lib.TCOD_sys_set_fps(fps)
def sys_get_fps():
return _lib.TCOD_sys_get_fps()
def sys_get_last_frame_length():
return _lib.TCOD_sys_get_last_frame_length()
def sys_sleep_milli(val):
_lib.TCOD_sys_sleep_milli(c_uint(val))
def sys_elapsed_milli():
return _lib.TCOD_sys_elapsed_milli()
def sys_elapsed_seconds():
return _lib.TCOD_sys_elapsed_seconds()
def sys_set_renderer(renderer):
_lib.TCOD_sys_set_renderer(renderer)
def sys_get_renderer():
return _lib.TCOD_sys_get_renderer()
# easy screenshots
def sys_save_screenshot(name=0):
_lib.TCOD_sys_save_screenshot(c_char_p(name))
# custom fullscreen resolution
def sys_force_fullscreen_resolution(width, height):
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
def sys_get_current_resolution():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_current_resolution(byref(w), byref(h))
return w.value, h.value
def sys_get_char_size():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_char_size(byref(w), byref(h))
return w.value, h.value
# update font bitmap
def sys_update_char(asciiCode, fontx, fonty, img, x, y) :
_lib.TCOD_sys_update_char(c_int(asciiCode),c_int(fontx),c_int(fonty),img,c_int(x),c_int(y))
# custom SDL post renderer
SDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)
def sys_register_SDL_renderer(callback):
global sdl_renderer_func
sdl_renderer_func = SDL_RENDERER_FUNC(callback)
_lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)
# events
EVENT_KEY_PRESS=1
EVENT_KEY_RELEASE=2
EVENT_KEY=EVENT_KEY_PRESS|EVENT_KEY_RELEASE
EVENT_MOUSE_MOVE=4
EVENT_MOUSE_PRESS=8
EVENT_MOUSE_RELEASE=16
EVENT_MOUSE=EVENT_MOUSE_MOVE|EVENT_MOUSE_PRESS|EVENT_MOUSE_RELEASE
EVENT_ANY=EVENT_KEY|EVENT_MOUSE
def sys_check_for_event(mask,k,m) :
return _lib.TCOD_sys_check_for_event(c_int(mask),byref(k),byref(m))
def sys_wait_for_event(mask,k,m,flush) :
return _lib.TCOD_sys_wait_for_event(c_int(mask),byref(k),byref(m),c_bool(flush))
############################
# line module
############################
_lib.TCOD_line_step.restype = c_bool
_lib.TCOD_line.restype=c_bool
_lib.TCOD_line_step_mt.restype = c_bool
def line_init(xo, yo, xd, yd):
_lib.TCOD_line_init(xo, yo, xd, yd)
def line_step():
    """Advance the line started with line_init() by one cell.

    Returns the next (x, y) coordinates while the line is still in
    progress, or (None, None) once TCOD_line_step reports that the
    destination has been reached.
    """
    x = c_int()
    y = c_int()
    ret = _lib.TCOD_line_step(byref(x), byref(y))
    # TCOD_line_step returns true when the line is finished.
    if not ret:
        return x.value, y.value
    return None,None
def line(xo,yo,xd,yd,py_callback) :
LINE_CBK_FUNC=CFUNCTYPE(c_bool,c_int,c_int)
c_callback=LINE_CBK_FUNC(py_callback)
return _lib.TCOD_line(xo,yo,xd,yd,c_callback)
def line_iter(xo, yo, xd, yd):
    """Iterate over the cells of a Bresenham line from (xo, yo) to (xd, yd).

    Yields (x, y) tuples, starting with the origin itself.  Uses the
    thread-safe "_mt" C variants with a private state buffer, so several
    iterators can be active at the same time.
    """
    data = (c_int * 9)() # struct TCOD_bresenham_data_t (9 ints of state)
    _lib.TCOD_line_init_mt(xo, yo, xd, yd, data)
    x = c_int(xo)
    y = c_int(yo)
    done = False
    while not done:
        yield x.value, y.value
        # TCOD_line_step_mt returns true once the destination was yielded.
        done = _lib.TCOD_line_step_mt(byref(x), byref(y), data)
############################
# image module
############################
_lib.TCOD_image_is_pixel_transparent.restype = c_bool
_lib.TCOD_image_get_pixel.restype = Color
_lib.TCOD_image_get_mipmap_pixel.restype = Color
def image_new(width, height):
return _lib.TCOD_image_new(width, height)
def image_clear(image,col) :
_lib.TCOD_image_clear(image,col)
def image_invert(image) :
_lib.TCOD_image_invert(image)
def image_hflip(image) :
_lib.TCOD_image_hflip(image)
def image_rotate90(image, num=1) :
_lib.TCOD_image_rotate90(image,num)
def image_vflip(image) :
_lib.TCOD_image_vflip(image)
def image_scale(image, neww, newh) :
_lib.TCOD_image_scale(image,c_int(neww),c_int(newh))
def image_set_key_color(image,col) :
_lib.TCOD_image_set_key_color(image,col)
def image_get_alpha(image,x,y) :
return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))
def image_is_pixel_transparent(image,x,y) :
return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y))
def image_load(filename):
return _lib.TCOD_image_load(c_char_p(filename))
def image_from_console(console):
return _lib.TCOD_image_from_console(console)
def image_refresh_console(image, console):
_lib.TCOD_image_refresh_console(image, console)
def image_get_size(image):
w=c_int()
h=c_int()
_lib.TCOD_image_get_size(image, byref(w), byref(h))
return w.value, h.value
def image_get_pixel(image, x, y):
return _lib.TCOD_image_get_pixel(image, x, y)
def image_get_mipmap_pixel(image, x0, y0, x1, y1):
return _lib.TCOD_image_get_mipmap_pixel(image, c_float(x0), c_float(y0),
c_float(x1), c_float(y1))
def image_put_pixel(image, x, y, col):
_lib.TCOD_image_put_pixel(image, x, y, col)
##_lib.TCOD_image_put_pixel_wrapper(image, x, y, col)
def image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):
_lib.TCOD_image_blit(image, console, c_float(x), c_float(y), bkgnd_flag,
c_float(scalex), c_float(scaley), c_float(angle))
def image_blit_rect(image, console, x, y, w, h, bkgnd_flag):
_lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)
def image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):
_lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)
def image_save(image, filename):
_lib.TCOD_image_save(image, c_char_p(filename))
def image_delete(image):
_lib.TCOD_image_delete(image)
############################
# mouse module
############################
class Mouse(Structure):
    """Mouse event state (mirror of TCOD_mouse_t).

    Filled in by mouse_get_status and sys_check_for_event /
    sys_wait_for_event.
    """
    _fields_=[('x', c_int),                 # absolute pixel coordinates
              ('y', c_int),
              ('dx', c_int),                # pixel movement since last event
              ('dy', c_int),
              ('cx', c_int),                # console cell coordinates
              ('cy', c_int),
              ('dcx', c_int),               # cell movement since last event
              ('dcy', c_int),
              ('lbutton', c_bool),          # buttons currently held down
              ('rbutton', c_bool),
              ('mbutton', c_bool),
              ('lbutton_pressed', c_bool),  # buttons released this event
              ('rbutton_pressed', c_bool),
              ('mbutton_pressed', c_bool),
              ('wheel_up', c_bool),
              ('wheel_down', c_bool),
              ]
_lib.TCOD_mouse_is_cursor_visible.restype = c_bool
def mouse_show_cursor(visible):
_lib.TCOD_mouse_show_cursor(c_int(visible))
def mouse_is_cursor_visible():
return _lib.TCOD_mouse_is_cursor_visible()
def mouse_move(x, y):
_lib.TCOD_mouse_move(x, y)
def mouse_get_status():
mouse=Mouse()
_lib.TCOD_mouse_get_status_wrapper(byref(mouse))
return mouse
############################
# parser module
############################
_lib.TCOD_struct_get_name.restype = c_char_p
_lib.TCOD_struct_is_mandatory.restype = c_bool
_lib.TCOD_parser_get_bool_property.restype = c_bool
_lib.TCOD_parser_get_float_property.restype = c_float
_lib.TCOD_parser_get_string_property.restype = c_char_p
_lib.TCOD_parser_get_color_property.restype = Color
class Dice(Structure):
    """Mirror of TCOD_dice_t: `nb_dices` rolls of `nb_faces`-sided dice,
    scaled by `multiplier` with `addsub` added to the result."""
    _fields_=[('nb_dices', c_int),
              ('nb_faces', c_int),
              ('multiplier', c_float),
              ('addsub', c_float),
              ]
    def __repr__(self):
        fields = (self.nb_dices, self.nb_faces, self.multiplier, self.addsub)
        return "Dice(%d, %d, %s, %s)" % fields
class _CValue(Union):
    # Untagged union carrying any property value the C parser can emit;
    # the TYPE_* code delivered alongside says which member is valid.
    _fields_=[('c',c_uint8),
              ('i',c_int),
              ('f',c_float),
              ('s',c_char_p),
              # JBR03192012 See http://bugs.python.org/issue14354 for why these are not defined as their actual types
              ('col',c_uint8 * 3),
              ('dice',c_int * 4),
              ('custom',c_void_p),
              ]
# ctypes prototypes for the parser listener callbacks.
_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)
_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)
_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)
class _CParserListener(Structure):
    # C struct of callback slots; filled in by parser_run() when a Python
    # listener object is supplied.
    _fields_=[('new_struct', _CFUNC_NEW_STRUCT),
              ('new_flag',_CFUNC_NEW_FLAG),
              ('new_property',_CFUNC_NEW_PROPERTY),
              ('end_struct',_CFUNC_NEW_STRUCT),
              ('error',_CFUNC_NEW_FLAG),
              ]
# property types
TYPE_NONE = 0
TYPE_BOOL = 1
TYPE_CHAR = 2
TYPE_INT = 3
TYPE_FLOAT = 4
TYPE_STRING = 5
TYPE_COLOR = 6
TYPE_DICE = 7
TYPE_VALUELIST00 = 8
TYPE_VALUELIST01 = 9
TYPE_VALUELIST02 = 10
TYPE_VALUELIST03 = 11
TYPE_VALUELIST04 = 12
TYPE_VALUELIST05 = 13
TYPE_VALUELIST06 = 14
TYPE_VALUELIST07 = 15
TYPE_VALUELIST08 = 16
TYPE_VALUELIST09 = 17
TYPE_VALUELIST10 = 18
TYPE_VALUELIST11 = 19
TYPE_VALUELIST12 = 20
TYPE_VALUELIST13 = 21
TYPE_VALUELIST14 = 22
TYPE_VALUELIST15 = 23
# Flag bit: OR-ed with a base type above to mark a list of that type.
TYPE_LIST = 1024
def _convert_TCODList(clist, typ):
    """Unmarshal a C TCOD_list whose elements are coded by `typ` into a
    Python list of the corresponding Python values."""
    converted = []
    for index in range(_lib.TCOD_list_size(clist)):
        ptr = cast(_lib.TCOD_list_get(clist, index), c_void_p)
        # Default: hand back the raw pointer for unrecognized type codes
        # (matches the original fall-through behavior).
        item = ptr
        if typ == TYPE_BOOL:
            item = c_bool.from_buffer(ptr).value
        elif typ == TYPE_CHAR:
            item = c_char.from_buffer(ptr).value
        elif typ == TYPE_INT:
            item = c_int.from_buffer(ptr).value
        elif typ == TYPE_FLOAT:
            item = c_float.from_buffer(ptr).value
        elif typ == TYPE_STRING or TYPE_VALUELIST00 <= typ <= TYPE_VALUELIST15:
            item = cast(ptr, c_char_p).value
        elif typ == TYPE_COLOR:
            item = Color.from_buffer_copy(ptr)
        elif typ == TYPE_DICE:
            # doesn't work (note kept from the original implementation)
            item = Dice.from_buffer_copy(ptr)
        converted.append(item)
    return converted
def parser_new():
    """Create a new config-file parser."""
    return _lib.TCOD_parser_new()
def parser_new_struct(parser, name):
    """Register a new structure type `name` and return its handle."""
    return _lib.TCOD_parser_new_struct(parser, name)
def struct_add_flag(struct, name):
    """Declare a boolean flag on the structure type."""
    _lib.TCOD_struct_add_flag(struct, name)
def struct_add_property(struct, name, typ, mandatory):
    """Declare a typed property on the structure type."""
    _lib.TCOD_struct_add_property(struct, name, typ, c_bool(mandatory))
def struct_add_value_list(struct, name, value_list, mandatory):
    """Declare a string-enum property restricted to `value_list`."""
    # Build a NULL-terminated C array of char* for the accepted values.
    CARRAY = c_char_p * (len(value_list) + 1)
    cvalue_list = CARRAY()
    for i in range(len(value_list)):
        cvalue_list[i] = cast(value_list[i], c_char_p)
    cvalue_list[len(value_list)] = 0
    _lib.TCOD_struct_add_value_list(struct, name, cvalue_list, c_bool(mandatory))
def struct_add_list_property(struct, name, typ, mandatory):
    """Declare a property holding a list of values of type `typ`."""
    _lib.TCOD_struct_add_list_property(struct, name, typ, c_bool(mandatory))
def struct_add_structure(struct, sub_struct):
    """Allow `sub_struct` entities to be nested inside `struct`."""
    _lib.TCOD_struct_add_structure(struct, sub_struct)
def struct_get_name(struct):
    """Return the structure type's name."""
    return _lib.TCOD_struct_get_name(struct)
def struct_is_mandatory(struct, name):
    """Return True if property `name` is mandatory on this structure."""
    return _lib.TCOD_struct_is_mandatory(struct, name)
def struct_get_type(struct, name):
    """Return the TYPE_* code of property `name`."""
    return _lib.TCOD_struct_get_type(struct, name)
def parser_run(parser, filename, listener=0):
    """Parse `filename`.

    Without a listener, values are stored for later parser_get_* calls.
    With a listener object (providing new_struct/new_flag/new_property/
    end_struct/error methods), events are forwarded as the file is read;
    property values are unmarshalled from the C union into Python values
    first.
    """
    if listener != 0:
        clistener=_CParserListener()
        # Translate the raw _CValue union member selected by `typ` into a
        # Python value before invoking the user's new_property callback.
        def value_converter(name, typ, value):
            if typ == TYPE_BOOL:
                return listener.new_property(name, typ, value.c == 1)
            elif typ == TYPE_CHAR:
                return listener.new_property(name, typ, '%c' % (value.c & 0xFF))
            elif typ == TYPE_INT:
                return listener.new_property(name, typ, value.i)
            elif typ == TYPE_FLOAT:
                return listener.new_property(name, typ, value.f)
            elif typ == TYPE_STRING or \
                 TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
                 return listener.new_property(name, typ, value.s)
            elif typ == TYPE_COLOR:
                col = cast(value.col, POINTER(Color)).contents
                return listener.new_property(name, typ, col)
            elif typ == TYPE_DICE:
                dice = cast(value.dice, POINTER(Dice)).contents
                return listener.new_property(name, typ, dice)
            elif typ & TYPE_LIST:
                return listener.new_property(name, typ,
                    _convert_TCODList(value.custom, typ & 0xFF))
            return True
        # Store the CFUNCTYPE wrappers on clistener so they stay alive
        # for the duration of the C call.
        clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)
        clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)
        clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)
        clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)
        clistener.error = _CFUNC_NEW_FLAG(listener.error)
        _lib.TCOD_parser_run(parser, c_char_p(filename), byref(clistener))
    else:
        _lib.TCOD_parser_run(parser, c_char_p(filename), 0)
def parser_delete(parser):
    """Destroy the parser and the data it holds."""
    _lib.TCOD_parser_delete(parser)
def parser_get_bool_property(parser, name):
    """Return a parsed boolean property."""
    return _lib.TCOD_parser_get_bool_property(parser, c_char_p(name))
def parser_get_int_property(parser, name):
    """Return a parsed integer property."""
    return _lib.TCOD_parser_get_int_property(parser, c_char_p(name))
def parser_get_char_property(parser, name):
    """Return a parsed character property as a 1-char string."""
    return '%c' % _lib.TCOD_parser_get_char_property(parser, c_char_p(name))
def parser_get_float_property(parser, name):
    """Return a parsed float property."""
    return _lib.TCOD_parser_get_float_property(parser, c_char_p(name))
def parser_get_string_property(parser, name):
    """Return a parsed string property."""
    return _lib.TCOD_parser_get_string_property(parser, c_char_p(name))
def parser_get_color_property(parser, name):
    """Return a parsed Color property."""
    return _lib.TCOD_parser_get_color_property(parser, c_char_p(name))
def parser_get_dice_property(parser, name):
    """Return a parsed Dice property (filled via the wrapper helper)."""
    d = Dice()
    _lib.TCOD_parser_get_dice_property_py(c_void_p(parser), c_char_p(name), byref(d))
    return d
def parser_get_list_property(parser, name, typ):
    """Return a parsed list property as a Python list of `typ` values."""
    clist = _lib.TCOD_parser_get_list_property(parser, c_char_p(name), c_int(typ))
    return _convert_TCODList(clist, typ)
############################
# random module
############################
_lib.TCOD_random_get_float.restype = c_float
_lib.TCOD_random_get_double.restype = c_double
# RNG backend algorithms
RNG_MT = 0
RNG_CMWC = 1
# distributions selectable via random_set_distribution()
DISTRIBUTION_LINEAR = 0
DISTRIBUTION_GAUSSIAN = 1
DISTRIBUTION_GAUSSIAN_RANGE = 2
DISTRIBUTION_GAUSSIAN_INVERSE = 3
DISTRIBUTION_GAUSSIAN_RANGE_INVERSE = 4
def random_get_instance():
    """Return the library's default shared RNG."""
    return _lib.TCOD_random_get_instance()
def random_new(algo=RNG_CMWC):
    """Create a new RNG using algorithm `algo`."""
    return _lib.TCOD_random_new(algo)
def random_new_from_seed(seed, algo=RNG_CMWC):
    """Create a new RNG with an explicit 32-bit seed."""
    return _lib.TCOD_random_new_from_seed(algo,c_uint(seed))
def random_set_distribution(rnd, dist) :
    """Select the distribution used by subsequent random_get_* calls."""
    _lib.TCOD_random_set_distribution(rnd, dist)
def random_get_int(rnd, mi, ma):
    """Return a random integer between mi and ma."""
    return _lib.TCOD_random_get_int(rnd, mi, ma)
def random_get_float(rnd, mi, ma):
    """Return a random float between mi and ma."""
    return _lib.TCOD_random_get_float(rnd, c_float(mi), c_float(ma))
def random_get_double(rnd, mi, ma):
    """Return a random double between mi and ma."""
    return _lib.TCOD_random_get_double(rnd, c_double(mi), c_double(ma))
def random_get_int_mean(rnd, mi, ma, mean):
    """Return a random integer between mi and ma, weighted toward `mean`."""
    return _lib.TCOD_random_get_int_mean(rnd, mi, ma, mean)
def random_get_float_mean(rnd, mi, ma, mean):
    """Return a random float between mi and ma, weighted toward `mean`."""
    return _lib.TCOD_random_get_float_mean(rnd, c_float(mi), c_float(ma), c_float(mean))
def random_get_double_mean(rnd, mi, ma, mean):
    """Return a random double between mi and ma, weighted toward `mean`."""
    return _lib.TCOD_random_get_double_mean(rnd, c_double(mi), c_double(ma), c_double(mean))
def random_save(rnd):
    """Return an opaque backup of the RNG state."""
    return _lib.TCOD_random_save(rnd)
def random_restore(rnd, backup):
    """Restore RNG state from a backup made by random_save()."""
    _lib.TCOD_random_restore(rnd, backup)
def random_delete(rnd):
    """Destroy an RNG created by random_new*()."""
    _lib.TCOD_random_delete(rnd)
############################
# noise module
############################
_lib.TCOD_noise_get.restype = c_float
_lib.TCOD_noise_get_ex.restype = c_float
_lib.TCOD_noise_get_fbm.restype = c_float
_lib.TCOD_noise_get_fbm_ex.restype = c_float
_lib.TCOD_noise_get_turbulence.restype = c_float
_lib.TCOD_noise_get_turbulence_ex.restype = c_float
NOISE_DEFAULT_HURST = 0.5
NOISE_DEFAULT_LACUNARITY = 2.0
NOISE_DEFAULT = 0
NOISE_PERLIN = 1
NOISE_SIMPLEX = 2
NOISE_WAVELET = 4
# Indexed by coordinate count (1-4): the ctypes float-array type used to
# pack a Python coordinate sequence for the C API.
_NOISE_PACKER_FUNC = (None,
                      (c_float * 1),
                      (c_float * 2),
                      (c_float * 3),
                      (c_float * 4),
                      )
def noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, random=0):
    """Create a noise generator of dimension `dim` (1-4)."""
    return _lib.TCOD_noise_new(dim, c_float(h), c_float(l), random)
def noise_set_type(n, typ) :
    """Choose the noise algorithm (NOISE_PERLIN/SIMPLEX/WAVELET)."""
    _lib.TCOD_noise_set_type(n,typ)
def noise_get(n, f, typ=NOISE_DEFAULT):
    """Sample flat noise at coordinates `f` (a sequence of floats)."""
    return _lib.TCOD_noise_get_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), typ)
def noise_get_fbm(n, f, oc, typ=NOISE_DEFAULT):
    """Sample fractional Brownian motion with `oc` octaves at `f`."""
    return _lib.TCOD_noise_get_fbm_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_get_turbulence(n, f, oc, typ=NOISE_DEFAULT):
    """Sample turbulence with `oc` octaves at `f`."""
    return _lib.TCOD_noise_get_turbulence_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_delete(n):
    """Destroy a noise generator."""
    _lib.TCOD_noise_delete(n)
############################
# fov module
############################
# Boolean-returning map queries.
_lib.TCOD_map_is_in_fov.restype = c_bool
_lib.TCOD_map_is_transparent.restype = c_bool
_lib.TCOD_map_is_walkable.restype = c_bool
FOV_BASIC = 0
FOV_DIAMOND = 1
FOV_SHADOW = 2
FOV_PERMISSIVE_0 = 3
FOV_PERMISSIVE_1 = 4
FOV_PERMISSIVE_2 = 5
FOV_PERMISSIVE_3 = 6
FOV_PERMISSIVE_4 = 7
FOV_PERMISSIVE_5 = 8
FOV_PERMISSIVE_6 = 9
FOV_PERMISSIVE_7 = 10
FOV_PERMISSIVE_8 = 11
FOV_RESTRICTIVE = 12
NB_FOV_ALGORITHMS = 13
def FOV_PERMISSIVE(p):
    """Map a permissiveness level `p` (0-8) onto its FOV_PERMISSIVE_* constant."""
    return p + FOV_PERMISSIVE_0
def map_new(w, h):
    """Allocate a new w*h TCOD map."""
    return _lib.TCOD_map_new(w, h)
def map_copy(source, dest):
    """Copy cell properties from `source` into `dest`."""
    return _lib.TCOD_map_copy(source, dest)
def map_set_properties(m, x, y, isTrans, isWalk):
    """Set transparency/walkability of cell (x, y)."""
    _lib.TCOD_map_set_properties(m, x, y, c_int(isTrans), c_int(isWalk))
def map_clear(m,walkable=False,transparent=False):
    """Reset every cell to the given walkability/transparency."""
    _lib.TCOD_map_clear(m,c_int(walkable),c_int(transparent))
def map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):
    """Compute field of view from (x, y) with the given radius and algorithm."""
    _lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_bool(light_walls), c_int(algo))
def map_is_in_fov(m, x, y):
    """True if (x, y) is visible after the last map_compute_fov() call."""
    return _lib.TCOD_map_is_in_fov(m, x, y)
def map_is_transparent(m, x, y):
    """True if cell (x, y) is transparent."""
    return _lib.TCOD_map_is_transparent(m, x, y)
def map_is_walkable(m, x, y):
    """True if cell (x, y) is walkable."""
    return _lib.TCOD_map_is_walkable(m, x, y)
def map_delete(m):
    """Destroy the map."""
    return _lib.TCOD_map_delete(m)
def map_get_width(map):
    """Return the map width in cells."""
    return _lib.TCOD_map_get_width(map)
def map_get_height(map):
    """Return the map height in cells."""
    return _lib.TCOD_map_get_height(map)
############################
# pathfinding module
############################
_lib.TCOD_path_compute.restype = c_bool
_lib.TCOD_path_is_empty.restype = c_bool
_lib.TCOD_path_walk.restype = c_bool
# Cost callback prototype: (xFrom, yFrom, xTo, yTo, userdata) -> float cost.
PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
def path_new_using_map(m, dcost=1.41):
    """A* pathfinder on a TCOD map; returns a (handle, callback) pair.

    All path_* functions below expect such a pair and use p[0] as the
    C handle; the second slot keeps a Python callback alive when one
    is used (None here).
    """
    return (_lib.TCOD_path_new_using_map(c_void_p(m), c_float(dcost)), None)
def path_new_using_function(w, h, func, userdata=0, dcost=1.41):
    """A* pathfinder driven by a Python cost callback.

    The CFUNCTYPE wrapper is returned alongside the handle so the
    caller's tuple keeps it alive while the C library references it.
    """
    cbk_func = PATH_CBK_FUNC(func)
    return (_lib.TCOD_path_new_using_function(w, h, cbk_func,
            py_object(userdata), c_float(dcost)), cbk_func)
def path_compute(p, ox, oy, dx, dy):
    """Compute a path from (ox, oy) to (dx, dy); True on success."""
    return _lib.TCOD_path_compute(p[0], ox, oy, dx, dy)
def path_get_origin(p):
    """Return the current path origin as (x, y)."""
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get_origin(p[0], byref(x), byref(y))
    return x.value, y.value
def path_get_destination(p):
    """Return the path destination as (x, y)."""
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get_destination(p[0], byref(x), byref(y))
    return x.value, y.value
def path_size(p):
    """Return the number of steps on the path."""
    return _lib.TCOD_path_size(p[0])
def path_reverse(p):
    """Reverse the path in place."""
    _lib.TCOD_path_reverse(p[0])
def path_get(p, idx):
    """Return the (x, y) coordinates of step `idx`."""
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get(p[0], idx, byref(x), byref(y))
    return x.value, y.value
def path_is_empty(p):
    """True if no steps remain on the path."""
    return _lib.TCOD_path_is_empty(p[0])
def path_walk(p, recompute):
    """Pop the next step; returns (x, y), or (None, None) when walking fails."""
    x = c_int()
    y = c_int()
    if _lib.TCOD_path_walk(p[0], byref(x), byref(y), c_int(recompute)):
        return x.value, y.value
    return None,None
def path_delete(p):
    """Destroy the path object."""
    _lib.TCOD_path_delete(p[0])
_lib.TCOD_dijkstra_path_set.restype = c_bool
_lib.TCOD_dijkstra_is_empty.restype = c_bool
_lib.TCOD_dijkstra_path_walk.restype = c_bool
_lib.TCOD_dijkstra_get_distance.restype = c_float
def dijkstra_new(m, dcost=1.41):
    """Dijkstra pathfinder on a TCOD map; returns a (handle, callback) pair."""
    return (_lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost)), None)
def dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):
    """Dijkstra pathfinder over a w*h grid driven by a Python cost callback.

    `func(xFrom, yFrom, xTo, yTo, userdata)` must return the move cost as
    a float.  Returns a (C handle, callback) pair; the CFUNCTYPE wrapper
    is kept in the tuple so it is not garbage-collected while the C
    library still references it.
    """
    cbk_func = PATH_CBK_FUNC(func)
    # Bug fix: the C entry point is TCOD_dijkstra_new_using_function; the
    # previously used name (TCOD_path_dijkstra_using_function) does not
    # exist in libtcod and failed at call time.
    return (_lib.TCOD_dijkstra_new_using_function(w, h, cbk_func,
            py_object(userdata), c_float(dcost)), cbk_func)
def dijkstra_compute(p, ox, oy):
    """Build the distance map from root cell (ox, oy)."""
    _lib.TCOD_dijkstra_compute(p[0], c_int(ox), c_int(oy))
def dijkstra_path_set(p, x, y):
    """Select (x, y) as destination; returns a bool."""
    return _lib.TCOD_dijkstra_path_set(p[0], c_int(x), c_int(y))
def dijkstra_get_distance(p, x, y):
    """Return the distance from the root to cell (x, y)."""
    return _lib.TCOD_dijkstra_get_distance(p[0], c_int(x), c_int(y))
def dijkstra_size(p):
    """Return the number of steps on the selected path."""
    return _lib.TCOD_dijkstra_size(p[0])
def dijkstra_reverse(p):
    """Reverse the selected path in place."""
    _lib.TCOD_dijkstra_reverse(p[0])
def dijkstra_get(p, idx):
    """Return the (x, y) coordinates of step `idx`."""
    x = c_int()
    y = c_int()
    _lib.TCOD_dijkstra_get(p[0], c_int(idx), byref(x), byref(y))
    return x.value, y.value
def dijkstra_is_empty(p):
    """True if no steps remain on the selected path."""
    return _lib.TCOD_dijkstra_is_empty(p[0])
def dijkstra_path_walk(p):
    """Pop the next step; returns (x, y), or (None, None) when done."""
    x = c_int()
    y = c_int()
    if _lib.TCOD_dijkstra_path_walk(p[0], byref(x), byref(y)):
        return x.value, y.value
    return None,None
def dijkstra_delete(p):
    """Destroy the pathfinder."""
    _lib.TCOD_dijkstra_delete(p[0])
############################
# bsp module
############################
class _CBsp(Structure):
    # Mirror of the C TCOD_bsp_t tree-node struct.
    _fields_ = [('next', c_void_p),
                ('father', c_void_p),
                ('son', c_void_p),
                ('x', c_int),
                ('y', c_int),
                ('w', c_int),
                ('h', c_int),
                ('position', c_int),
                ('level', c_uint8),
                ('horizontal', c_bool),
                ]
_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)
_lib.TCOD_bsp_left.restype = POINTER(_CBsp)
_lib.TCOD_bsp_right.restype = POINTER(_CBsp)
_lib.TCOD_bsp_father.restype = POINTER(_CBsp)
_lib.TCOD_bsp_is_leaf.restype = c_bool
_lib.TCOD_bsp_contains.restype = c_bool
_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)
# Traversal callback prototype: (node pointer, userData) -> int.
BSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
# python class encapsulating the _CBsp pointer
class Bsp(object):
    """Thin Python wrapper over a _CBsp pointer.

    Each property proxies the corresponding field of the underlying C
    struct, so mutations are immediately visible to the C library.
    """
    def __init__(self, cnode):
        pcbsp = cast(cnode, POINTER(_CBsp))
        self.p = pcbsp
    def getx(self):
        return self.p.contents.x
    def setx(self, value):
        self.p.contents.x = value
    x = property(getx, setx)
    def gety(self):
        return self.p.contents.y
    def sety(self, value):
        self.p.contents.y = value
    y = property(gety, sety)
    def getw(self):
        return self.p.contents.w
    def setw(self, value):
        self.p.contents.w = value
    w = property(getw, setw)
    def geth(self):
        return self.p.contents.h
    def seth(self, value):
        self.p.contents.h = value
    h = property(geth, seth)
    def getpos(self):
        return self.p.contents.position
    def setpos(self, value):
        self.p.contents.position = value
    position = property(getpos, setpos)
    def gethor(self):
        return self.p.contents.horizontal
    def sethor(self,value):
        self.p.contents.horizontal = value
    horizontal = property(gethor, sethor)
    def getlev(self):
        return self.p.contents.level
    def setlev(self,value):
        self.p.contents.level = value
    level = property(getlev, setlev)
def bsp_new_with_size(x, y, w, h):
    """Create a BSP tree whose root covers the rectangle (x, y, w, h)."""
    return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))
def bsp_split_once(node, horizontal, position):
    """Split `node` once at `position`, horizontally or vertically."""
    _lib.TCOD_bsp_split_once(node.p, c_int(horizontal), position)
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
                        maxVRatio):
    """Recursively split `nb` times, honoring minimum sizes and max ratios."""
    _lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,
                                  c_float(maxHRatio), c_float(maxVRatio))
def bsp_resize(node, x, y, w, h):
    """Resize the node's rectangle."""
    _lib.TCOD_bsp_resize(node.p, x, y, w, h)
def bsp_left(node):
    """Return the first child."""
    return Bsp(_lib.TCOD_bsp_left(node.p))
def bsp_right(node):
    """Return the second child."""
    return Bsp(_lib.TCOD_bsp_right(node.p))
def bsp_father(node):
    """Return the parent node."""
    return Bsp(_lib.TCOD_bsp_father(node.p))
def bsp_is_leaf(node):
    """True if the node has no children."""
    return _lib.TCOD_bsp_is_leaf(node.p)
def bsp_contains(node, cx, cy):
    """True if cell (cx, cy) lies inside the node's rectangle."""
    return _lib.TCOD_bsp_contains(node.p, cx, cy)
def bsp_find_node(node, cx, cy):
    """Return the descendant node containing cell (cx, cy)."""
    return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))
def _bsp_traverse(node, callback, userData, func):
    """Shared driver for the bsp_traverse_* wrappers below."""
    # convert the c node into a python node
    #before passing it to the actual callback
    def node_converter(cnode, data):
        node = Bsp(cnode)
        return callback(node, data)
    cbk_func = BSP_CBK_FUNC(node_converter)
    func(node.p, cbk_func, userData)
def bsp_traverse_pre_order(node, callback, userData=0):
    """Visit nodes in pre-order."""
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)
def bsp_traverse_in_order(node, callback, userData=0):
    """Visit nodes in in-order."""
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)
def bsp_traverse_post_order(node, callback, userData=0):
    """Visit nodes in post-order."""
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)
def bsp_traverse_level_order(node, callback, userData=0):
    """Visit nodes level by level, from the root down."""
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)
def bsp_traverse_inverted_level_order(node, callback, userData=0):
    """Visit nodes level by level, from the leaves up."""
    _bsp_traverse(node, callback, userData,
                  _lib.TCOD_bsp_traverse_inverted_level_order)
def bsp_remove_sons(node):
    """Delete all descendants of `node`, keeping the node itself."""
    _lib.TCOD_bsp_remove_sons(node.p)
def bsp_delete(node):
    """Delete the node and all of its descendants."""
    _lib.TCOD_bsp_delete(node.p)
############################
# heightmap module
############################
class _CHeightMap(Structure):
    # Mirror of TCOD_heightmap_t: a w*h grid of float elevation values.
    _fields_=[('w', c_int),
              ('h', c_int),
              ('values', POINTER(c_float)),
              ]
_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)
_lib.TCOD_heightmap_get_value.restype = c_float
_lib.TCOD_heightmap_has_land_on_border.restype = c_bool
class HeightMap(object):
    """Thin Python wrapper over a _CHeightMap pointer (exposes w and h)."""
    def __init__(self, chm):
        pchm = cast(chm, POINTER(_CHeightMap))
        self.p = pchm
    def getw(self):
        return self.p.contents.w
    def setw(self, value):
        self.p.contents.w = value
    w = property(getw, setw)
    def geth(self):
        return self.p.contents.h
    def seth(self, value):
        self.p.contents.h = value
    h = property(geth, seth)
def heightmap_new(w, h):
    """Allocate a new w*h heightmap."""
    phm = _lib.TCOD_heightmap_new(w, h)
    return HeightMap(phm)
def heightmap_set_value(hm, x, y, value):
    """Set the elevation of cell (x, y)."""
    _lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))
def heightmap_add(hm, value):
    """Add `value` to every cell."""
    _lib.TCOD_heightmap_add(hm.p, c_float(value))
def heightmap_scale(hm, value):
    """Multiply every cell by `value`."""
    _lib.TCOD_heightmap_scale(hm.p, c_float(value))
def heightmap_clear(hm):
    """Reset every cell to 0.0."""
    _lib.TCOD_heightmap_clear(hm.p)
def heightmap_clamp(hm, mi, ma):
    """Clamp every cell into [mi, ma]."""
    _lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))
def heightmap_copy(hm1, hm2):
    """Copy hm1's values into hm2."""
    _lib.TCOD_heightmap_copy(hm1.p, hm2.p)
def heightmap_normalize(hm, mi=0.0, ma=1.0):
    """Linearly rescale all values into [mi, ma]."""
    _lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))
def heightmap_lerp_hm(hm1, hm2, hm3, coef):
    """Store in hm3 the interpolation of hm1 and hm2 by `coef`."""
    _lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))
def heightmap_add_hm(hm1, hm2, hm3):
    """Store hm1 + hm2 (cell by cell) in hm3."""
    _lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)
def heightmap_multiply_hm(hm1, hm2, hm3):
    """Store hm1 * hm2 (cell by cell) in hm3."""
    _lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)
def heightmap_add_hill(hm, x, y, radius, height):
    """Add a hill of the given radius/height centered at (x, y)."""
    _lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),
                                 c_float( radius), c_float( height))
def heightmap_dig_hill(hm, x, y, radius, height):
    """Dig a hill-shaped depression centered at (x, y)."""
    _lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),
                                 c_float( radius), c_float( height))
def heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):
    """Simulate rain erosion with `nbDrops` droplets."""
    _lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),
                                     c_float( sedimentationCoef), rnd)
def heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,
                               maxLevel):
    """Apply a generic kernel transform.

    `dx`/`dy` are the kernel cell offsets and `weight` the matching
    coefficients; the sequences are packed into C arrays of `kernelsize`
    elements before the call.
    """
    FARRAY = c_float * kernelsize
    IARRAY = c_int * kernelsize
    cdx = IARRAY(*dx)
    cdy = IARRAY(*dy)
    cweight = FARRAY(*weight)
    _lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,
                                         c_float(minLevel), c_float(maxLevel))
def heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):
    """Add a Voronoi diagram of `nbPoints` sites weighted by `coef`."""
    FARRAY = c_float * nbCoef
    ccoef = FARRAY(*coef)
    _lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)
def heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):
    """Add fBm noise sampled from `noise` to the heightmap."""
    _lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),
                                c_float(addx), c_float(addy),
                                c_float(octaves), c_float(delta),
                                c_float(scale))
def heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,
                        scale):
    """Multiply the heightmap by fBm noise sampled from `noise`."""
    _lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),
                                  c_float(addx), c_float(addy),
                                  c_float(octaves), c_float(delta),
                                  c_float(scale))
def heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,
                         endDepth):
    """Dig along a Bezier curve defined by 4 control points (px, py)."""
    IARRAY = c_int * 4
    cpx = IARRAY(*px)
    cpy = IARRAY(*py)
    _lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),
                                   c_float(startDepth), c_float(endRadius),
                                   c_float(endDepth))
def heightmap_get_value(hm, x, y):
    """Return the elevation of cell (x, y)."""
    return _lib.TCOD_heightmap_get_value(hm.p, x, y)
def heightmap_get_interpolated_value(hm, x, y):
    """Return the interpolated elevation at float coordinates (x, y)."""
    return _lib.TCOD_heightmap_get_interpolated_value(hm.p, c_float(x),
                                                      c_float(y))
def heightmap_get_slope(hm, x, y):
    """Return the slope at cell (x, y)."""
    return _lib.TCOD_heightmap_get_slope(hm.p, x, y)
def heightmap_get_normal(hm, x, y, waterLevel):
    """Return the surface normal at (x, y) as an (nx, ny, nz) tuple."""
    FARRAY = c_float * 3
    cn = FARRAY()
    _lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn,
                                   c_float(waterLevel))
    return cn[0], cn[1], cn[2]
def heightmap_count_cells(hm, mi, ma):
    """Count cells whose elevation lies in [mi, ma]."""
    return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))
def heightmap_has_land_on_border(hm, waterlevel):
    """True if any border cell is above `waterlevel`."""
    return _lib.TCOD_heightmap_has_land_on_border(hm.p, c_float(waterlevel))
def heightmap_get_minmax(hm):
    """Return (min, max) elevation over the whole map."""
    mi = c_float()
    ma = c_float()
    _lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))
    return mi.value, ma.value
def heightmap_delete(hm):
    """Free the heightmap."""
    _lib.TCOD_heightmap_delete(hm.p)
############################
# name generator module
############################
_lib.TCOD_namegen_generate.restype = c_char_p
_lib.TCOD_namegen_generate_custom.restype = c_char_p
def namegen_parse(filename,random=0) :
    """Load name-generation rule sets from `filename`."""
    _lib.TCOD_namegen_parse(filename,random)
def namegen_generate(name) :
    """Generate a name from the rule set called `name`."""
    return _lib.TCOD_namegen_generate(name, 0)
def namegen_generate_custom(name, rule) :
    """Generate a name from set `name` using the explicit generation `rule`.

    Bug fix: this previously called TCOD_namegen_generate, which takes only
    (name, allocate) and ignores the rule; the correct C entry point is
    TCOD_namegen_generate_custom (its restype is declared above).
    """
    return _lib.TCOD_namegen_generate_custom(name, rule, 0)
def namegen_get_sets():
    """Return the names of all loaded generator sets as a list."""
    nb=_lib.TCOD_namegen_get_nb_sets_wrapper()
    SARRAY = c_char_p * nb;
    setsa = SARRAY()
    _lib.TCOD_namegen_get_sets_wrapper(setsa)
    return list(setsa)
def namegen_destroy() :
    """Free all loaded name-generator data."""
    _lib.TCOD_namegen_destroy()
| deeredman1991/CreepSmash | tools/libtcod/libtcodpy.py | Python | mit | 60,784 | [
"Amber"
] | 52ee1f5374f1beb32d1f8e851a2d4c20d05f4e2ff22c641b31e0cfd8f9cf3e48 |
r"""
===============
Decoding (MVPA)
===============
.. contents:: Contents
:local:
:depth: 3
.. include:: ../git_links.inc
Design philosophy
=================
Decoding (a.k.a. MVPA) in MNE largely follows the machine
learning API of the scikit-learn package.
Each estimator implements ``fit``, ``transform``, ``fit_transform``, and
(optionally) ``inverse_transform`` methods. For more details on this design,
visit scikit-learn_. For additional theoretical insights into the decoding
framework in MNE, see [1]_.
For ease of comprehension, we will denote instantiations of the class using
the same name as the class but in small caps instead of camel cases.
Let's start by loading data for a simple two-class problem:
"""
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,
cross_val_multiscore, LinearModel, get_coef,
Vectorizer, CSP)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax = -0.200, 0.500
event_id = {'Auditory/Left': 1, 'Visual/Left': 3}  # just use two
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# The subsequent decoding analyses only capture evoked responses, so we can
# low-pass the MEG data. Usually a value more like 40 Hz would be used,
# but here low-pass at 20 so we can more heavily decimate, and allow
# the example to run faster. The 2 Hz high-pass helps improve CSP.
raw.filter(2, 20)
events = mne.find_events(raw, 'STI 014')
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
                       exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=(None, 0.), preload=True,
                    reject=dict(grad=4000e-13, eog=150e-6), decim=10)
epochs.pick_types(meg=True, exclude='bads')  # remove stim and EOG
X = epochs.get_data()  # MEG signals: n_epochs, n_meg_channels, n_times
y = epochs.events[:, 2]  # target: auditory left vs. visual left
###############################################################################
# Transformation classes
# ======================
#
# Scaler
# ^^^^^^
# The :class:`mne.decoding.Scaler` will standardize the data based on channel
# scales. In the simplest modes ``scalings=None`` or ``scalings=dict(...)``,
# each data channel type (e.g., mag, grad, eeg) is treated separately and
# scaled by a constant. This is the approach used by e.g.,
# :func:`mne.compute_covariance` to standardize channel scales.
#
# If ``scalings='mean'`` or ``scalings='median'``, each channel is scaled using
# empirical measures. Each channel is scaled independently by the mean and
# standard deviation, or median and interquartile range, respectively, across
# all epochs and time points during :class:`~mne.decoding.Scaler.fit`
# (during training). The :meth:`~mne.decoding.Scaler.transform` method is
# called to transform data (training or test set) by scaling all time points
# and epochs on a channel-by-channel basis. To perform both the ``fit`` and
# ``transform`` operations in a single call, the
# :meth:`~mne.decoding.Scaler.fit_transform` method may be used. To invert the
# transform, :meth:`~mne.decoding.Scaler.inverse_transform` can be used. For
# ``scalings='median'``, scikit-learn_ version 0.17+ is required.
#
# .. note:: Using this class is different from directly applying
# :class:`sklearn.preprocessing.StandardScaler` or
# :class:`sklearn.preprocessing.RobustScaler` offered by
# scikit-learn_. These scale each *classification feature*, e.g.
# each time point for each channel, with mean and standard
# deviation computed across epochs, whereas
# :class:`mne.decoding.Scaler` scales each *channel* using mean and
# standard deviation computed across all of its time points
# and epochs.
#
# Vectorizer
# ^^^^^^^^^^
# Scikit-learn API provides functionality to chain transformers and estimators
# by using :class:`sklearn.pipeline.Pipeline`. We can construct decoding
# pipelines and perform cross-validation and grid-search. However scikit-learn
# transformers and estimators generally expect 2D data
# (n_samples * n_features), whereas MNE transformers typically output data
# with a higher dimensionality
# (e.g. n_samples * n_channels * n_frequencies * n_times). A Vectorizer
# therefore needs to be applied between the MNE and the scikit-learn steps
# like:
# Uses all MEG sensors and time points as separate classification
# features, so the resulting filters used are spatio-temporal
clf = make_pipeline(Scaler(epochs.info),
                    Vectorizer(),
                    LogisticRegression(solver='lbfgs'))
# 5-fold cross-validated score over the epochs
scores = cross_val_multiscore(clf, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
score = np.mean(scores, axis=0)
print('Spatio-temporal: %0.1f%%' % (100 * score,))
###############################################################################
# PSDEstimator
# ^^^^^^^^^^^^
# The :class:`mne.decoding.PSDEstimator`
# computes the power spectral density (PSD) using the multitaper
# method. It takes a 3D array as input, converts it into 2D and computes the
# PSD.
#
# FilterEstimator
# ^^^^^^^^^^^^^^^
# The :class:`mne.decoding.FilterEstimator` filters the 3D epochs data.
#
# Spatial filters
# ===============
#
# Just like temporal filters, spatial filters provide weights to modify the
# data along the sensor dimension. They are popular in the BCI community
# because of their simplicity and ability to distinguish spatially-separated
# neural activity.
#
# Common spatial pattern
# ^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`mne.decoding.CSP` is a technique to analyze multichannel data based
# on recordings from two classes [2]_ (see also
# http://en.wikipedia.org/wiki/Common_spatial_pattern).
#
# Let :math:`X \in R^{C\times T}` be a segment of data with
# :math:`C` channels and :math:`T` time points. The data at a single time point
# is denoted by :math:`x(t)` such that :math:`X=[x(t), x(t+1), ..., x(t+T-1)]`.
# Common spatial pattern (CSP) finds a decomposition that projects the signal
# in the original sensor space to CSP space using the following transformation:
#
# .. math:: x_{CSP}(t) = W^{T}x(t)
# :label: csp
#
# where each column of :math:`W \in R^{C\times C}` is a spatial filter and each
# row of :math:`x_{CSP}` is a CSP component. The matrix :math:`W` is also
# called the de-mixing matrix in other contexts. Let
# :math:`\Sigma^{+} \in R^{C\times C}` and :math:`\Sigma^{-} \in R^{C\times C}`
# be the estimates of the covariance matrices of the two conditions.
# CSP analysis is given by the simultaneous diagonalization of the two
# covariance matrices
#
# .. math:: W^{T}\Sigma^{+}W = \lambda^{+}
# :label: diagonalize_p
# .. math:: W^{T}\Sigma^{-}W = \lambda^{-}
# :label: diagonalize_n
#
# where :math:`\lambda^{C}` is a diagonal matrix whose entries are the
# eigenvalues of the following generalized eigenvalue problem
#
# .. math:: \Sigma^{+}w = \lambda \Sigma^{-}w
# :label: eigen_problem
#
# Large entries in the diagonal matrix corresponds to a spatial filter which
# gives high variance in one class but low variance in the other. Thus, the
# filter facilitates discrimination between the two classes.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_eeg.py`
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_timefreq.py`
#
# .. note::
#
# The winning entry of the Grasp-and-lift EEG competition in Kaggle used
# the :class:`~mne.decoding.CSP` implementation in MNE and was featured as
# a `script of the week`_.
#
# .. _script of the week: http://blog.kaggle.com/2015/08/12/july-2015-scripts-of-the-week/ # noqa
#
# We can use CSP with these data with:
# CSP spatial filtering feeding a logistic-regression classifier.
csp = CSP(n_components=3, norm_trace=False)
clf = make_pipeline(csp, LogisticRegression(solver='lbfgs'))
scores = cross_val_multiscore(clf, X, y, cv=5, n_jobs=1)
print('CSP: %0.1f%%' % (100 * scores.mean(),))
###############################################################################
# Source power comodulation (SPoC)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Source Power Comodulation (:class:`mne.decoding.SPoC`) [3]_
# identifies the composition of
# orthogonal spatial filters that maximally correlate with a continuous target.
#
# SPoC can be seen as an extension of the CSP where the target is driven by a
# continuous variable rather than a discrete variable. Typical applications
# include extraction of motor patterns using EMG power or audio patterns using
# sound envelope.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_spoc_CMC.py`
#
# xDAWN
# ^^^^^
# :class:`mne.preprocessing.Xdawn` is a spatial filtering method designed to
# improve the signal to signal + noise ratio (SSNR) of the ERP responses [4]_.
# Xdawn was originally
# designed for P300 evoked potential by enhancing the target response with
# respect to the non-target response. The implementation in MNE-Python is a
# generalization to any type of ERP.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_preprocessing_plot_xdawn_denoising.py`
# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_xdawn_eeg.py`
#
# Effect-matched spatial filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The result of :class:`mne.decoding.EMS` is a spatial filter at each time
# point and a corresponding time course [5]_.
# Intuitively, the result gives the similarity between the filter at
# each time point and the data vector (sensors) at that time point.
#
# .. topic:: Examples
#
# * :ref:`sphx_glr_auto_examples_decoding_plot_ems_filtering.py`
#
# Patterns vs. filters
# ^^^^^^^^^^^^^^^^^^^^
#
# When interpreting the components of the CSP (or spatial filters in general),
# it is often more intuitive to think about how :math:`x(t)` is composed of
# the different CSP components :math:`x_{CSP}(t)`. In other words, we can
# rewrite Equation :eq:`csp` as follows:
#
# .. math:: x(t) = (W^{-1})^{T}x_{CSP}(t)
# :label: patterns
#
# The columns of the matrix :math:`(W^{-1})^T` are called spatial patterns.
# This is also called the mixing matrix. The example
# :ref:`sphx_glr_auto_examples_decoding_plot_linear_model_patterns.py`
# discusses the difference between patterns and filters.
#
# These can be plotted with:
# Fit CSP on full data and plot
csp.fit(X, y)
csp.plot_patterns(epochs.info)  # spatial patterns (mixing matrix columns)
csp.plot_filters(epochs.info, scalings=1e-9)  # spatial filters
###############################################################################
# Decoding over time
# ==================
#
# This strategy consists in fitting a multivariate predictive model on each
# time instant and evaluating its performance at the same instant on new
# epochs. The :class:`mne.decoding.SlidingEstimator` will take as input a
# pair of features :math:`X` and targets :math:`y`, where :math:`X` has
# more than 2 dimensions. For decoding over time the data :math:`X`
# is the epochs data of shape n_epochs x n_channels x n_times. As the
# last dimension of :math:`X` is the time, an estimator will be fit
# on every time instant.
#
# This approach is analogous to SlidingEstimator-based approaches in fMRI,
# where here we are interested in when one can discriminate experimental
# conditions and therefore figure out when the effect of interest happens.
#
# When working with linear models as estimators, this approach boils
# down to estimating a discriminative spatial filter for each time instant.
#
# Temporal decoding
# ^^^^^^^^^^^^^^^^^
#
# We'll use a Logistic Regression for a binary classification as machine
# learning model.
# We will train the classifier on all left visual vs auditory trials on MEG
# Standardize the features, then fit a logistic regression at every
# time point (one classifier per time sample).
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
# 5-fold cross-validation; yields one ROC-AUC score per fold and time point.
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)

# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)

# Plot
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
# Chance level for this binary ROC-AUC scoring is 0.5.
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')  # Area Under the Curve
ax.legend()
# Mark stimulus onset (t = 0).
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
###############################################################################
# You can retrieve the spatial filters and spatial patterns if you explicitly
# use a LinearModel
# Wrapping the classifier in LinearModel stores the fitted coefficients,
# so get_coef can later retrieve the spatial patterns ('patterns_') or
# filters ('filters_').
clf = make_pipeline(StandardScaler(),
                    LinearModel(LogisticRegression(solver='lbfgs')))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
time_decod.fit(X, y)
# inverse_transform=True maps the coefficients back through the pipeline's
# transformers (here the StandardScaler), so they live in sensor units.
coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
# Treat the patterns as an evoked-like object so MNE's plotting applies.
evoked = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
                    topomap_args=dict(time_unit='s'))
# Topographies of the patterns every 100 ms from 0 to 400 ms.
evoked.plot_joint(times=np.arange(0., .500, .100), title='patterns',
                  **joint_kwargs)
###############################################################################
# Temporal generalization
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# Temporal generalization is an extension of the decoding over time approach.
# It consists in evaluating whether the model estimated at a particular
# time instant accurately predicts any other time instant. It is analogous to
# transferring a trained model to a distinct learning problem, where the
# problems correspond to decoding the patterns of brain activity recorded at
# distinct time instants.
#
# The object to use for Temporal generalization is
# :class:`mne.decoding.GeneralizingEstimator`. It expects as input :math:`X`
# and :math:`y` (similarly to :class:`~mne.decoding.SlidingEstimator`) but
# generates predictions from each model for all time instants. The class
# :class:`~mne.decoding.GeneralizingEstimator` is generic and will treat the
# last dimension as the one to be used for generalization testing. For
# convenience, here, we refer to it as different tasks. If :math:`X`
# corresponds to epochs data then the last dimension is time.
#
# This runs the analysis used in [6]_ and further detailed in [7]_:
# define the Temporal generalization object
# define the Temporal generalization object
# (reuses the pipeline `clf` defined just above)
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc',
                                 verbose=True)
scores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)

# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)

# Plot the diagonal (it's exactly the same as the time-by-time decoding above)
fig, ax = plt.subplots()
ax.plot(epochs.times, np.diag(scores), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
# Mark stimulus onset.
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time')
###############################################################################
# Plot the full (generalization) matrix:
fig, ax = plt.subplots(1, 1)
# First array axis -> y (training time), second -> x (testing time),
# as labelled below; values are ROC-AUC scores clipped to [0, 1].
im = ax.imshow(scores, interpolation='lanczos', origin='lower', cmap='RdBu_r',
               extent=epochs.times[[0, -1, 0, -1]], vmin=0., vmax=1.)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Temporal generalization')
# Mark stimulus onset on both axes.
ax.axvline(0, color='k')
ax.axhline(0, color='k')
plt.colorbar(im, ax=ax)
###############################################################################
# Source-space decoding
# =====================
#
# Source space decoding is also possible, but because the number of features
# can be much larger than in the sensor space, univariate feature selection
# using ANOVA f-test (or some other metric) can be done to reduce the feature
# dimension. Interpreting decoding results might be easier in source space as
# compared to sensor space.
#
# .. topic:: Examples
#
# * :ref:`tut_dec_st_source`
#
# Exercise
# ========
#
# - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
# Face vs. Scrambled)
#
# References
# ==========
# .. [1] Jean-Rémi King et al. (2018) "Encoding and Decoding Neuronal Dynamics:
# Methodological Framework to Uncover the Algorithms of Cognition",
# in press. https://hal.archives-ouvertes.fr/hal-01848442/
# .. [2] Zoltan J. Koles. The quantitative extraction and topographic mapping
# of the abnormal components in the clinical EEG. Electroencephalography
# and Clinical Neurophysiology, 79(6):440--447, December 1991.
# .. [3] Dahne, S., Meinecke, F. C., Haufe, S., Hohne, J., Tangermann, M.,
# Muller, K. R., & Nikulin, V. V. (2014). SPoC: a novel framework for
# relating the amplitude of neuronal oscillations to behaviorally
# relevant parameters. NeuroImage, 86, 111-122.
# .. [4] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
# algorithm to enhance evoked potentials: application to
# brain-computer interface. Biomedical Engineering, IEEE Transactions
# on, 56(8), 2035-2043.
# .. [5] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
# multi-sensor data to a single time course that reveals experimental
# effects", BMC Neuroscience 2013, 14:122
# .. [6] Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
# and Stanislas Dehaene, "Two distinct dynamic modes subtend the
# detection of unexpected sounds", PLOS ONE, 2013,
# http://www.ncbi.nlm.nih.gov/pubmed/24475052
# .. [7] King & Dehaene (2014) 'Characterizing the dynamics of mental
# representations: the temporal generalization method', Trends In
# Cognitive Sciences, 18(4), 203-210.
# http://www.ncbi.nlm.nih.gov/pubmed/24593982
| mne-tools/mne-tools.github.io | 0.17/_downloads/34a6d4fd8dc9f5e092fe81327a209348/plot_sensors_decoding.py | Python | bsd-3-clause | 18,160 | [
"VisIt"
] | df5486f7727a71ff69da397e28854e86d3be5cc4adb4df63b03f82641b2362fc |
#!/usr/bin/env python3
import sys
from antlr4 import *
from llvmlite import ir
from LucyLexer import LucyLexer
from LucyParser import LucyParser
from LucyVisitor import LucyVisitor
class SymbolTable:
    """Stack of name->value scopes with innermost-first lookup.

    A lookup miss in every scope raises IndexError; callers depend on
    that exception type.
    """

    def __init__(self):
        # Start with a single (global) scope.
        self._symbols = [{}]

    def push_frame(self, symbols=None):
        """Enter a new innermost scope, optionally pre-populated.

        When a dict is passed it is used as-is (not copied), so later
        ``bind`` calls mutate the caller's dict.
        """
        if symbols is None:
            symbols = {}
        self._symbols.append(symbols)

    def pop_frame(self):
        """Discard the innermost scope."""
        del self._symbols[-1]

    def resolve(self, ide):
        """Return the value bound to *ide*, searching inner scopes first.

        Raises IndexError when the name is bound in no scope.
        """
        for frame in self._symbols[::-1]:
            if ide in frame:
                return frame[ide]
        raise IndexError

    def bind(self, ide, ptr):
        """Bind *ide* to *ptr* in the innermost scope."""
        self._symbols[-1][ide] = ptr
class CodeGenerator(LucyVisitor):
    """Walk the Lucy parse tree and emit LLVM IR via llvmlite.

    One ``ir.Module`` is built per instance; ``self.builder`` always
    points at the insertion point of the function currently being
    generated, and ``self.symbols`` maps identifiers to ``alloca``
    pointers (variables) or ``ir.Function`` objects (functions).

    Note: ANTLR context accessors return ``None`` for absent optional
    subrules, so optionality is tested with explicit ``is not None``
    checks rather than catching AttributeError (which could mask
    unrelated bugs raised while visiting subtrees).
    """

    def __init__(self):
        # Mapping from Lucy type names to LLVM types.
        self.types = {
            'Void': ir.VoidType(),
            'Int': ir.IntType(32),
        }
        self.module = ir.Module()
        self.symbols = SymbolTable()

    def new_var(self, typ, ide, val=None):
        """Allocate a stack slot for *ide*; optionally store *val* into it."""
        ptr = self.builder.alloca(typ, name=ide)
        self.symbols.bind(ide, ptr)
        if val is not None:
            self.builder.store(val, ptr)

    def new_func(self, typ, ide):
        """Declare function *ide* of LLVM function type *typ* and bind it."""
        self.func = ir.Function(self.module, typ, name=ide)
        self.symbols.bind(ide, self.func)

    def new_block(self):
        """Open the entry basic block of the current function."""
        block = self.func.append_basic_block(name='.entry')
        self.builder = ir.IRBuilder(block)

    def visitVarDecl(self, ctx):
        """Declare a variable; an optional initializer reuses visitAssign
        (a VarDecl ctx exposes the same ID()/expr() accessors)."""
        typ = ctx.typ().getText()
        ide = ctx.ID().getText()
        self.new_var(self.types[typ], ide)
        if ctx.expr():
            self.visitAssign(ctx)

    def visitFuncDecl(self, ctx):
        """Generate a function: declaration, parameter slots and body."""
        ide = ctx.ID().getText()
        # Missing return type defaults to 'Void'.
        ret_ctx = ctx.typ()
        ret_typ = ret_ctx.getText() if ret_ctx is not None else 'Void'
        # Missing parameter list means a nullary function.
        params_ctx = ctx.params()
        if params_ctx is not None:
            params = params_ctx.param()
            param_types = [self.types[x.typ().getText()] for x in params]
            param_names = [x.ID().getText() for x in params]
        else:
            param_types = []
            param_names = []
        func_typ = ir.FunctionType(self.types[ret_typ], param_types)
        self.new_func(func_typ, ide)
        self.symbols.push_frame()
        self.new_block()
        # Name the LLVM arguments and spill them to stack slots so the
        # body can address parameters like ordinary variables.
        for arg, typ, name in zip(self.func.args, param_types, param_names):
            arg.name = name
            self.new_var(typ, name, arg)
        self.visit(ctx.block())
        self.symbols.pop_frame()
        # Implicit 'ret void' for void functions that fall off the end.
        # BUGFIX: the original compared the whole ir.FunctionType against
        # ir.VoidType() (never equal), so the implicit return never fired
        # and void functions without an explicit 'return' produced basic
        # blocks lacking a terminator.
        if not self.builder.block.is_terminated \
                and func_typ.return_type == self.types['Void']:
            self.builder.ret_void()

    def visitBlock(self, ctx):
        """A block introduces a fresh lexical scope."""
        self.symbols.push_frame()
        for child in ctx.children:
            self.visit(child)
        self.symbols.pop_frame()

    def visitRet(self, ctx):
        """Emit 'ret <expr>' or, for void functions, 'ret void'."""
        expr_ctx = ctx.expr()
        if expr_ctx is not None:
            self.builder.ret(self.visit(expr_ctx))
        elif self.func.return_value.type == self.types['Void']:
            self.builder.ret_void()
        else:
            raise Exception("Function must return a value.")

    def visitAssign(self, ctx):
        """Store the RHS value into the variable's stack slot."""
        ide = ctx.ID().getText()
        try:
            ptr = self.symbols.resolve(ide)
        except IndexError:
            raise Exception("Undeclared identifier in assignment.")
        val = self.visit(ctx.expr())
        if ptr.type.pointee != val.type:
            raise Exception("Type mismatch in assignment.")
        return self.builder.store(val, ptr)

    def visitParensExpr(self, ctx):
        return self.visit(ctx.expr())

    def visitCallExpr(self, ctx):
        """Emit a call after checking callee kind, arity and arg types."""
        ide = ctx.ID().getText()
        try:
            func = self.symbols.resolve(ide)
        except IndexError:
            raise Exception("Undeclared identifier in expression.")
        if not isinstance(func, ir.Function):
            raise Exception("Trying to call non-function.")
        # Missing exprList means a call with no arguments.
        args_ctx = ctx.exprList()
        if args_ctx is not None:
            params = [self.visit(x) for x in args_ctx.expr()]
        else:
            params = []
        if len(params) != len(func.args):
            raise Exception("Wrong number of parameters in call.")
        for param, arg in zip(params, func.args):
            if param.type != arg.type:
                raise Exception("Wrong type of parameter.")
        return self.builder.call(func, params)

    def visitMinusExpr(self, ctx):
        return self.builder.neg(self.visit(ctx.expr()))

    def visitMulDivExpr(self, ctx):
        op = ctx.op.text
        lhs = self.visit(ctx.expr(0))
        rhs = self.visit(ctx.expr(1))
        if op == '*':
            return self.builder.mul(lhs, rhs)
        # Signed division, matching the 32-bit signed 'Int' type.
        return self.builder.sdiv(lhs, rhs)

    def visitAddSubExpr(self, ctx):
        op = ctx.op.text
        lhs = self.visit(ctx.expr(0))
        rhs = self.visit(ctx.expr(1))
        if op == '+':
            return self.builder.add(lhs, rhs)
        return self.builder.sub(lhs, rhs)

    def visitIdExpr(self, ctx):
        """Load a variable's current value from its stack slot."""
        ide = ctx.ID().getText()
        try:
            ptr = self.symbols.resolve(ide)
        except IndexError:
            raise Exception("Undeclared identifier in expression.")
        return self.builder.load(ptr)

    def visitIntExpr(self, ctx):
        """Integer literal -> 32-bit LLVM constant."""
        integer = ctx.INT().getText()
        return ir.Constant(self.types['Int'], int(integer))
if __name__ == '__main__':
    # Lex and parse the source file named on the command line, then
    # generate and print the LLVM IR module.
    input_stream = FileStream(sys.argv[1])
    token_stream = CommonTokenStream(LucyLexer(input_stream))
    syntax_tree = LucyParser(token_stream).program()
    generator = CodeGenerator()
    generator.visit(syntax_tree)
    print(generator.module)
| AndreaOrru/Lucy | compiler/lucyc.py | Python | bsd-2-clause | 5,689 | [
"VisIt"
] | 7f678e7c144348e4975bb8769330edc62caf024a4902953486002a21371f38c1 |
from setuptools import setup
import glob,os,sys
# To use a consistent encoding
from codecs import open
from os import path

# Directory containing this setup.py.
here = path.abspath(path.dirname(__file__))

# Every *.py under bin/ and sbin/ is installed as a command-line script.
scriptlist =glob.glob(os.path.join('bin', '*.py'))
scriptlist+=glob.glob(os.path.join('sbin', '*.py'))

# Single source of truth for the package version.
from PyFoam import versionString

# with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
#     long_description = f.read()

setup(
    name='PyFoam',
    version=versionString(),
    packages=['PyFoam',
              'PyFoam.Applications',
              'PyFoam.Basics',
              'PyFoam.Execution',
              'PyFoam.Infrastructure',
              'PyFoam.IPythonHelpers',
              'PyFoam.LogAnalysis',
              'PyFoam.RunDictionary',
              'PyFoam.Paraview',
              'PyFoam.Site',
              'PyFoam.ThirdParty',
              'PyFoam.ThirdParty.ply',
              'PyFoam.ThirdParty.Gnuplot',
              'PyFoam.Wrappers'],
    description='Python Utilities for OpenFOAM',
    # long_description=long_description,
    url='http://openfoamwiki.net/index.php/Contrib/PyFoam',
    author='Bernhard F.W. Gschaider',
    author_email='bgschaid@ice-sf.at',
    scripts=scriptlist,
    license="GPL",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.5",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
    ],
    keywords='cfd openfoam',
    install_requires=['numpy'],
)
| mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam | setup.py | Python | gpl-2.0 | 1,967 | [
"ParaView"
] | 5d6bacf6ac03de24ec05d00ad62afaf1f02a74b66dd0570d2fd4812791ba39f6 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Mathias Hauser
# Date: 08.2014

# USAGE: CONVERT clm history files in xy-format (lat, lon) to col format

try:
    import netCDF4 as nc
except: # netCDF4 is not available on cscs
    # Fall back to scipy's reader, aliased so the rest of the script can
    # keep using the netCDF4-style `nc.Dataset` name.
    import scipy.io.netcdf as nc
    nc.Dataset = nc.netcdf_file

import shutil

# DEFINE FILE NAMES

# example file in column/ 3D form (see README)
# this file needs to exist
fN_3D_in = 'file_3D_in.nc'

# input file in lat-lon/ 4D form
# this file needs to exist
# this is what we want to transform
fN_4D = 'file_4D.nc'

# output file: this is the transformed fN_xy
# does not need to exist (will be overwritten)
fN_3D_out = 'file_3D_out.nc'

# START PROGRAM

with nc.Dataset(fN_3D_in) as ncf:
    # get indices
    # lon/lat index of each column (1-based, see the "- 1" below)
    cols1d_ixy = ncf.variables['cols1d_ixy'][:]
    cols1d_jxy = ncf.variables['cols1d_jxy'][:]
    # landunit type of each column
    cols1d_itype_lunit = ncf.variables['cols1d_itype_lunit'][:]
    # NOTE(review): `shape` is read here but never used below.
    shape = ncf.variables['SOILLIQ'].shape

# landunit type code for soil columns
istsoil = 1

# not all the points in SOILLIQ and SOILICE are SM
sel_soil = cols1d_itype_lunit == istsoil

# python uses 0 based indexing
col = cols1d_ixy[sel_soil] - 1
row = cols1d_jxy[sel_soil] - 1

# copy col/3D 'example file' to 'output file'
shutil.copyfile(fN_3D_in, fN_3D_out)

# read from fN_xy (only the first 10 lvl are SM levels)
with nc.Dataset(fN_4D) as ncf:
    SOILLIQ_xy = ncf.variables['SOILLIQ'][:, 0:10, :, :]
    SOILICE_xy = ncf.variables['SOILICE'][:, 0:10, :, :]

# write out the transformed SM
# assumes the 4D arrays are ordered (time, lev, lat, lon) -- TODO confirm
with nc.Dataset(fN_3D_out, 'a') as ncf:
    # fancy indexing collapses the (lat, lon) axes to one column axis
    ncf.variables['SOILLIQ'][:, 0:10, sel_soil] = SOILLIQ_xy[:, :, row, col]
    ncf.variables['SOILICE'][:, 0:10, sel_soil] = SOILICE_xy[:, :, row, col]

# LOOP VERSION (kept for reference; equivalent to the vectorized code above)
# for day in [0]:  # replace with 0:365
#     for lev in [0]:  # replace with 0:10
#         for i in xrange(n_cols1d):
#             col = cols1d_ixy[i]
#             row = cols1d_jxy[i]
#             if cols1d_itype_lunit[i] == 1:
#                 SL[row, col] = SOILLIQ[day, lev, sel_soil]

# SL_v = np.empty(shape=(13693,))
# SL_v.fill(np.nan)
# for day in [0]:
#     for lev in [0]:
#         for i in xrange(n_cols1d):
#             col = cols1d_ixy[i]
#             row = cols1d_jxy[i]
#             if cols1d_itype_lunit[i] == 1:
#                 SL_v[i] = SOILLIQ_xy[day, lev, row - 1, col - 1]
| IACETH/prescribeSM_cesm_1.2.x | clm_col_to_xy_example.py | Python | mit | 2,286 | [
"NetCDF"
] | d3a046fbf94c4d184be11f92aa4682da8f331d84ff5f69da84eef66ec36f86d3 |
# coding: utf-8
texto = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Afterfest</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta content="all" name="robots" />
<meta http-equiv="content-language" content="portuguese" />
<meta content="br" name="geo.country" />
<meta content="true" name="MSSmartTagsPreventParsing" />
<meta http-equiv="imagetoolbar" content="no" />
<meta content="1 days" name="revisit-after" />
<meta content="afterfest, fotos afterfest, cobertura, cobertura afterfest, fotos balada, fotos festa, loft, areia" name="keywords" />
<meta content="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013" name="description" />
<meta property="fb:app_id" content="156248407757982"/>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<link rel="stylesheet" type="text/css" href="/css/reset.css" />
<link rel="stylesheet" type="text/css" href="/css/site.css" />
<link rel="stylesheet" type="text/css" href="/css/plugins.css" />
<script type="text/javascript">
var BASE = '';
</script>
<script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js"></script>
<script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/swfobject/2.2/swfobject.js"></script>
<script type="text/javascript" src="/js/jquery.fancybox-1.3.1.pack.js"></script>
<script type="text/javascript" src="/js/jquery.jalert.packed.js"></script>
<script type="text/javascript" src="/js/util.js"></script>
<script type="text/javascript" src="/js/site.js"></script>
<link href="http://www.afterfest.com.br/favicon.ico" type="icon" rel="icon" /><link href="http://www.afterfest.com.br/favicon.ico" type="icon" rel="shortcut icon" />
</head>
<body>
<div id="topo">
<h1><a href="/">Afterfest</a></h1>
<div class="ads">
<div id="banner_flash_topo"></div>
<script type="text/javascript">
swfobject.embedSWF(
'/uploads/banners/topo/20120819-052526-.swf',
'banner_flash_topo',
600,
100,
'9.0.0',null, null, { wmode: 'transparent' })
</script>
</div>
</div><ul id="menu">
<li class="item">
<a class="baladas" href="/baladas">baladas</a>
</li><li class="item">
<a class="bares" href="/bares">bares</a>
</li><li class="item">
<a class="noticias" href="/noticias">noticias</a>
</li><li class="item">
<a class="coberturas " href="/coberturas ">coberturas</a>
</li><li class="item">
<a class="agenda" href="/agenda">agenda</a>
</li><li class="item">
<a class="gatas" href="/gatas">gatas</a>
</li><li class="item">
<a class="contato" href="/contato">contatos</a>
</li><li class="item">
<a class="cadastro" href="/cadastro">equipe</a>
</li></ul>
<div class="conteudo" id="galeria">
<div class="limitador">
<div class="header_pagina">
<div class="in">
<strong class="header_cobertura">Coberturas</strong>
<ul>
<li><a href="/fotos/hd/XV-Edicao/BIXURRASCO-">ver em 3D</a></li>
</ul>
</div>
</div>
<div class="filtro">
<label for="filtroLocal">LOCAL</label>
<select name="data[filtroLocal]" onchange="AF.buscarCoberturasPorLocal(this.value, "#filtroCobertura");" id="filtroLocal">
<option value="0">TODOS</option>
<option value="10">Arabica's</option>
<option value="16">Dunluce Irish Pub</option>
<option value="62">Budapeste Dinner</option>
<option value="19">Gengibre Lounge</option>
<option value="1">180° Ubatuba</option>
<option value="60">Store Disco</option>
<option value="20">Terraço Daslu</option>
<option value="29">Opium São Paulo</option>
<option value="31">Sirena</option>
<option value="32">Glass Club</option>
<option value="17">Parque da Cidade</option>
</select>
<label for="filtroCobertura">FESTA</label>
<select name="data[filtroCobertura]" onchange="AF.carregarCobertura(this.value);" id="filtroCobertura">
<option value="0">Escolha uma festa</option>
<option value="Loft-Club-Mogi-Inauguracao-VIP">10/04/10 - Inauguração Vip</option>
<option value="BRED-E-BRENO-Lancamento-do-novo-DVD">12/08/11 - BRED E BRENO - Lançamento do novo DVD</option>
<option value="Churas-Triplo-C">31/08/13 - Churras</option>
<option value="Festa-a-Fantasia">21/05/11 - Festa a Fantasia</option>
<option value="Friends-on-Decks-Dai-Ferreira">20/05/11 - Friends on Decks - Dai Ferreira</option>
<option value="Loft-Club-Mogi-Inauguracao">17/04/10 - Inauguração</option>
<option value="Inauguracao-Led-Club">22/06/11 - Inauguração Led Club</option>
<option value="Just-Black-DJ-Ricardo-Menga-Robotron-63">16/04/11 - Just Black - DJ Ricardo Menga + Robotron 63</option>
<option value="KF-Hip-House-Especial-1-Ano-DJ-Puff-DJ-Milk-DJ-Hadji">19/06/13 - KF Hip House - Especial 1 Ano + B.Day Gisele Fernandes - DJ Puff + DJ Milk + DJ Hadji</option>
<option value="Lancamento-Glamour-La-Locomotive-Winter-Dj-Fadi-Alameddine">17/05/13 - Lançamento Glamour + La Locomotive Winter - Dj Fadi Alameddine</option>
<option value="Only-Vip-Dj-Rubens-Junior">22/07/11 - Only Vip - Dj Rubens Junior</option>
<option value="Ticon-Live-Pistinha-Dj-Felix">27/05/11 - Ticon Live + Pistinha Dj Felix</option>
<option value="Tribe-10-Anos">16/07/11 - Tribe 10 Anos</option>
<option value="[-Sexta-Premium-]-Caio-e-Rafael-Andre-Mello-e-Diego">20/05/11 - [ Sexta Premium ] - Caio e Rafael + Andre Mello e Diego</option>
<option value="Coquetel-de-Inauguracao">15/06/11 - Coquetel de Inauguração</option>
<option value="Loft-Club-Mogi-Dj-Edgard-Fontes">24/04/10 - Dj Edgard Fontes (Anzu)</option>
<option value="Dj-Sound-Tour-2011">21/05/11 - Dj Sound Tour 2011</option>
<option value="House-of-Stage">09/04/11 - House of Stage</option>
<option value="KF-Hip-House-34-Dj-Puff">17/07/13 - KF Hip House #34 - Dj Puff</option>
<option value="Lagbeat-1-Edicao">15/10/11 - Lagbeat 1ª Edição</option>
<option value="Michel-Palazzo-Ressaca-de-carnaval">12/03/11 - Michel Palazzo - Ressaca de carnaval</option>
<option value="Sirena-Tour">23/07/11 - Sirena Tour</option>
<option value="Sirena-Tour-Vale-Edition">14/05/11 - Sirena Tour Vale Edition</option>
<option value="Spyzer-Live">20/05/11 - Spyzer Live</option>
<option value="Festa-do-Branco-Rodrigo-Faal">30/04/11 - Festa do Branco - Rodrigo Faal</option>
<option value="House-no-Deck-Elas-Convidam-Dj-Dai-Ferreira-BDay-Livia-Krom">28/09/12 - House no Deck - Elas Convidam - Dj Daï Ferreira + B.Day Lívia Krom</option>
<option value="Sirena-Sao-Paulo-Party">20/08/11 - Sirena - São Paulo Party</option>
<option value="Tom-Keller">29/07/11 - Tom Keller</option>
<option value="Top-5-Djs-Nova-Geracao">14/05/11 - Top 5 Djs - Nova Geração</option>
<option value="Veuve-Clicquot">24/09/11 - Veuve Clicquot</option>
<option value="Aniversario-do-PUB-RAIMUNDOS">05/09/13 - Aniversário do PUB - RAIMUNDOS</option>
<option value="SEXTA-FEIRA">06/09/13 - SEXTA-FEIRA</option>
<option value="Camaleao-Festa-8-Anos-Hotnight">30/04/10 - Festa de 8 Anos Site Hotnight</option>
<option value="Festa-Fantasia">28/05/11 - Festa Fantasia</option>
<option value="Gabe-Live">18/03/11 - Gabe Live!</option>
<option value="Torcida-Dunlace-Irish-Pub-Brasil-Costa-do-Marfim">20/06/10 - Jogo: Brasil x Costa do Marfim</option>
<option value="Loft-Club-Mogi-Lançamento-Chandon-Baby-Disco-Rosé">29/05/10 - Lançamento Chandon Baby Disco Rosé</option>
<option value="Loft-Club-Mogi-Just-Black">22/05/10 - Just Black (Festa do Preto)</option>
<option value="Dj-Mark-Fischer-Winter-Vibe">05/06/10 - Dj Mark Fischer - Winter Vibe</option>
<option value="House-no-Deck-1309-Sexta-Feira">13/09/13 - House no Deck - 13.09 - Sexta-Feira</option>
<option value="XV-Edicao" selected="selected">14/09/13 - XV Edicao - FATEC</option>
<option value="Nud-Cabaret-Miami-Sessions-DJ-Rod-B.-e-DJ-Lance-Blaise">18/06/10 - Miami Sessions - DJ Rod B. e DJ Lance Blaise</option>
<option value="Natal-na-Mansao">25/12/10 - Natal na Mansão</option>
<option value="Chopperia-Loucos-Por-Futebol-Sextaneja-Robson-Luiz-e-Banda">09/08/13 - Chopperia Loucos Por Futebol - Sextaneja - Robson Luiz e Banda</option>
<option value="Loft-Club-Mogi-Energia-na-Loft">19/06/10 - Energia na Loft</option>
<option value="Felguk-Live">17/05/13 - Felguk Live!</option>
<option value="House-no-Deck-Special-BDay-Diego-Kawasaki">30/08/13 - House no Deck - Special B.Day Diego Kawasaki</option>
<option value="KF-Hip-House-36-Dj-Hadji">31/07/13 - KF Hip House #36 - Dj Hadji</option>
<option value="1-Arraia-do-PUB-Dj-Danilo-Stellet-Dj-Fabricio-Miotto-Dj-Nogue-Dj-Estevao-Manfiolli">30/06/13 - 1º Arraiá do PUB - Dj Danilo Stellet + Dj Fabrício Miotto + Dj Nogue + Dj Estevão Manfiolli</option>
<option value="Carnaval-KF-Night-Marketing-SEXTA">08/02/13 - Carnaval KF Night Marketing SEXTA</option>
<option value="Churras-da-Galera-Edicao-3-Anos">24/08/13 - Churras da Galera - Edição 3 Anos</option>
<option value="Ciroc-Party-Dj-Junior-Fonseca">07/06/13 - Ciroc Party - Dj Junior Fonseca</option>
<option value="Eclatt-Housevalley-D-NOX-Du-Serena-Tom-Keller-Andre-Pulse">17/12/11 - Eclatt Housevalley - D-NOX + Du Serena + Tom Keller + André Pulse</option>
<option value="Eco-Private">03/06/12 - Eco Private</option>
<option value="Festa-de-Abertura-Temporada-2011-2012">03/09/11 - Festa de Abertura Temporada 2011/2012</option>
<option value="Grupo-Intimidade-SA-Final-Recopa-Corinthians-x-Sao-Paulo">17/07/13 - Grupo Intimidade S/A - Final Recopa - Corinthians x São Paulo</option>
<option value="House-no-Deck-Dj-Felipe-Faria-Dj-Noggue">09/08/13 - House no Deck - Dj Felipe Faria + Dj Noggue</option>
<option value="House-no-Deck-Dj-Paul-C-e-DJ-Oca">20/07/12 - House no Deck - Dj Paul C e DJ Oca</option>
<option value="House-no-Deck-Santo-Luxo-Dj-Pedro-Saab">23/08/13 - House no Deck - Santo Luxo - Dj Pedro Saab</option>
<option value="House-no-Deck-Especial-Afterfest-6-Projeto-Bassline-Andrew-Thompson">09/11/12 - House no Deck Especial Afterfest #6 - Projeto Bassline + Andrew Thompson</option>
<option value="KF-Hip-House-10-Dj-Puff-BDay-Jessica-Caroline">17/10/12 - KF Hip House #10 - Dj Puff + B.Day Jessica Caroline</option>
<option value="KF-Hip-House-28-DJ-Hadji-Anao-Live-vocal">29/05/13 - KF Hip House #28 - DJ Hadji + Anão (Live vocal)</option>
<option value="KF-Hip-House-32-DJ-Puff">26/06/13 - KF Hip House #32 - DJ Puff</option>
<option value="KF-Hip-House-37-DJ-Puff">07/08/13 - KF Hip House #37 - DJ Puff</option>
<option value="KF-Hip-House-Volcom-Dark-Bar-World-Tour-Dj-Puff">27/08/13 - KF Hip House - Volcom Dark Bar World Tour - Dj Puff</option>
<option value="Pagodao-do-Santo-Cupim-na-Mesa-Andre-Marinho-SP-Grupo-DIVA-Tudo-Entre-Amigos-Sambaja">25/05/13 - Pagodão do Santo - Cupim na Mesa (André Marinho - SP) + Grupo DIVA + Tudo Entre Amigos + Sambajá</option>
<option value="Pagodao-do-Santo-Cupim-na-Mesa-GPC-Batucada-E-Stefano-e-Grupo-Delirio">13/04/13 - Pagodão do Santo - Cupim na Mesa + GPC + Batucada E Stéfano e Grupo Delírio</option>
<option value="Ultra-Music-Festival-Brasil-Swedish-House-Mafia">03/12/11 - Ultra Music Festival - Brasil - Swedish House Mafia</option>
<option value="[-Sexta-Premium-]-Marcos-Gustavo-Diego-Carvalho-Andre">19/08/11 - [ Sexta Premium ] - Marcos & Gustavo + Diego Carvalho & André</option>
<option value="Camaleao-Jogo-Brasil-x-Chile">28/06/10 - Jogo: Brasil x Chile</option>
<option value="Pagodao-Do-Santo-Cupim-na-Mesa-Grupo-Pode-Confiar-Stefano-e-Grupo-Delirio-Grupo-Batucada-Dj-Estevao-Manfiolli">06/07/13 - Pagodão Do Santo - Cupim na Mesa + Grupo Pode Confiar + Stéfano e Grupo Delírio + Grupo Batucada + Dj Estevão Manfiolli</option>
<option value="Camaleao-Festa-de-1-Ano">02/07/10 - Festa de 1 Ano da Camaleão</option>
<option value="Fapija-2010-Show-Maria-Cecília-e-Rodolfo">10/07/10 - Camarote Show Maria Cecília e Rodolfo</option>
<option value="Festa-do-Branco---DEEP-Entertainment---Terraco-Daslu">28/08/10 - Festa do Branco - DEEP Entertainment - Terraço Daslu</option>
<option value="Loft-Club-Mogi-Seven-7-Pecados-Capitais">10/07/10 - Seven - 7 Pecados Capitais</option>
<option value="Fapija-2010-Thai-Estacao-Show-Maria-Cecília-e-Rodolfo">10/07/10 - Estação Show</option>
<option value="Inauguracao">04/09/10 - Inauguração - Maresias-SP</option>
<option value="Martijn-Ten-Velden">29/01/11 - Martijn Ten Velden</option>
<option value="Violive">03/09/10 - Violive</option>
<option value="We-Love-Electro-2-Anos">12/02/11 - We Love Electro - 2 Anos</option>
<option value="ElectroFolia---Dj-Dimy-Soler">31/07/10 - ElectroFolia - Dj Dimy Soler</option>
<option value="MOB-Gathering">02/10/10 - MOB Gathering - Hotel Transamérica SP</option>
<option value="Chopperia-Loucos-Por-Futebol-Sextaneja-David-Saconi">26/07/13 - Chopperia Loucos Por Futebol - Sextaneja - David Saconi</option>
<option value="Circo-Loco-The-Next-Level-Dj-Peppo-Santiago-Dj-Rodrigo-S-Legiao-Urbana-Cover">24/05/13 - Circo Loco - The Next Level - Dj Peppo Santiago + Dj Rodrigo S. + Legião Urbana Cover</option>
<option value="Desfile-OFD-10-Anos">23/09/10 - Desfile OFD 10 Anos [ Famosos ]</option>
<option value="Festa-do-Branco-ZOOM-BOXX">14/12/12 - Festa do Branco - ZOOM BOXX</option>
<option value="House-no-Deck-Encerramento-Fire-Up-Dj-Juy">02/08/13 - House no Deck - Encerramento Fire Up - Dj Juy</option>
<option value="House-no-Deck-Warm-Up-Parador-Maresias-DJ-Marcelo-Tromboni">05/10/12 - House no Deck - Warm Up Parador Maresias - DJ Marcelo Tromboni</option>
<option value="House-Valley-Winter-Edition-Dj-Carka-Schwiderski-Dj-Juy-Dj-Robson-Nogueira-Dj-Thiago-Germek-Dj-Pedro-Saab">06/07/13 - House Valley Winter Edition - Dj Carla Schwiderski + Dj Juy + Dj Robson Nogueira + Dj Thiago Germek + Dj Pedro Saab</option>
<option value="KF-Hip-House-11-Dj-Hadji">24/10/12 - KF Hip House #11 - Dj Hadji</option>
<option value="KF-Hip-House-29-DJ-Puff">05/06/13 - KF Hip House #29 - DJ Puff</option>
<option value="Oscar-Fashion-Days">22/09/10 - Oscar Fashion Days 10 Anos</option>
<option value="Pagodao-Do-Santo-com-Cupim-na-Mesa-Intimidade-S-A-Pode-Confiar-Stefano-Dj-Estevao-Manfiolli">15/06/13 - Pagodão Do Santo com Cupim na Mesa + Intimidade S-A + Pode Confiar + Stéfano + Dj Estevão Manfiolli</option>
<option value="Celebration-BDay-Dj-Paul-C-Noggue-Nicodemo">26/07/13 - Celebration B.Day - Dj Paul C. + Noggue + Nicodemo</option>
<option value="Choperia-Loucos-Por-Futebol-Grupo-Pagode-Sapeca">21/07/13 - Chopperia Loucos Por Futebol - Grupo Pagode Sapeca</option>
<option value="Festa-de-1-Ano-da-PROMO">09/10/10 - Festa de 1 Ano da PROMO</option>
<option value="Glamour-by-Fadi-Alameddine">09/03/13 - Glamour by Fadi Alameddine</option>
<option value="Hedkandi-11-Years-Party-Dj-Sebastian-Arevalo-Dj-Thiago-Germek-Dj-Estevao-Manfiolli-Dj-Danilo-Stellet-Dj-Fabricio-Mioto-Dj-Pedro-Saab">20/07/13 - Hedkandi 11 Years Party - Dj Sebastian Arévalo + Dj Thiago Germek + Dj Estevão Manfiolli + Dj Danilo Stellet + Dj Fabricio Mioto + Dj Pedro Saab</option>
<option value="House-no-Deck-FUCK-YOU-PLASTIC-DJ-Bassline-Andrew-Thompson-Rose-Aloy-vs-Marcelo-Nascimento-Marcelo-Lifeguard-vs-Diego-Kaos">16/08/13 - House no Deck - !FUCK YOU PLASTIC DJ! - Bassline + Andrew Thompson + Rose Aloy vs Marcelo Nascimento + Marcelo Lifeguard vs Diego Kaos</option>
<option value="House-no-Deck-Dj-Marcelo-Tromboni-2">19/07/13 - House no Deck - Dj Marcelo Tromboni #2</option>
<option value="KF-Hip-House-23-DJ-Milk">24/04/13 - KF Hip House #23 - DJ Milk - B.Day Lucas Funchal</option>
<option value="KF-Hip-House-35-DJ-Milk">24/07/13 - KF Hip House #35 - DJ Milk</option>
<option value="Reabertura">28/08/13 - Reabertura</option>
<option value="Saint-Patrick's-Day-Dj-Estevao-Manfiolli">17/03/13 - Saint Patrick's Day - Dj Estevão Manfiolli</option>
<option value="AfterFest-6-Anos-com-DJ-Felix-e-Renan-Noise-BDay-Suellen-Thiago-Luiz">14/09/12 - AfterFest 6 Anos com DJ Felix e Renan Noise + B.Day Suellen + Thiago + Luiz</option>
<option value="Bruno-Barudi-e-DarthVader">26/12/11 - Bruno Barudi e Darth&Vader</option>
<option value="Ciroc-Party">15/03/13 - Cîroc Party</option>
<option value="DJ-Mario-Fischetti-Dj-Danilo-Stellet">31/08/12 - DJ Mario Fischetti + Dj Danilo Stellet</option>
<option value="Dream-Session-Dj-Andre-Plati">15/03/13 - Dream Session - Dj André Plati</option>
<option value="House-no-Deck-FUCK-YOU-PLASTIC-DJ-Dj-Andrew-Thompson-Dj-Marcelo-Lifeguard-e-Dj-Diego-Kaos">05/07/13 - House no Deck - !FUCK YOU PLASTIC DJ! - Dj Andrew Thompson + Dj Marcelo Lifeguard e Dj Diego Kaos</option>
<option value="KF-Hip-House-26-DJ-King">15/05/13 - KF Hip House #26 - DJ King</option>
<option value="KF-Hip-House-27-DJ-Milk">22/05/13 - KF Hip House #27 - DJ Milk + B.Day Monique Bertolini</option>
<option value="KF-Hip-House-3-Dj-Milk">22/08/12 - KF Hip House #3 - Dj Milk</option>
<option value="KF-Hip-House-5-Dj-Hadji-Mc-Anao">05/09/12 - KF Hip House #5 - Dj Hadji + Mc Anão</option>
<option value="One-Year-Led---White-Party">05/11/10 - One Year Led - White Party</option>
<option value="Quarta-e-Quinta">29/08/12 - Quarta e Quinta</option>
<option value="Rodizio-de-Quarta">01/08/12 - Rodízio de Quarta</option>
<option value="Rodizio-dos-Amigos-7">17/10/12 - Rodízio dos Amigos #7</option>
<option value="Rodizio-dos-Amigos-8">07/11/12 - Rodízio dos Amigos #8</option>
<option value="Sirena-Tour-Sao-Jose-dos-Campos-2013-Pedro-Saab-Bassline-Southmen-Tom-Keller-Press-Kit">11/05/13 - Sirena Tour São José dos Campos 2013 - Pedro Saab + Bassline + Southmen + Tom Keller + Press Kit</option>
<option value="TRIBE-50-Edicao">07/07/12 - TRIBE - 50ª Edição - por Higor Bono e Walter Henrique</option>
<option value="Warm-up-Sirena-Tour-Dj-Dai-Ferreira-BDay-Bruna-Marcio-Dantas">03/05/13 - Warm-up Sirena Tour - Dj Däi Ferreira B.Day Bruna + Marcio Dantas</option>
<option value="Yazigi-apresenta-Halloween-no-PUB-Dj-Milk">31/10/12 - Yázigi apresenta Halloween no PUB - Dj Milk</option>
<option value="IM-Fest">30/10/10 - IM Fest</option>
<option value="OCTOBERPUB">24/10/10 - OCTOBERPUB</option>
<option value="After-House-Sessions-1-Edicao">04/12/11 - After House Sessions - 1º Edição</option>
<option value="Eclatt-6-anos---White-Party">06/11/10 - Eclatt 6 anos - White Party</option>
<option value="Opium-Sao-Paulo-Inauguracao">10/11/10 - Inauguração</option>
<option value="Rodizio-dos-Amigos-6">10/10/12 - Rodízio dos Amigos #6</option>
<option value="Victor-Ruiz-Any-Mello">22/02/13 - Victor Ruiz &amp; Any Mello</option>
<option value="Drods-5-Anos">13/11/10 - Drods 5 Anos</option>
<option value="Festa-da-Grife-Forum">19/11/10 - Festa da Grife Forum</option>
<option value="Glam-Friday">29/10/10 - Glam Friday</option>
<option value="IM-Fest---Electrixx">04/12/10 - IM Fest - Electrixx</option>
<option value="Reason">23/11/10 - Reason</option>
<option value="Hot-Hot---Dj-Mandraks-Le-paladino-e-Gui-Rozelli">18/12/10 - Hot Hot - Dj Mandraks, Le paladino e Gui Rozelli.</option>
<option value="D-nox--Beckers">29/12/10 - D-nox &amp; Beckers - Perfect Life</option>
<option value="Sirena-Tour-Mogi-das-Cruzes">04/08/12 - Sirena Tour Mogi das Cruzes</option>
<option value="Tequila-Night---Simone-Pellizari">17/12/10 - Tequila Night - Simone Pellizari</option>
<option value="Water-Republic-Anniversary-2010-">18/12/10 - Water Republic Anniversary 2010 </option>
<option value="Choperia-Loucos-Por-Futebol-Academia-F4-Fitness-Dj-Cristiane-selecao-">03/08/13 - Choperia Loucos Por Futebol - Academia F4 Fitness - Dj Cristiane (seleção) </option>
<option value="Choperia-Loucos-Por-Futebol-Sextaneja-2-David-Saconi">02/08/13 - Choperia Loucos Por Futebol - Sextaneja #2 - David Saconi</option>
<option value="Deep-Aires-Sunset-Hat-Party">01/05/13 - Deep Aires - Sunset Hat Party</option>
<option value="Dunluce-Connect-Gold-Label-Reserve-Dj-Juy">29/06/13 - Dunluce Connect - Gold Label Reserve - Dj Juy</option>
<option value="House-no-Deck-Press-KIT-Sirena-">24/08/12 - House no Deck - Press KIT - Sirena </option>
<option value="Inauguracao-VIP">15/01/11 - Inauguração-VIP</option>
<option value="KF-Hip-House-22-DJ-Hadji">17/04/13 - KF Hip House #22 - DJ Hadji</option>
<option value="KF-Hip-House-24-DJ-Milk-KL-Jay">01/05/13 - KF Hip House #24 - DJ Milk + KL Jay</option>
<option value="KF-Hip-House-25-DJ-Hadji">08/05/13 - KF Hip House #25 - DJ Hadji</option>
<option value="KF-Hip-House-30-Festa-dos-Solteiros-Dj-Milk">12/06/13 - KF Hip House #30 - Festa dos Solteiros - Dj Milk</option>
<option value="B-Day-Dalton-Duarte-Dj-Carlinhos-Silva-mendigo-Marcelo-Tromboni-e-Edu-Poppo">30/06/12 - B-Day Dalton Duarte - Dj Carlinhos Silva (mendigo), Marcelo Tromboni e Edu Poppo</option>
<option value="Ballrange-Party-Special-BDay-Danilo-Stellet-Dj-Fabricio-Miotto-Noggue">18/05/13 - Ballrange Party - Special B.Day Danilo Stellet + Dj Fabricio Miotto + Noggue</option>
<option value="Bassline">16/02/13 - Bassline</option>
<option value="Carnaval-KF-Night-Marketing-TERCA">12/02/13 - Carnaval KF Night Marketing TERÇA</option>
<option value="Circus-Folia-Rodrigo-Santafe-BDay-Renato-Natali-e-Claudia-Macedo">01/02/13 - Circus Folia - Rodrigo & Santafé - B.Day - Renato Natali e Claudia Macedo</option>
<option value="Comemoracao-4-Anos">17/07/13 - Comemoração 4 Anos</option>
<option value="DJ-Naccarati-Dj-Marcelo-Tromboni">01/11/12 - DJ Naccarati + Dj Marcelo Tromboni</option>
<option value="Eclatt-007-Female-Angels">13/08/11 - Eclatt 007 - Female Angels</option>
<option value="Festa-do-Branco-2011-DEEP-Entertainment-Terraco-Daslu">06/09/11 - Festa do Branco 2011 - DEEP Entertainment - Terraço Daslu</option>
<option value="House-no-Deck-Dj-Bruno-Mendez">21/09/12 - House no Deck - Dj Bruno Mendez</option>
<option value="House-no-Deck-Dj-Juy-Dj-Kleber-Alves-BDay-Dalton-Duarte-Ana-Claudia-Carvalho-Gerson-Dias">14/06/13 - House no Deck - Dj Juy + Dj Kleber Alves - B.Day Dalton Duarte + Ana Cláudia Carvalho + Gerson Dias</option>
<option value="House-no-Deck-Especial-Dia-das-Mulheres-Angel-Sun-BDay-Luiz-Gustavo">08/03/13 - House no Deck - Especial Dia das Mulheres - Angel Sun - B.Day Luiz Gustavo</option>
<option value="House-no-Deck-Warm-up-Eclatt-8-Anos">14/12/12 - House no Deck - Warm-up Eclatt 8 Anos - B.Day Grace Kelly</option>
<option value="Inauguracao-Glass-Club">22/01/11 - Inauguração Glass Club</option>
<option value="Jantar-a-la-carte">05/10/12 - Jantar a la carte</option>
<option value="Ketel-One-Cocktail's-Day">24/05/13 - Ketel One Cocktail's Day - Special B.Day Dj Carl + Kleber Alves</option>
<option value="KF-Hip-House-13-Dj-Milk">12/12/12 - KF Hip House #13 - Dj Milk</option>
<option value="KF-Hip-House-16-Dj-Milk">23/01/13 - KF Hip House #16 - Dj Milk - B.Day Bianca Rabelo =D</option>
<option value="KF-Hip-House-18-DJ-KL-Jay-DJ-Hadji">06/03/13 - KF Hip House #18 - DJ KL Jay + DJ Hadji</option>
<option value="KF-Hip-House-19-DJ-Puff">13/03/13 - KF Hip House #19 - DJ Puff</option>
<option value="KF-Hip-House-2-Dj-Hadji">15/08/12 - KF Hip House #2 - Dj Hadji</option>
<option value="KF-Hip-House-21-DJ-CIA-DJ-Milk">10/04/13 - KF Hip House #21 - DJ CIA + DJ Milk</option>
<option value="KF-Hip-House-4-Dj-Puff">29/08/12 - KF Hip House #4 - Dj Puff</option>
<option value="Parktronic">24/07/11 - Parktronic 2011</option>
<option value="Pre-Carnaval-Pub-Folia">27/01/13 - Pré Carnaval Pub Folia</option>
<option value="Quarta-dos-Amigos-10">19/12/12 - Quarta dos Amigos #10</option>
<option value="Quarta-dos-Amigos-12">05/03/13 - Quarta dos Amigos #12</option>
<option value="Quarta-dos-Amigos-9">12/12/12 - Quarta dos Amigos #9</option>
<option value="Reinauguracao">15/02/13 - Reinauguração</option>
<option value="Spring-Summer">23/09/12 - Spring Summer</option>
<option value="Warm-up-Electrance-6-Anos-">20/08/11 - Warm-up Electrance 6 Anos </option>
<option value="Welcome-Party-Dj-Andre-Marchezini-BDay-Aline-Zito">26/04/13 - Welcome Party - Dj André Marchezini + B.Day Aline Zito</option>
<option value="18-Anos-D-Nox-Beckers-Erick-Morillo">13/11/11 - 18 Anos D-Nox &amp; Beckers + Erick Morillo</option>
<option value="Deep-Aires-Dj-Andre-Marchezini">17/04/13 - Deep Aires - Dj Andre Marchezini</option>
<option value="Friday-Connect-Dj-Nuts-Noise">29/03/13 - Friday Connect - Dj Nuts Noise</option>
<option value="House-no-Deck-Bassline-Edu-Reis-BDay-Walter-Henrique-e-Felix">19/04/13 - House no Deck - Bassline + Edu Reis - B.Day Walter Henrique e Felix</option>
<option value="House-no-Deck-Noggue-Marco-Aoki">22/02/13 - House no Deck - Noggue + Marco Aoki</option>
<option value="Housevalley-Dj-Vee-Brondi-Edu-Zottini-Bassline-Juy-Pedro-Saab">20/04/13 - Housevalley - Dj Vee Brondi + Edu Zottini + Bassline + Juy + Pedro Saab</option>
<option value="KF-Hip-House-20-DJ-Hadji-Anao-live-vocal">27/03/13 - KF Hip House #20 - DJ Hadji + Anão (live vocal)</option>
<option value="Lancamento-Hedkandi-BDay-Allan-Mello-Dj-Rubens-Jr">28/06/13 - Lançamento Hedkandi + B.Day Allan Mello - Dj Rubens Jr.</option>
<option value="Markus-Binapfl">04/02/11 - Markus Binapfl</option>
<option value="Molhe-On-The-Road-Evento-Exclusivo-no-Brasil-DJ-DEXTRO-MC-KATORZ">21/10/11 - Molhe On The Road - Evento Exclusivo no Brasil - DJ DEXTRO + MC KATORZ</option>
<option value="Raul-Boesel">13/08/11 - Raul Boesel</option>
<option value="AfterFest-Party-7">22/03/13 - AfterFest Party #7 + B.Day Tatiana Andrade</option>
<option value="DEXTERZ">30/04/11 - DEXTERZ</option>
<option value="DJ-Tubarao">08/07/11 - DJ Tubarão</option>
<option value="Electrance-6-Anos">27/08/11 - Electrance 6 Anos</option>
<option value="House-no-Deck-1-Ano-Parceria-Daltinho-Kengao-DJ-Felix">30/11/12 - House no Deck - 1 Ano - Parceria Daltinho + Kengão - DJ Felix</option>
<option value="House-no-Deck-AfterFest-5-Edicao">27/07/12 - House no Deck - AfterFest 5ª Edição</option>
<option value="House-no-Deck-Niver-Pamela-Rodrigues-Dj-Robson-Nogueira">17/08/12 - House no Deck - Niver Pâmela Rodrigues + Dj Robson Nogueira</option>
<option value="House-no-Deck-Phyton-DJ-Oca-">26/10/12 - House no Deck - Phyton - DJ Oca </option>
<option value="House-no-Deck-TOP-DJ-Nuts-Noise-Banda-Arena-Rock">13/07/12 - House no Deck - TOP DJ Nuts Noise + Banda Arena Rock</option>
<option value="KF-Hip-House-39-Dj-Hadji">21/08/13 - KF Hip House #39 - Dj Hadji</option>
<option value="LUMIERE-Ressaca-de-Carnaval">12/03/11 - LUMIÉRE - Ressaca de Carnaval</option>
<option value="Quarta-dos-Amigos-11">27/02/13 - Quarta dos Amigos #11</option>
<option value="Rafael-Noronha">19/03/11 - Rafael Noronha</option>
<option value="Rodizio-de-Quarta-2">05/09/12 - Rodízio de Quarta #2</option>
<option value="Rodizio-de-Quarta-15-08">15/08/12 - Rodízio de Quarta - 15/08</option>
<option value="Xurras4Friends-Plinio-Guizera-e-Tail">06/11/11 - Xurras4Friends - Plínio, Guizera e Tail</option>
<option value="Dj-Vitor-Lima">05/02/11 - Dj Vitor Lima</option>
<option value="Just-White-Dj-Edgard-Fontes-Remix">12/02/11 - Just White - Dj Edgard Fontes (Remix)</option>
<option value="Temporada-2011-DJ-Mayara-Leme">28/01/11 - Abertura Temporada 2011 - DJ Mayara Leme</option>
<option value="Electrance-2011">21/05/11 - Electrance 2011</option>
<option value="Sharp-Bend-Ricardo-Menga">27/08/11 - Sharp Bend + Ricardo Menga</option>
<option value="Aniversario-7-Anos-PUB">15/09/12 - Aniversário 7 Anos PUB</option>
<option value="Camarote-AfterFest-4-Anos-Dj-Felix-Dj-Rick-DUB">14/10/11 - Camarote AfterFest 4 Anos + Dj Felix + Dj Rick DUB</option>
<option value="Eclatt-Only-White-Eletrixx-Thricie">08/10/11 - Eclatt Only White - Eletrixx + Thricie</option>
<option value="Fabio-Castro-Clube-das-Mulheres">09/07/11 - Fabio Castro</option>
<option value="House-no-Deck-DJ-Gelipe-Faria-DJ-Jr-Fonseca-Encerramento-Fire-UP">03/08/12 - House no Deck - DJ Felipe Faria + DJ Jr. Fonseca - Encerramento Fire UP</option>
<option value="House-no-Deck-Dj-Felix-Robson-Nogueira">31/08/12 - House no Deck -Dj Felix + Robson Nogueira</option>
<option value="Imagine-Dj-Davison-Lemos">04/11/11 - Imagine - Dj Davison Lemos</option>
<option value="Just-Black-DJ-Viktor-Mora">04/11/11 - Just Black - DJ Viktor Mora</option>
<option value="KF-Hip-House-12-Dj-Hadji">07/11/12 - KF Hip House #12 - Dj Hadji</option>
<option value="KF-Hip-House-17-Negrali-DJ-Nene-DJ-Puff">20/02/13 - KF Hip House #17 - Negrali + DJ Nenê + DJ Puff</option>
<option value="KF-Hip-House-18-Dj-Milk-Groove-it">27/02/13 - KF Hip House #18 - Dj Milk + Groove It</option>
<option value="Marcelo-Sa-Pistinha-After-Renan-Noise">07/05/11 - Marcelo Sá, Pistinha After Renan Noise</option>
<option value="Rodizio-de-Quarta-3">26/09/12 - Rodízio de Quarta #3</option>
<option value="Super-Buddies-Thiago-Marques-Cassiano-E-Juliano-Urizzi-BDay-Felipe-Tavares-e-Joao-Marcelo-Andery">30/09/11 - Super Buddies - Thiago Marques, Cassiano E Juliano Urizzi B.Day Felipe Tavares e João Marcelo Andery</option>
<option value="TRIBALTECH-2012-The-End">29/09/12 - TRIBALTECH 2012 - The End - Fotógrafo: Higor Bono</option>
<option value="Warm-up-TRIBE-CLUB-Du-Serena-x-Dahan-Juy-Bassline">19/01/13 - Warm-up TRIBE CLUB - Du Serena x Dahan + Juy + Bassline</option>
<option value="[-Sexta-Premium-]-Raphael-Leandro-Bruno-Ray">26/08/11 - [ Sexta Premium ] - Raphael Leandro + Bruno &amp; Ray</option>
<option value="AfterFest-4-Anos-DJ-Feio-Felix-Rodrigo-S-e-Renan-Noise">21/10/11 - AfterFest 4 Anos - DJ Feio, Felix, Rodrigo S. e Renan Noise</option>
<option value="CABARET-|-O-Melhor-do-Funk-Carioca">07/10/11 - CABARET | O Melhor do Funk Carioca</option>
<option value="Electrixx">22/03/13 - Electrixx</option>
<option value="House-4-Friends-1">09/09/11 - House 4 Friends #1</option>
<option value="House-no-Deck-Dj-Du-Aoki-Dj-Dirceu-Pires">10/08/12 - House no Deck - Dj Du Aoki + Dj Dirceu Pires</option>
<option value="KF-Hip-House-39-Dj-Milk">14/08/13 - KF Hip House #39 - Dj Milk</option>
<option value="SPYZER">25/11/11 - SPYZER</option>
<option value="Top-Secret-2-Edicao">01/10/11 - Top Secret - 2ª Edição</option>
<option value="Bruno-Barudi-Dj-Magui">25/01/13 - Bruno Barudi + Dj Magui</option>
<option value="House-no-DECK">02/12/11 - House no DECK</option>
<option value="KF-Hip-House-Dj-Puff-Dj-Hadji">08/08/12 - KF Hip House #1 - Dj Puff + Dj Hadji</option>
<option value="KF-Hip-House-9-Dj-Milk-BDay-Marilia-Maria">10/10/12 - KF Hip House #9 - Dj Milk + B.Day Marília Maria</option>
<option value="PINK-|-HIP-HOP-|-HOUSE-">26/08/11 - PINK | HIP-HOP | HOUSE </option>
<option value="Energia-na-Glass">14/05/11 - Energia na Glass</option>
<option value="Coquetel-Freixenet-Dj-Rubens-Junior">01/03/13 - Coquetel Freixenet - Dj Rubens Junior - B.Day Sabrina Rabelo</option>
<option value="Festa-do-Branco-2011">08/10/11 - Festa do Branco 2011</option>
<option value="House-no-Deck-Dj-Robson-Nogueira-2">12/07/13 - House no Deck - Dj Robson Nogueira #2</option>
<option value="Rodizio-dos-Amigos-4">19/09/12 - Rodízio dos Amigos #4</option>
<option value="Felguk">08/12/12 - Felguk</option>
<option value="House-no-Deck-Dj-Robson-Nogueira-The-Hitmakers">19/10/12 - House no Deck - Dj Robson Nogueira - The Hitmakers</option>
<option value="KF-Hip-House-8-Dj-Hadji">26/09/12 - KF Hip House #8 - Dj Hadji</option>
<option value="Boris-Brejcha-Du-Serena">22/10/11 - Boris Brejcha + Du Serena</option>
<option value="Gallery-Classic-House">01/11/11 - Gallery Classic House - Fabiano Salles</option>
<option value="House-no-Deck-Warm-UP-Space-Ibiza-Campos-do-Jordao">06/07/12 - House no Deck - Warm UP Space Ibiza Campos do Jordão</option>
<option value="Island-Pool-Party">29/10/11 - Island Pool Party</option>
<option value="KF-Hip-House-7-Dj-Milk">19/09/12 - KF Hip House #7 - Dj Milk</option>
<option value="KF-Hip-House-14-Dj-Milk">19/12/12 - KF Hip House #14 - Dj Milk</option>
<option value="Gabe-Wrecked-Machines">07/01/12 - Gabe - Wrecked Machines</option>
<option value="Aniversario-do-Kengao">13/01/12 - Aniversário do Kengão</option>
<option value="Tribe-Club-Summer-Edition">21/01/12 - Tribe Club - Summer Edition</option>
<option value="La-Madre-DJ-Robson-Nogueira">27/01/12 - La Madre - DJ Robson Nogueira</option>
<option value="Summer-After">26/02/12 - Summer After</option>
<option value="Reinauguracao-Pos-Reforma">24/02/12 - Reinauguração Pós Reforma</option>
<option value="Pub-in-the-House-BDay-Sabrina-Rabelo">29/02/12 - Pub in the House + B.Day Sabrina Rabelo</option>
<option value="House-no-Deck-Dj-Nicodemo-Banda-All-Star-40">02/03/12 - House no Deck - Dj Nicodemo + Banda All Star 40</option>
<option value="House-no-Deck-Niver-Allan-Mello-Magno-Nascimento-Vanessa-Cabral">29/06/12 - House no Deck - Niver Allan Mello, Magno Nascimento, Vanessa Cabral</option>
<option value="QUARTA-FEIRA">04/09/13 - QUARTA-FEIRA</option>
<option value="House-no-Deck-com-Du-Aoki-Marco-Aoki-e-Diego-Colombini">16/03/12 - House no Deck com Du Aoki, Marco Aoki e Diego Colombini</option>
<option value="House-no-Deck-AfterFest-2a-Edicao">09/03/12 - House no Deck - AfterFest 2ª Edição + B.Day Plínio Boucault</option>
<option value="House-4-Friends-DJ-Felix-Mixer-Live">17/03/12 - House 4 Friends - DJ Felix + Mixer Live</option>
<option value="Beach-Conception-Dj-Felix-Dj-Pedro-Scarpa">24/03/12 - Beach Conception - Dj Felix + Dj Pedro Scarpa</option>
<option value="Electrance-Preview-Indoor">29/01/12 - Electrance Preview Indoor</option>
<option value="Island-Pool-Party-II">17/03/12 - Island Pool Party II</option>
<option value="House-no-Deck-com-DJ-Robson-Nogueira">23/03/12 - House no Deck com DJ Robson Nogueira</option>
<option value="House-no-Deck-Dj-Du-Aoki-Niver-Juliete-Leal">11/05/12 - House no Deck - Dj Du Aoki + Niver Juliete Leal</option>
<option value="Ministry-of-Sound-Campinas-Camarote-Perfect-Life">30/03/12 - Ministry of Sound Campinas - Camarote Perfect Life</option>
<option value="House-no-Deck-AfterFest-3-Edicao-BDay-Rick-Afterfest-Dj-Felix-e-Juliana-Minini">27/04/12 - House no Deck - AfterFest 3ª Edição + B.Day Rick Afterfest, Dj Felix e Juliana Minini</option>
<option value="House-no-Deck-Niver-Danielle-Ferri-e-Maiara-Nozari-Dj-Robson-Nogueira-Dj-Marcelo-Tromboni">18/05/12 - House no Deck - Niver Danielle Ferri e Maiara Nozari - Dj Robson Nogueira, Dj Marcelo Tromboni</option>
<option value="House-no-Deck-Warm-Up-Fire-Up-Lounge-Dj-Edu-Zottini">21/06/13 - House no Deck - Warm Up Fire Up Lounge - Dj Edu Zottini</option>
<option value="Hip-House-Guten">01/06/12 - Hip-House Guten</option>
<option value="House-No-Deck-Dj-Robson-Nogueira">01/06/12 - House No Deck - Dj Robson Nogueira</option>
</select> </div>
<div id="comum">
<div class="info">
<h2>
Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 </h2>
<div class="compartilhar">
<div class="addthis_toolbox addthis_pill_combo">
<a class="addthis_button_tweet" tw:count="horizontal"></a>
<a class="addthis_button_facebook_like"></a>
<a class="addthis_counter addthis_pill_style"></a>
</div>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#username=afterfest"></script> </div>
</div>
<div id="listaFotos">
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 1" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 1"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 1"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 2" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 2"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 2"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 3" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 3"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 3"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 4" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 4"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 4"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 5" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 5"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 5"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 6" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 6"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 6"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 7" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 7"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 7"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 8" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 8"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 8"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 9" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 9"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 9"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 10" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 10"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 10"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 11" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 11"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 11"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 12" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 12"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 12"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 13" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 13"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 13"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 14" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 14"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 14"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 15" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 15"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 15"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 16" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 16"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 16"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 17" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 17"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 17"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 18" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 18"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 18"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 19" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 19"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 19"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 20" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 20"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 20"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 21" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 21"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 21"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 22" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 22"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 22"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 23" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 23"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 23"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 24" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 24"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 24"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 25" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 25"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 25"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 26" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 26"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 26"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 27" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 27"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 27"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 28" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 28"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 28"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 29" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 29"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 29"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 30" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 30"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 30"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 31" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 31"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 31"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 32" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 32"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 32"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 33" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 33"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 33"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 34" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 34"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 34"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 35" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 35"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 35"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 36" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 36"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 36"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 37" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 37"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 37"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 38" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 38"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 38"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 39" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 39"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 39"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 40" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 40"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 40"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 41" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 41"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 41"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 42" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 42"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 42"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 43" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 43"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 43"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 44" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 44"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 44"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 45" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 45"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 45"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 46" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 46"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 46"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 47" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 47"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 47"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084752-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 48" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084752-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 48"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 48"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 49" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 49"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 49"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 50" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 50"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 50"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 51" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 51"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 51"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 52" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 52"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 52"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 53" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 53"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 53"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 54" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 54"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 54"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 55" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 55"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 55"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 56" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 56"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 56"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 57" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 57"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 57"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 58" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 58"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 58"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 59" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 59"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 59"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 60" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 60"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 60"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 61" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 61"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 61"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 62" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 62"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 62"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 63" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 63"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 63"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084834-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 64" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084834-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 64"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 64"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 65" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 65"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 65"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 66" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 66"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 66"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 67" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 67"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 67"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 68" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 68"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 68"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 69" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 69"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 69"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 70" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 70"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 70"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 71" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 71"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 71"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 72" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 72"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 72"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 73" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 73"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 73"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 74" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 74"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 74"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 75" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 75"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 75"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 76" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 76"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 76"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 77" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 77"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 77"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 78" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 78"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 78"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 79" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 79"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 79"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084938-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 80" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084938-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 80"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 80"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 81" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 81"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 81"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 82" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 82"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 82"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 83" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 83"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 83"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 84" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 84"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 84"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 85" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 85"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 85"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 86" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 86"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 86"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 87" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 87"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 87"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 88" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 88"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 88"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 89" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 89"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 89"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 90" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 90"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 90"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 91" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 91"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 91"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 92" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 92"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 92"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 93" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 93"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 93"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 94" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 94"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 94"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 95" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 95"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 95"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085049-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 96" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085049-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 96"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 96"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 97" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 97"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 97"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 98" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 98"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 98"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 99" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 99"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 99"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 100" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 100"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 100"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 101" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 101"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 101"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 102" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 102"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 102"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 103" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 103"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 103"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 104" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 104"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 104"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 105" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 105"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 105"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 106" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 106"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 106"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 107" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 107"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 107"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 108" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 108"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 108"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 109" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 109"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 109"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 110" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 110"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 110"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 111" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 111"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 111"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085140-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 112" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085140-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 112"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 112"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 113" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 113"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 113"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 114" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 114"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 114"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 115" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 115"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 115"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 116" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 116"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 116"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 117" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 117"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 117"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 118" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 118"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 118"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 119" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 119"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 119"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 120" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 120"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 120"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 121" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 121"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 121"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 122" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 122"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 122"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 123" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 123"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 123"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 124" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 124"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 124"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 125" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 125"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 125"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 126" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 126"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 126"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 127" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 127"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 127"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085213-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 128" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085213-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 128"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 128"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 129" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 129"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 129"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 130" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 130"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 130"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 131" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 131"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 131"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 132" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 132"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 132"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 133" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 133"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 133"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 134" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 134"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 134"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 135" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 135"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 135"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 136" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 136"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 136"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 137" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 137"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 137"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 138" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 138"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 138"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 139" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 139"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 139"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 140" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 140"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 140"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 141" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 141"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 141"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 142" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 142"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 142"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 143" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 143"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 143"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085255-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 144" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085255-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 144"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 144"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 145" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 145"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 145"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 146" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 146"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 146"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 147" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 147"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 147"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 148" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 148"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 148"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 149" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 149"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 149"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 150" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 150"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 150"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 151" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 151"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 151"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 152" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 152"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 152"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 153" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 153"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 153"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 154" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 154"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 154"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 155" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 155"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 155"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 156" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 156"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 156"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 157" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 157"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 157"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 158" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 158"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 158"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 159" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 159"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 159"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085340-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 160" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085340-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 160"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 160"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 161" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 161"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 161"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 162" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 162"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 162"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 163" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 163"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 163"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 164" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 164"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 164"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 165" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 165"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 165"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 166" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 166"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 166"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 167" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 167"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 167"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 168" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 168"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 168"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 169" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 169"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 169"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 170" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 170"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 170"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 171" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 171"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 171"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 172" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 172"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 172"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 173" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 173"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 173"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 174" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 174"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 174"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 175" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 175"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 175"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085416-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 176" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085416-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 176"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 176"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 177" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 177"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 177"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 178" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 178"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 178"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 179" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 179"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 179"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 180" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 180"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 180"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 181" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 181"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 181"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 182" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 182"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 182"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 183" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 183"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 183"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 184" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 184"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 184"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 185" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 185"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 185"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 186" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 186"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 186"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 187" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 187"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 187"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 188" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 188"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 188"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 189" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 189"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 189"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 190" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 190"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 190"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 191" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 191"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 191"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085449-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 192" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085449-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 192"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 192"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 193" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 193"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 193"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 194" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 194"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 194"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 195" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 195"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 195"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 196" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 196"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 196"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 197" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 197"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 197"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 198" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 198"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 198"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 199" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 199"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 199"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 200" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 200"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 200"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 201" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 201"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 201"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 202" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 202"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 202"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 203" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 203"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 203"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 204" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 204"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 204"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 205" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 205"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 205"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 206" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 206"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 206"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085529-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 207" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085529-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 207"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 207"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085553-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 208" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085553-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 208"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 208"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085553-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 209" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085553-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 209"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 209"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085553-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 210" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085553-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 210"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 210"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085553-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 211" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085553-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 211"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 211"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-085553-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 212" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-085553-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 212"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 212"/></a>
</div>
<script type="text/javascript">
$(document).ready(function(){
$("#comum #listaFotos a").fancybox({
autoScale: false,
overlayOpacity: 0.9,
overlayColor: '#000',
padding: 0,
margin: 0
});
});
</script>
</div> <br class="clear"/>
</div>
<div id="rodape">
<div class="limitador">
<div class="coluna">
<img src="/img/rodape.logo.gif" />
<br />
<a class="block" href="mailto:contato@afterfest.com.br">contato@afterfest.com.br</a>
<br />
Copyright 2009 © AfterFest<br />
Todos os direitos reservados
</div>
<div class="coluna">
<ul class="nav">
<li class="item">
<a class="baladas" href="/baladas">baladas</a>
</li><li class="item">
<a class="bares" href="/bares">bares</a>
</li><li class="item">
<a class="noticias" href="/noticias">noticias</a>
</li><li class="item">
<a class="coberturas " href="/coberturas ">coberturas</a>
</li><li class="item">
<a class="agenda" href="/agenda">agenda</a>
</li><li class="item">
<a class="gatas" href="/gatas">gatas</a>
</li><li class="item">
<a class="contato" href="/contato">contatos</a>
</li><li class="item">
<a class="cadastro" href="/cadastro">equipe</a>
</li> </ul>
</div>
<div class="coluna social">
Siga-nos no
<a href="http://twitter.com/afterfest" target="_blank"><img src="/img/rodape.twitter.gif" /></a>
Participe da nossa<br />
comunidade no
<a href="http://www.orkut.com.br/Main#Community?cmm=33229667" target="_blank"><img src="/img/rodape.orkut.gif" /></a>
</div>
</div>
</div><script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
try {
var pageTracker = _gat._getTracker("UA-2530999-1");
pageTracker._trackPageview();
} catch(err) {}
</script>
</body>
</html>
'''
import urllib.request

# Marker that precedes every photo path in the scraped HTML, and the site
# root used to rebuild absolute URLs from the relative paths found there.
tag = '/uploads/coberturas/XV-Edicao/'
site = 'http://afterfest.com.br/'

# Walk through every occurrence of the path marker in the page source and
# download each full-size photo into the current directory.
k = texto.find(tag)
while k != -1:
    j = k + len(tag)
    if texto[j] != 'm':  # skip thumbnails, whose filenames start with "mini."
        f = texto.find('"', j)  # the closing quote ends the attribute value
        print(texto[j:f])
        url = site + texto[k:f]
        foto = urllib.request.urlopen(url).read()
        # "with" guarantees the file is closed even if write() raises,
        # unlike the manual open()/close() pair.
        with open(texto[j:f], 'wb') as saida:
            saida.write(foto)
    k = texto.find(tag, k + 1)
| renebentes/Python4Zumbis | Códigos/TWP415.py | Python | mit | 119,764 | [
"Dalton"
] | 726ae569eb07dc2b7b6988d7fd78fc9828bee7c2abb8695d9d411caa25cc85c0 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
import scipy.constants as const
from pymatgen.core.structure import Structure
from pymatgen.util.coord import get_linear_interpolated_value
from monty.json import MSONable
from monty.functools import lazy_property
"""
This module defines classes to represent the phonon density of states, etc.
"""
BOLTZ_THZ_PER_K = const.value("Boltzmann constant in Hz/K") / const.tera # Boltzmann constant in THz/K
THZ_TO_J = const.value("hertz-joule relationship") * const.tera
def coth(x):
return 1.0 / np.tanh(x)
class PhononDos(MSONable):
"""
Basic DOS object. All other DOS objects are extended versions of this
object.
Args:
frequencies: A sequences of frequencies in THz
densities: A list representing the density of states.
"""
def __init__(self, frequencies, densities):
self.frequencies = np.array(frequencies)
self.densities = np.array(densities)
def get_smeared_densities(self, sigma):
"""
Returns the densities, but with a Gaussian smearing of
std dev sigma applied.
Args:
sigma: Std dev of Gaussian smearing function.
Returns:
Gaussian-smeared densities.
"""
from scipy.ndimage.filters import gaussian_filter1d
diff = [self.frequencies[i + 1] - self.frequencies[i]
for i in range(len(self.frequencies) - 1)]
avgdiff = sum(diff) / len(diff)
smeared_dens = gaussian_filter1d(self.densities, sigma / avgdiff)
return smeared_dens
def __add__(self, other):
"""
Adds two DOS together. Checks that frequency scales are the same.
Otherwise, a ValueError is thrown.
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
if not all(np.equal(self.frequencies, other.frequencies)):
raise ValueError("Frequencies of both DOS are not compatible!")
densities = self.densities + other.densities
return PhononDos(self.frequencies, densities)
def __radd__(self, other):
"""
Reflected addition of two DOS objects
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
return self.__add__(other)
def get_interpolated_value(self, frequency):
"""
Returns interpolated density for a particular frequency.
Args:
frequency: frequency to return the density for.
"""
return get_linear_interpolated_value(self.frequencies,
self.densities, frequency)
def __str__(self):
"""
Returns a string which can be easily plotted (using gnuplot).
"""
stringarray = ["#{:30s} {:30s}".format("Frequency", "Density")]
for i, frequency in enumerate(self.frequencies):
stringarray.append("{:.5f} {:.5f}"
.format(frequency, self.densities[i]))
return "\n".join(stringarray)
@classmethod
def from_dict(cls, d):
"""
Returns PhononDos object from dict representation of PhononDos.
"""
return cls(d["frequencies"], d["densities"])
def as_dict(self):
"""
Json-serializable dict representation of PhononDos.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"frequencies": list(self.frequencies),
"densities": list(self.densities)}
@lazy_property
def ind_zero_freq(self):
"""
Index of the first point for which the freqencies are equal or greater than zero.
"""
ind = np.searchsorted(self.frequencies, 0)
if ind >= len(self.frequencies):
raise ValueError("No positive frequencies found")
return ind
@lazy_property
def _positive_frequencies(self):
"""
Numpy array containing the list of positive frequencies
"""
return self.frequencies[self.ind_zero_freq:]
@lazy_property
def _positive_densities(self):
"""
Numpy array containing the list of densities corresponding to positive frequencies
"""
return self.densities[self.ind_zero_freq:]
def cv(self, t, structure=None):
"""
Constant volume specific heat C_v at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/(K*mol)
Args:
t: a temperature in K
structure: the structure of the system. If not None it will be used to determine the numer of
formula units
Returns:
Constant volume specific heat C_v
"""
if t == 0:
return 0
freqs = self._positive_frequencies
dens = self._positive_densities
def csch2(x):
return 1.0 / (np.sinh(x) ** 2)
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
cv = np.trapz(wd2kt ** 2 * csch2(wd2kt) * dens, x=freqs)
cv *= const.Boltzmann * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
cv /= formula_units
return cv
def entropy(self, t, structure=None):
"""
Vibrational entropy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/(K*mol)
Args:
t: a temperature in K
structure: the structure of the system. If not None it will be used to determine the numer of
formula units
Returns:
Vibrational entropy
"""
if t == 0:
return 0
freqs = self._positive_frequencies
dens = self._positive_densities
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
s = np.trapz((wd2kt * coth(wd2kt) - np.log(2 * np.sinh(wd2kt))) * dens, x=freqs)
s *= const.Boltzmann * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
s /= formula_units
return s
def internal_energy(self, t, structure=None):
"""
Phonon contribution to the internal energy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/mol
Args:
t: a temperature in K
structure: the structure of the system. If not None it will be used to determine the numer of
formula units
Returns:
Phonon contribution to the internal energy
"""
if t == 0:
return self.zero_point_energy(structure=structure)
freqs = self._positive_frequencies
dens = self._positive_densities
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
e = np.trapz(freqs * coth(wd2kt) * dens, x=freqs) / 2
e *= THZ_TO_J * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
e /= formula_units
return e
def helmholtz_free_energy(self, t, structure=None):
"""
Phonon contribution to the Helmholtz free energy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/mol
Args:
t: a temperature in K
structure: the structure of the system. If not None it will be used to determine the numer of
formula units
Returns:
Phonon contribution to the Helmholtz free energy
"""
if t == 0:
return self.zero_point_energy(structure=structure)
freqs = self._positive_frequencies
dens = self._positive_densities
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
f = np.trapz(np.log(2 * np.sinh(wd2kt)) * dens, x=freqs)
f *= const.Boltzmann * const.Avogadro * t
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
f /= formula_units
return f
def zero_point_energy(self, structure=None):
"""
Zero point energy energy of the system. Only positive frequencies will be used.
Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/mol
Args:
t: a temperature in K
structure: the structure of the system. If not None it will be used to determine the numer of
formula units
Returns:
Phonon contribution to the internal energy
"""
freqs = self._positive_frequencies
dens = self._positive_densities
zpe = 0.5 * np.trapz(freqs * dens, x=freqs)
zpe *= THZ_TO_J * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
zpe /= formula_units
return zpe
class CompletePhononDos(PhononDos):
"""
This wrapper class defines a total dos, and also provides a list of PDos.
Args:
structure: Structure associated with this particular DOS.
total_dos: total Dos for structure
pdoss: The pdoss are supplied as an {Site: Densities}
.. attribute:: pdos
Dict of partial densities of the form {Site:Densities}
"""
def __init__(self, structure, total_dos, pdoss):
super().__init__(
frequencies=total_dos.frequencies, densities=total_dos.densities)
self.pdos = {s: np.array(d) for s, d in pdoss.items()}
self.structure = structure
def get_site_dos(self, site):
"""
Get the Dos for a site.
Args:
site: Site in Structure associated with CompletePhononDos.
Returns:
PhononDos containing summed orbital densities for site.
"""
return PhononDos(self.frequencies, self.pdos[site])
def get_element_dos(self):
"""
Get element projected Dos.
Returns:
dict of {Element: Dos}
"""
el_dos = {}
for site, atom_dos in self.pdos.items():
el = site.specie
if el not in el_dos:
el_dos[el] = np.array(atom_dos)
else:
el_dos[el] += np.array(atom_dos)
return {el: PhononDos(self.frequencies, densities)
for el, densities in el_dos.items()}
@classmethod
def from_dict(cls, d):
"""
Returns CompleteDos object from dict representation.
"""
tdos = PhononDos.from_dict(d)
struct = Structure.from_dict(d["structure"])
pdoss = {}
for at, pdos in zip(struct, d["pdos"]):
pdoss[at] = pdos
return cls(struct, tdos, pdoss)
def as_dict(self):
"""
Json-serializable dict representation of CompletePhononDos.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"frequencies": list(self.frequencies),
"densities": list(self.densities),
"pdos": []}
if len(self.pdos) > 0:
for at in self.structure:
d["pdos"].append(list(self.pdos[at]))
return d
def __str__(self):
return "Complete phonon DOS for " + str(self.structure)
| fraricci/pymatgen | pymatgen/phonon/dos.py | Python | mit | 13,471 | [
"Avogadro",
"Gaussian",
"pymatgen"
] | 4fdfb12f84969cb4d9b554e3373316e6603cb0ae070827f1821991760008ff82 |
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lingvo Jax transformer layers."""
import itertools
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as jnp
from lingvo.core import gshard_builder
from lingvo.jax import base_layer
from lingvo.jax import py_utils
from lingvo.jax import test_utils
from lingvo.jax.layers import embedding_softmax
from lingvo.jax.layers import ngrammer
from lingvo.jax.layers import transformer_models
from lingvo.jax.layers import transformers
import numpy as np
import tensorflow.compat.v2 as tf
class TransformerModelsTest(test_utils.TestCase):
def setUp(self):
super().setUp()
np.random.seed(123456)
tf.random.set_seed(123)
@parameterized.parameters([True, False])
def test_transformer_bert(self, trainable_position_emb):
seq_len = 512
if trainable_position_emb:
position_emb_tpl = embedding_softmax.TrainablePositionalEmbedding.Params()
position_emb_tpl.max_seq_length = seq_len
else:
position_emb_tpl = embedding_softmax.PositionalEmbedding.Params()
p = transformer_models.TransformerLm.Params().Set(
name='bert_lm',
model_dims=32,
vocab_size=52,
position_emb_tpl=position_emb_tpl)
stacked_transformer_tpl = p.stacked_transformer_tpl
stacked_transformer_tpl.model_dims = 32
stacked_transformer_tpl.hidden_dims = 4 * 32
stacked_transformer_tpl.num_heads = 4
stacked_transformer_tpl.num_layers = 1
p.softmax_tpl.scale_sqrt_depth = True
batch_size = 8
bert_lm = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = bert_lm.instantiate_variables(prng_key)
input_ids = jax.random.randint(
jax.random.PRNGKey(1234), [batch_size, seq_len], 0, 51)
input_paddings = jnp.zeros([batch_size, seq_len])
input_weights = jnp.ones([batch_size, seq_len])
input_segment_ids = jnp.ones([batch_size, seq_len])
input_segment_pos = jnp.tile(
jnp.arange(0, seq_len)[jnp.newaxis, :], [batch_size, 1])
labels = py_utils.NestedMap()
labels.class_ids = input_ids
labels.class_weights = input_weights
outputs = test_utils.apply(
bert_lm,
initial_vars,
bert_lm.fprop,
input_ids,
input_paddings,
labels=labels,
segment_ids=input_segment_ids,
segment_pos=input_segment_pos)
logging.info('outputs: %s', outputs)
@parameterized.parameters(*list(itertools.product([True, False], repeat=3)))
def test_ngrammer_lm_extendstep(self, use_vq_ngrams, use_rotary_position_emb,
share_embedding_and_softmax):
vocab_size = 8
num_layers = 2
num_heads = 2
dim_per_head = 8
ngram_emb_dim = 4
if use_vq_ngrams:
ngrammer_params = ngrammer.VQNgrammer.Params().Set(
ngram_vocab_size=64,
ngram_emb_dim=ngram_emb_dim,
num_heads=num_heads,
concat_ngrams=True,
num_clusters=2,
dim_per_head=dim_per_head)
else:
ngrammer_params = ngrammer.Ngrammer.Params().Set(
ngram_vocab_size=64,
unigram_vocab_size=vocab_size,
ngram_emb_dim=ngram_emb_dim,
num_heads=num_heads,
concat_ngrams=True,
dim_per_head=dim_per_head)
p = transformer_models.TransformerLm.Params().Set(
name='jax_ngrammer_layer',
model_dims=num_heads * dim_per_head,
masked_lm=False,
packed_input=False,
ngrammer_tpl=ngrammer_params,
vocab_size=vocab_size)
stacked_transformer_tpl = p.stacked_transformer_tpl
stacked_transformer_tpl.model_dims = num_heads * dim_per_head
stacked_transformer_tpl.hidden_dims = 4 * num_heads * dim_per_head
stacked_transformer_tpl.num_heads = num_heads
stacked_transformer_tpl.num_layers = num_layers
if not share_embedding_and_softmax:
p.separate_embedding_tpl = embedding_softmax.SingleShardEmbedding.Params()
p.softmax_tpl = embedding_softmax.SingleShardFullSoftmax.Params()
# Rotary position embedding.
params = p.stacked_transformer_tpl.transformer_layer_params_tpl
params.tr_atten_tpl.use_rotary_position_emb = use_rotary_position_emb
seq_len = 4
batch_size = 2
transformer_lm = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = transformer_lm.instantiate_variables(prng_key)
initial_states = transformer_lm.init_states(batch_size, seq_len)
npy_inputs = np.random.randint(
vocab_size, size=(batch_size, seq_len)).astype('int32')
inputs = jnp.asarray(npy_inputs)
context_params = base_layer.JaxContext.Params().Set(do_eval=True)
with base_layer.JaxContext.new_context(
params=context_params,
prng_key=prng_key,
global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context:
jax_context.bind(transformer_lm,
transformer_lm.vars_to_flax_vars(initial_vars))
fprop_outputs = transformer_lm.fprop(inputs, jnp.zeros_like(inputs))
logits = fprop_outputs.logits
cached_states = initial_states
for t in range(seq_len):
if t > 0:
inputs_prefix = inputs[:, t - 1:t + 1]
else:
inputs_prefix = inputs[:, t]
cached_states, xent_output = transformer_lm.extend_step(
cached_states, inputs_prefix)
self.assertAllClose(logits[:, t, :], xent_output.logits)
@parameterized.parameters(*list(itertools.product([True, False], repeat=2)))
def test_primer_lm_extendstep(self, use_rotary_position_emb,
share_embedding_and_softmax):
vocab_size = 8
num_layers = 2
num_heads = 2
dim_per_head = 4
dconv_kernel_size = 3
p = transformer_models.TransformerLm.Params().Set(
name='jax_primer_layer',
model_dims=num_heads * dim_per_head,
masked_lm=False,
packed_input=False,
vocab_size=vocab_size)
stacked_transformer_tpl = p.stacked_transformer_tpl
stacked_transformer_tpl.model_dims = num_heads * dim_per_head
stacked_transformer_tpl.hidden_dims = 2 * num_heads * dim_per_head
stacked_transformer_tpl.num_heads = num_heads
stacked_transformer_tpl.num_layers = num_layers
if not share_embedding_and_softmax:
p.separate_embedding_tpl = embedding_softmax.SingleShardEmbedding.Params()
p.softmax_tpl = embedding_softmax.SingleShardFullSoftmax.Params()
seq_len = 4
batch_size = 3
# Turn on dconv as in Primer.
params = p.stacked_transformer_tpl.transformer_layer_params_tpl
params.tr_atten_tpl.dconv_qkv = True
# Rotary position embedding.
params = p.stacked_transformer_tpl.transformer_layer_params_tpl
params.tr_atten_tpl.dconv_kernel_size = dconv_kernel_size
params.tr_atten_tpl.use_rotary_position_emb = use_rotary_position_emb
transformer_lm = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = transformer_lm.instantiate_variables(prng_key)
initial_states = transformer_lm.init_states(batch_size, seq_len)
npy_inputs = np.random.randint(
vocab_size, size=(batch_size, seq_len)).astype('int32')
inputs = jnp.asarray(npy_inputs)
context_params = base_layer.JaxContext.Params().Set(do_eval=True)
with base_layer.JaxContext.new_context(
params=context_params,
prng_key=prng_key,
global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context:
jax_context.bind(transformer_lm,
transformer_lm.vars_to_flax_vars(initial_vars))
fprop_outputs = transformer_lm.fprop(inputs, jnp.zeros_like(inputs))
logits = fprop_outputs.logits
cached_states = initial_states
for t in range(seq_len):
cached_states, xent_output = transformer_lm.extend_step(
cached_states, inputs[:, t])
self.assertAllClose(logits[:, t, :], xent_output.logits)
@parameterized.parameters(*list(itertools.product([True, False], repeat=3)))
def test_ngrammer_primer_lm_extendstep(self, use_vq_ngrams,
use_rotary_position_emb,
share_embedding_and_softmax):
vocab_size = 8
num_layers = 2
num_heads = 2
dim_per_head = 8
ngram_emb_dim = 4
dconv_kernel_size = 3
if use_vq_ngrams:
ngrammer_params = ngrammer.VQNgrammer.Params().Set(
ngram_vocab_size=64,
ngram_emb_dim=ngram_emb_dim,
num_heads=num_heads,
concat_ngrams=True,
num_clusters=2,
dim_per_head=dim_per_head)
else:
ngrammer_params = ngrammer.Ngrammer.Params().Set(
ngram_vocab_size=64,
unigram_vocab_size=vocab_size,
ngram_emb_dim=ngram_emb_dim,
num_heads=num_heads,
concat_ngrams=True,
dim_per_head=dim_per_head)
p = transformer_models.TransformerLm.Params().Set(
name='jax_ngrammer_layer',
model_dims=num_heads * dim_per_head,
masked_lm=False,
packed_input=False,
ngrammer_tpl=ngrammer_params,
vocab_size=vocab_size)
stacked_transformer_tpl = p.stacked_transformer_tpl
stacked_transformer_tpl.model_dims = num_heads * dim_per_head
stacked_transformer_tpl.hidden_dims = 4 * num_heads * dim_per_head
stacked_transformer_tpl.num_heads = num_heads
stacked_transformer_tpl.num_layers = num_layers
if not share_embedding_and_softmax:
p.separate_embedding_tpl = embedding_softmax.SingleShardEmbedding.Params()
p.softmax_tpl = embedding_softmax.SingleShardFullSoftmax.Params()
seq_len = 4
batch_size = 2
# Turn on dconv as in Primer.
params = p.stacked_transformer_tpl.transformer_layer_params_tpl
params.tr_atten_tpl.dconv_qkv = True
params.tr_atten_tpl.dconv_kernel_size = dconv_kernel_size
# Rotary position embedding.
params = p.stacked_transformer_tpl.transformer_layer_params_tpl
params.tr_atten_tpl.use_rotary_position_emb = use_rotary_position_emb
transformer_lm = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = transformer_lm.instantiate_variables(prng_key)
initial_states = transformer_lm.init_states(batch_size, seq_len)
npy_inputs = np.random.randint(
vocab_size, size=(batch_size, seq_len)).astype('int32')
inputs = jnp.asarray(npy_inputs)
context_params = base_layer.JaxContext.Params().Set(do_eval=True)
with base_layer.JaxContext.new_context(
params=context_params,
prng_key=prng_key,
global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context:
jax_context.bind(transformer_lm,
transformer_lm.vars_to_flax_vars(initial_vars))
fprop_outputs = transformer_lm.fprop(inputs, jnp.zeros_like(inputs))
logits = fprop_outputs.logits
cached_states = initial_states
for t in range(seq_len):
if t > 0:
inputs_prefix = inputs[:, t - 1:t + 1]
else:
inputs_prefix = inputs[:, t]
cached_states, xent_output = transformer_lm.extend_step(
cached_states, inputs_prefix)
self.assertAllClose(logits[:, t, :], xent_output.logits)
@parameterized.parameters(*list(itertools.product([True, False], repeat=8)))
def test_transformer_encoder_decoder_extendstep(
self, use_encoder_ngrams, use_decoder_ngrams, use_encoder_vq_ngrams,
use_decoder_vq_ngrams, use_rotary_position_emb,
separate_encoder_embedding, separate_decoder_embedding,
use_stacked_transformer_repeated):
vocab_size = 4
num_layers = 2
num_heads = 2
dim_per_head = 4
ngram_emb_dim = 2
encoder_ngrammer_params = None
decoder_ngrammer_params = None
if use_encoder_vq_ngrams:
encoder_ngrammer_params = ngrammer.VQNgrammer.Params().Set(
ngram_vocab_size=8,
ngram_emb_dim=ngram_emb_dim,
num_heads=num_heads,
concat_ngrams=True,
num_clusters=2,
dim_per_head=dim_per_head)
if use_encoder_ngrams:
encoder_ngrammer_params = ngrammer.Ngrammer.Params().Set(
ngram_vocab_size=16,
unigram_vocab_size=vocab_size,
ngram_emb_dim=ngram_emb_dim,
num_heads=num_heads,
concat_ngrams=True,
dim_per_head=dim_per_head)
if use_decoder_vq_ngrams:
decoder_ngrammer_params = ngrammer.VQNgrammer.Params().Set(
ngram_vocab_size=8,
ngram_emb_dim=ngram_emb_dim,
num_heads=num_heads,
concat_ngrams=True,
num_clusters=2,
dim_per_head=dim_per_head)
if use_decoder_ngrams:
decoder_ngrammer_params = ngrammer.Ngrammer.Params().Set(
ngram_vocab_size=16,
unigram_vocab_size=vocab_size,
ngram_emb_dim=ngram_emb_dim,
num_heads=num_heads,
concat_ngrams=True,
dim_per_head=dim_per_head)
p = transformer_models.TransformerEncoderDecoder.Params().Set(
name='jax_transformer_encoder_decoder',
model_dims=num_heads * dim_per_head,
decoder_ngrammer_tpl=decoder_ngrammer_params,
encoder_ngrammer_tpl=encoder_ngrammer_params)
# Encoder stack.
if use_stacked_transformer_repeated:
block_param = transformers.StackedTransformer.Params().Set(
num_layers=num_layers,
num_heads=num_heads,
model_dims=num_heads * dim_per_head,
hidden_dims=num_heads * dim_per_head,
mask_self_attention=False,
fold_padding_with_segment_mask=True)
p.encoder_stacked_transformer_tpl = (
transformers.StackedTransformerRepeated.Params().Set(
block=block_param, x_times=1))
else:
p.encoder_stacked_transformer_tpl = (
transformers.StackedTransformer.Params().Set(
model_dims=num_heads * dim_per_head,
hidden_dims=num_heads * dim_per_head,
num_heads=num_heads,
num_layers=num_layers,
mask_self_attention=False,
fold_padding_with_segment_mask=True))
# Decoder stack.
if use_stacked_transformer_repeated:
block_param = transformers.StackedTransformer.Params().Set(
num_layers=num_layers,
num_heads=num_heads,
model_dims=num_heads * dim_per_head,
hidden_dims=num_heads * dim_per_head,
mask_self_attention=True,
fold_padding_with_segment_mask=True)
p.decoder_stacked_transformer_tpl = (
transformers.StackedTransformerRepeated.Params().Set(
block=block_param, x_times=1))
else:
p.decoder_stacked_transformer_tpl = (
transformers.StackedTransformer.Params().Set(
model_dims=num_heads * dim_per_head,
hidden_dims=num_heads * dim_per_head,
num_heads=num_heads,
num_layers=num_layers,
mask_self_attention=True,
fold_padding_with_segment_mask=True))
if separate_encoder_embedding:
p.encoder_embedding_tpl = (
embedding_softmax.SingleShardEmbedding.Params().Set(
vocab_size=vocab_size, embedding_dims=num_heads * dim_per_head))
if separate_decoder_embedding:
p.decoder_embedding_tpl = (
embedding_softmax.SingleShardEmbedding.Params().Set(
vocab_size=vocab_size, embedding_dims=num_heads * dim_per_head))
# Softmax params.
if separate_decoder_embedding:
p.softmax_tpl = embedding_softmax.SingleShardFullSoftmax.Params().Set(
input_dims=num_heads * dim_per_head, num_classes=vocab_size)
else:
p.softmax_tpl = (
embedding_softmax.SingleShardSharedEmbeddingSoftmax.Params().Set(
input_dims=num_heads * dim_per_head, num_classes=vocab_size))
# Rotary position embedding.
if use_rotary_position_emb:
if use_stacked_transformer_repeated:
params = p.encoder_stacked_transformer_tpl.block
else:
params = p.encoder_stacked_transformer_tpl
params = params.transformer_layer_params_tpl
params.tr_atten_tpl.use_rotary_position_emb = use_rotary_position_emb
if use_stacked_transformer_repeated:
params = p.decoder_stacked_transformer_tpl.block
else:
params = p.decoder_stacked_transformer_tpl
params = params.transformer_layer_params_tpl
params.tr_atten_tpl.use_rotary_position_emb = use_rotary_position_emb
p.position_emb_tpl = None
seq_len = 4
batch_size = 1
transformer_enc_dec = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = transformer_enc_dec.instantiate_variables(prng_key)
npy_inputs = np.random.randint(
vocab_size, size=(batch_size, seq_len)).astype('int32')
npy_input_paddings = np.random.randint(0, 2, size=(batch_size, seq_len))
npy_targets = np.random.randint(
vocab_size, size=(batch_size, seq_len)).astype('int32')
inputs = jnp.asarray(npy_inputs)
input_paddings = jnp.asarray(npy_input_paddings)
targets = jnp.asarray(npy_targets)
context_params = base_layer.JaxContext.Params().Set(do_eval=True)
with base_layer.JaxContext.new_context(
params=context_params,
prng_key=prng_key,
global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context:
jax_context.bind(transformer_enc_dec,
transformer_enc_dec.vars_to_flax_vars(initial_vars))
initial_states = transformer_enc_dec.init_states(inputs, input_paddings,
batch_size, seq_len)
fprop_outputs = transformer_enc_dec.fprop(inputs, input_paddings, targets,
jnp.zeros_like(targets))
logits = fprop_outputs.logits
cached_states = initial_states
for t in range(seq_len):
targets_prefix = targets[:, t]
if use_decoder_ngrams or use_decoder_vq_ngrams:
if t > 0:
targets_prefix = targets[:, t - 1:t + 1]
cached_states, xent_output = transformer_enc_dec.extend_step(
cached_states, targets_prefix)
self.assertAllClose(logits[:, t, :], xent_output.logits, atol=2e-6)
def test_glam_unitransformer(self):
batch = 2
length = 3
d_model = 6
num_heads = 2
vocab_size = 16
ff_dim = 8
c_dim = 3
e_dim = 2
num_layers = 4
# Build jax layer
jax_p = transformer_models.TransformerLm.GLaMUniTransformerParams(
name='model',
vocab_size=vocab_size,
num_transformer_layers=num_layers,
moe=True,
model_dim=d_model,
ff_dim=ff_dim,
moe_hidden_dim=ff_dim,
attention_num_heads=num_heads,
attention_key_value_dim=d_model // num_heads,
attention_extra_logit=0.0,
use_tgt_labels_size_as_loss_denominator=True,
moe_load_balance_loss_weight=0.01,
z_loss_weight=1e-4,
c_dim=c_dim,
e_dim=e_dim)
assert jax_p.packed_input
jax_layer = jax_p.Instantiate()
prng_key = jax.random.PRNGKey(seed=42)
jax_vars = jax_layer.instantiate_variables(prng_key)
builder_p = gshard_builder.DenseBuilder.Params().Set(
num_groups=1,
second_expert_policy='all',
relative_attention_type='bias',
model_dim=d_model,
attention_key_value_dim=d_model // num_heads,
attention_num_heads=num_heads,
attention_combine_dims=True,
c_dim=c_dim,
capacity_factor=None,
attention_extra_logit=0.0,
e_dim=e_dim,
moe_hidden_dim=ff_dim,
ff_dim=ff_dim)
tf_layer = gshard_builder.UniTransformer.Params().Set(
name='model',
num_transformer_layers=num_layers,
builder=builder_p,
vocab_size=vocab_size,
sequence_length=length,
label_smoothing=0,
aux_loss_coef=0.01,
z_loss=1e-4,
use_tgt_labels_size_as_loss_denominator=True,
positional_embedding=False,
gated_gelu=True,
moe=True).Instantiate()
# Build Jax Inputs
np.random.seed(42)
npy_ids = np.random.randint(0, vocab_size - 1, [batch, length])
jax_ids = jnp.asarray(npy_ids)
npy_paddings = np.array([[0, 0, 1], [0, 0, 1]], dtype=np.float32)
jax_paddings = jnp.asarray(npy_paddings)
npy_segment_ids = np.array([[1, 2, 0], [1, 1, 0]], dtype=np.int32)
npy_segment_pos = np.array([[0, 0, 0], [0, 1, 0]], dtype=np.int32)
npy_labels = np.roll(npy_ids, -1, axis=1)
jax_labels = jnp.asarray(npy_labels)
jax_seg_ids = jnp.asarray(npy_segment_ids)
jax_seg_pos = jnp.asarray(npy_segment_pos)
jax_label_weighs = jnp.asarray([[1, 1, 0], [1, 1, 0]])
# Build TF Inputs
tf_tgt_inputs = py_utils.NestedMap(
ids=tf.convert_to_tensor(npy_ids, dtype=tf.int32),
labels=tf.convert_to_tensor(npy_labels, dtype=tf.int32),
segment_ids=tf.convert_to_tensor(npy_segment_ids, dtype=tf.int32),
segment_pos=tf.convert_to_tensor(npy_segment_pos, dtype=tf.int32))
tf_inputs = py_utils.NestedMap(tgt=tf_tgt_inputs)
# Compute jax outputs
jax_outputs = test_utils.apply(
jax_layer,
jax_vars,
jax_layer.fprop,
jax_ids,
jax_paddings,
context_p=None,
labels=py_utils.NestedMap(
class_ids=jax_labels,
class_weights=jax_label_weighs,
),
segment_ids=jax_seg_ids,
segment_pos=jax_seg_pos)
# Copy jax vars to tf ones.
tf_theta = tf_layer.theta.DeepCopy()
# GShardBuilder softmax weight use self.vars rather than theta.
tf_layer.vars.dec_emb.w.embedding.assign(jax_vars.softmax.embedding.w)
tf_theta.dec_emb.w.embedding = jax_vars.softmax.embedding.w
tf_theta.dec.final_layer_norm.w.scale = jax_vars.final_ln.scale
jax_layer_0_var = tf.nest.map_structure(
lambda v: jnp.squeeze(jnp.split(v, 2)[0], axis=0),
jax_vars.transformer.repeat.sub.x_layers[0])
tf_theta.dec.layer_000.ln.w.scale = jax_layer_0_var.layer_norm.scale
jax_atten_var = jax_layer_0_var.self_attention
tf_atten_var = tf_theta.dec.layer_000.dec_self_attention
tf_atten_var.w.wk = jax_atten_var.key.w
tf_atten_var.w.wq = jax_atten_var.query.w
tf_atten_var.w.wv = jax_atten_var.value.w
tf_atten_var.w.wo = jax_atten_var.post.w
tf_atten_var.wrb.wrb = jax_atten_var.relative_bias.wrb
jax_moe_var = jax_layer_0_var.ff_layer
tf_theta.dec.layer_001.ln.w.scale = jax_moe_var.layer_norm.scale
tf_theta.dec.layer_001.moe.ffw.top_2_gating.w = jax_moe_var.gate
tf_theta.dec.layer_001.moe.moe.wi = jax_moe_var.wi_0
tf_theta.dec.layer_001.moe.moe.wo = jax_moe_var.wo_0
jax_layer_1_var = tf.nest.map_structure(
lambda v: jnp.squeeze(jnp.split(v, 2)[0], axis=0),
jax_vars.transformer.repeat.sub.x_layers[1])
tf_theta.dec.layer_002.ln.w.scale = jax_layer_1_var.layer_norm.scale
jax_atten_var = jax_layer_1_var.self_attention
tf_atten_var = tf_theta.dec.layer_002.dec_self_attention
tf_atten_var.w.wk = jax_atten_var.key.w
tf_atten_var.w.wq = jax_atten_var.query.w
tf_atten_var.w.wv = jax_atten_var.value.w
tf_atten_var.w.wo = jax_atten_var.post.w
tf_atten_var.wrb.wrb = jax_atten_var.relative_bias.wrb
jax_ffn_var = jax_layer_1_var.ff_layer
tf_ffn_var = tf_theta.dec.layer_003.dense_relu_dense
tf_ffn_var.w.wi_0 = jax_ffn_var.ffn_layer1_gate.linear.w
tf_ffn_var.w.wi_1 = jax_ffn_var.ffn_layer1.linear.w
tf_ffn_var.w.wo = jax_ffn_var.ffn_layer2.linear.w
tf_theta.dec.layer_003.ln.w.scale = jax_ffn_var.layer_norm.scale
jax_layer_2_var = tf.nest.map_structure(
lambda v: jnp.squeeze(jnp.split(v, 2)[1], axis=0),
jax_vars.transformer.repeat.sub.x_layers[0])
tf_theta.dec.layer_004.ln.w.scale = jax_layer_2_var.layer_norm.scale
jax_atten_var = jax_layer_2_var.self_attention
tf_atten_var = tf_theta.dec.layer_004.dec_self_attention
tf_atten_var.w.wk = jax_atten_var.key.w
tf_atten_var.w.wq = jax_atten_var.query.w
tf_atten_var.w.wv = jax_atten_var.value.w
tf_atten_var.w.wo = jax_atten_var.post.w
tf_atten_var.wrb.wrb = jax_atten_var.relative_bias.wrb
jax_moe_var = jax_layer_2_var.ff_layer
tf_theta.dec.layer_005.ln.w.scale = jax_moe_var.layer_norm.scale
tf_theta.dec.layer_005.moe.ffw.top_2_gating.w = jax_moe_var.gate
tf_theta.dec.layer_005.moe.moe.wi = jax_moe_var.wi_0
tf_theta.dec.layer_005.moe.moe.wo = jax_moe_var.wo_0
jax_layer_3_var = tf.nest.map_structure(
lambda v: jnp.squeeze(jnp.split(v, 2)[1], axis=0),
jax_vars.transformer.repeat.sub.x_layers[1])
tf_theta.dec.layer_006.ln.w.scale = jax_layer_3_var.layer_norm.scale
jax_atten_var = jax_layer_3_var.self_attention
tf_atten_var = tf_theta.dec.layer_006.dec_self_attention
tf_atten_var.w.wk = jax_atten_var.key.w
tf_atten_var.w.wq = jax_atten_var.query.w
tf_atten_var.w.wv = jax_atten_var.value.w
tf_atten_var.w.wo = jax_atten_var.post.w
tf_atten_var.wrb.wrb = jax_atten_var.relative_bias.wrb
jax_ffn_var = jax_layer_3_var.ff_layer
tf_ffn_var = tf_theta.dec.layer_007.dense_relu_dense
tf_ffn_var.w.wi_0 = jax_ffn_var.ffn_layer1_gate.linear.w
tf_ffn_var.w.wi_1 = jax_ffn_var.ffn_layer1.linear.w
tf_ffn_var.w.wo = jax_ffn_var.ffn_layer2.linear.w
tf_theta.dec.layer_007.ln.w.scale = jax_ffn_var.layer_norm.scale
tf_theta = test_utils.to_tf_nmap(tf_theta)
# Compute TF outputs
tf_out, _ = tf_layer.FProp(tf_theta, tf_inputs)
self.assertAllClose(
test_utils.to_np(jax_outputs.total_loss),
test_utils.to_np(tf_out['loss'][0]))
  @parameterized.parameters([True, False])
  def test_glam_unitransformer_extendstep(self, moe):
    """Checks autoregressive extend_step against full-sequence fprop.

    Builds a tiny GLaM uni-directional transformer LM (with or without MoE
    feed-forward layers, controlled by `moe`), runs fprop over a random token
    sequence, then decodes the same tokens one step at a time and asserts the
    per-step logits match the fprop logits.
    """
    # Deliberately tiny dimensions so the test runs fast.
    batch = 1
    length = 3
    d_model = 6
    num_heads = 2
    vocab_size = 16
    ff_dim = 8
    c_dim = 3   # expert capacity per group (MoE only)
    e_dim = 4   # number of experts (MoE only)
    num_layers = 4
    # Build jax layer
    transformer_lm = transformer_models.TransformerLm.GLaMUniTransformerParams(
        name='model',
        vocab_size=vocab_size,
        num_transformer_layers=num_layers,
        moe=moe,
        model_dim=d_model,
        ff_dim=ff_dim,
        moe_hidden_dim=ff_dim,
        attention_num_heads=num_heads,
        attention_key_value_dim=d_model // num_heads,
        attention_extra_logit=0.0,
        use_tgt_labels_size_as_loss_denominator=True,
        moe_load_balance_loss_weight=0.01,
        num_groups=1,
        z_loss_weight=1e-4,
        c_dim=c_dim,
        e_dim=e_dim).Instantiate()
    prng_key = jax.random.PRNGKey(seed=123)
    initial_vars = transformer_lm.instantiate_variables(prng_key)
    # Random token ids; note this uses the (unseeded) module-level numpy RNG.
    npy_inputs = np.random.randint(
        vocab_size, size=(batch, length)).astype('int32')
    inputs = jnp.asarray(npy_inputs)
    # Evaluation mode: disables train-only behavior (e.g. dropout).
    context_params = base_layer.JaxContext.Params().Set(do_eval=True)
    with base_layer.JaxContext.new_context(
        params=context_params,
        prng_key=prng_key,
        global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context:
      # Bind the variables to the layer inside the context so that both
      # fprop and extend_step below read the same weights.
      jax_context.bind(transformer_lm,
                       transformer_lm.vars_to_flax_vars(initial_vars))
      initial_states = transformer_lm.init_states(batch, length)
      # Full-sequence forward pass; labels are irrelevant for the logits.
      fprop_outputs = transformer_lm.fprop(inputs, jnp.zeros_like(inputs))
      logits = fprop_outputs.logits
      cached_states = initial_states
      # Decode one token at a time, threading the cache through each step,
      # and compare the step logits with the corresponding fprop slice.
      for t in range(length):
        cached_states, xent_output = transformer_lm.extend_step(
            cached_states, inputs[:, t])
        self.assertAllClose(logits[:, t, :], xent_output.logits, atol=1e-5,
                            rtol=1e-5)
if __name__ == '__main__':
  # Run the tests via absl's test runner when executed as a script.
  absltest.main()
| tensorflow/lingvo | lingvo/jax/layers/transformer_models_test.py | Python | apache-2.0 | 28,346 | [
"MOE"
] | b9fbca0da53e9f4b9b190a63f2896381b6238f1b723858cacec4373b1b6565b2 |
#! Tests out the CG solver with CPHF Polarizabilities
import time
import numpy as np
import psi4
#psi4.set_output_file("output.dat")
# Water monomer, C1 symmetry (note: this is H2O, not benzene).
mol = psi4.geometry("""
0 1
O 0.000000000000 0.000000000000 -0.075791843589
H 0.000000000000 -0.866811828967 0.601435779270
H 0.000000000000 0.866811828967 0.601435779270
symmetry c1
""")
# Density-fitted SCF; keep the JK object so cphf_Hx can reuse it later.
psi4.set_options({"basis": "aug-cc-pVDZ",
                  "scf_type": "df",
                  "e_convergence": 1e-8,
                  "save_jk": True,
                  })
scf_e, scf_wfn = psi4.energy("SCF", return_wfn=True)
# Occupied and virtual MO coefficient blocks.
Co = scf_wfn.Ca_subset("AO", "OCC")
Cv = scf_wfn.Ca_subset("AO", "VIR")
# Mints object (integral helper over the SCF basis set).
mints = psi4.core.MintsHelper(scf_wfn.basisset())
# RHS dipoles: occupied x virtual MO blocks of each AO dipole component,
# scaled by -2 (RHS of the CPHF equations).
dipoles_xyz = []
for dip in mints.ao_dipole():
    Fia = psi4.core.Matrix.triplet(Co, dip, Cv, True, False, False)
    Fia.scale(-2.0)
    dipoles_xyz.append(Fia)
# Build up the preconditioner: orbital-energy denominators (e_a - e_i).
precon = psi4.core.Matrix(Co.shape[1], Cv.shape[1])
occ = np.array(scf_wfn.epsilon_a_subset("AO", "OCC"))
vir = np.array(scf_wfn.epsilon_a_subset("AO", "VIR"))
precon.np[:] = (-occ.reshape(-1, 1) + vir)
# Build a preconditioner function
def precon_func(matrices, active_mask):
    """Preconditioner callback for the CG solver.

    For every matrix whose corresponding ``active_mask`` flag is set, return
    a copy divided elementwise by the orbital-energy denominators ``precon``
    (module-level). Inactive slots are returned as ``False`` so the solver
    can skip already-converged right-hand sides.
    """
    def _precondition(mat):
        scaled = mat.clone()
        scaled.apply_denominator(precon)
        return scaled

    return [_precondition(mat) if act else False
            for act, mat in zip(active_mask, matrices)]
def wrap_Hx(matrices, active_mask):
    """Hessian-vector product callback for the CG solver.

    Only the entries of ``matrices`` whose ``active_mask`` flag is set are
    forwarded to ``scf_wfn.cphf_Hx`` (module-level wavefunction); the results
    are scattered back into their original positions, with ``False`` filling
    the inactive slots.
    """
    active = [mat for act, mat in zip(active_mask, matrices) if act]
    # cphf_Hx returns products in the same order as its inputs, so an
    # iterator re-distributes them without an explicit counter.
    products = iter(scf_wfn.cphf_Hx(active))
    return [next(products) if act else False
            for act, _mat in zip(active_mask, matrices)]
# Solve the CPHF response equations (one RHS per dipole component) with
# conjugate gradient; `ret` holds the response vectors.
ret, resid = psi4.p4util.solvers.cg_solver(dipoles_xyz, wrap_Hx, precon_func, rcond=1.e-6)
# Polarizability tensor element (x, f) = -<response_x | dipole_f>.
polar = np.empty((3, 3))
for numx in range(3):
    for numf in range(3):
        polar[numx, numf] = -1 * ret[numx].vector_dot(dipoles_xyz[numf])
# Pretty-print the 3x3 tensor to the Psi4 output file.
psi4.core.print_out("\n " + "CPHF Dipole Polarizability:".center(44) + "\n");
tops = ("X", "Y", "Z")
psi4.core.print_out(" %12s %12s %12s\n" % tops);
for n, p in enumerate(tops):
    psi4.core.print_out(" %3s %12.4f %12.4f %12.4f\n" % (p, polar[n][0], polar[n][1], polar[n][2]));
psi4.core.print_out("\n")
| amjames/psi4 | samples/psi4numpy/cphf/input.py | Python | lgpl-3.0 | 2,446 | [
"Psi4"
] | 2012341d4343943070e3daaa7ade51eeda9cd5a032a7a4525f7ec91ff4a84862 |
# Three-letter "general issue area" codes used on federal Lobbying
# Disclosure Act (SOPR) filings, mapped to their human-readable labels.
general_issue_codes = {
    "ACC": "Accounting",
    "ADV": "Advertising",
    "AER": "Aerospace",
    "AGR": "Agriculture",
    "ALC": "Alcohol & Drug Abuse",
    "ANI": "Animals",
    "APP": "Apparel/Clothing Industry/Textiles",
    "ART": "Arts/Entertainment",
    "AUT": "Automotive Industry",
    "AVI": "Aviation/Aircraft/Airlines",
    "BAN": "Banking",
    "BNK": "Bankruptcy",
    "BEV": "Beverage Industry",
    "BUD": "Budget/Appropriations",
    "CHM": "Chemicals/Chemical Industry",
    "CIV": "Civil Rights/Civil Liberties",
    "CAW": "Clean Air & Water (Quality)",
    "CDT": "Commodities (Big Ticket)",
    "COM": "Communications/Broadcasting/Radio/TV",
    "CPI": "Computer Industry",
    "CSP": "Consumer Issues/Safety/Protection",
    "CON": "Constitution",
    "CPT": "Copyright/Patent/Trademark",
    "DEF": "Defense",
    "DOC": "District of Columbia",
    "DIS": "Disaster Planning/Emergencies",
    "ECN": "Economics/Economic Development",
    "EDU": "Education",
    "ENG": "Energy/Nuclear",
    "ENV": "Environmental/Superfund",
    "FAM": "Family Issues/Abortion/Adoption",
    "FIR": "Firearms/Guns/Ammunition",
    "FIN": "Financial Institutions/Investments/Securities",
    "FOO": "Food Industry (Safety, Labeling, etc.)",
    "FOR": "Foreign Relations",
    "FUE": "Fuel/Gas/Oil",
    "GAM": "Gaming/Gambling/Casino",
    "GOV": "Government Issues",
    "HCR": "Health Issues",
    "HOM": "Homeland Security",
    "HOU": "Housing",
    "IMM": "Immigration",
    "IND": "Indian/Native American Affairs",
    "INS": "Insurance",
    "INT": "Intelligence and Surveillance",
    "LBR": "Labor Issues/Antitrust/Workplace",
    "LAW": "Law Enforcement/Crime/Criminal Justice",
    "MAN": "Manufacturing",
    "MAR": "Marine/Maritime/Boating/Fisheries",
    "MIA": "Media (Information/Publishing)",
    "MED": "Medical/Disease Research/Clinical Labs",
    "MMM": "Medicare/Medicaid",
    "MON": "Minting/Money/Gold Standard",
    "NAT": "Natural Resources",
    "PHA": "Pharmacy",
    "POS": "Postal",
    "RRR": "Railroads",
    "RES": "Real Estate/Land Use/Conservation",
    "REL": "Religion",
    "RET": "Retirement",
    "ROD": "Roads/Highway",
    "SCI": "Science/Technology",
    "SMB": "Small Business",
    "SPO": "Sports/Athletics",
    "TAR": "Miscellaneous Tariff Bills",
    "TAX": "Taxation/Internal Revenue Code",
    "TEC": "Telecommunications",
    "TOB": "Tobacco",
    "TOR": "Torts",
    "TRD": "Trade (Domestic & Foreign)",
    "TRA": "Transportation",
    "TOU": "Travel/Tourism",
    "TRU": "Trucking/Shipping",
    "URB": "Urban Development/Municipalities",
    "UNM": "Unemployment",
    "UTI": "Utilities",
    "VET": "Veterans",
    "WAS": "Waste (hazardous/solid/interstate/nuclear)",
    "WEL": "Welfare"
}
# Filing periods that appear in raw SOPR filing-type strings. Every period
# combines with the same set of suffixes, so the 48 periodic entries are
# generated below instead of being enumerated by hand (which invites typos
# and omissions).
_FILING_PERIODS = (
    "MID-YEAR",
    "YEAR-END",
    "FIRST QUARTER",
    "SECOND QUARTER",
    "THIRD QUARTER",
    "FOURTH QUARTER",
)

# Mapping from the verbatim SOPR filing-type string to a normalized slug.
# Entries that do not follow the period/suffix pattern are listed explicitly.
filing_types = {
    "REGISTRATION": "registration",
    "REGISTRATION AMENDMENT": "registration_amendment",
    "YEAR-END TERMINATION LETTER": "termination_letter",
    "MID-YEAR TERMINATION LETTER": "termination_letter",
    "MISC. DOC": "misc_document",
    "MISC TERM": "misc_termination",
}

for _period in _FILING_PERIODS:
    # Periodic reports; the "(NO ACTIVITY)" variant drops the word "REPORT".
    filing_types["%s REPORT" % _period] = "report"
    filing_types["%s (NO ACTIVITY)" % _period] = "report"
    # Terminations, report amendments, and termination amendments each come
    # in a plain and a "(NO ACTIVITY)" variant with the same slug.
    for _suffix, _slug in (
            ("TERMINATION", "termination"),
            ("AMENDMENT", "report_amendment"),
            ("TERMINATION AMENDMENT", "termination_amendment")):
        filing_types["%s %s" % (_period, _suffix)] = _slug
        filing_types["%s %s (NO ACTIVITY)" % (_period, _suffix)] = _slug
| influence-usa/lobbying_federal_domestic | ref/sopr.py | Python | cc0-1.0 | 5,786 | [
"CASINO"
] | 3bc4094b426fc27075e29b9c7c12444265ed868bd423ba21237c7b027d087723 |
"""
.. _tut-sensor-locations:
Working with sensor locations
=============================
This tutorial describes how to read and plot sensor locations, and how
the physical location of sensors is handled in MNE-Python.
As usual we'll start by importing the modules we need and loading some
:ref:`example data <sample-dataset>`:
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
import mne
# Download (on first use) and load the MNE sample dataset raw recording.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, preload=True, verbose=False)
###############################################################################
# About montages and layouts
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`Montages <mne.channels.DigMontage>` contain sensor
# positions in 3D (``x``, ``y``, ``z``, in meters), and can be used to set
# the physical positions of sensors. By specifying the location of sensors
# relative to the brain, :class:`Montages <mne.channels.DigMontage>` play an
# important role in computing the forward solution and computing inverse
# estimates.
#
# In contrast, :class:`Layouts <mne.channels.Layout>` are *idealized* 2-D
# representations of sensor positions, and are primarily used for arranging
# individual sensor subplots in a topoplot, or for showing the *approximate*
# relative arrangement of sensors as seen from above.
#
# Working with built-in montages
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The 3D coordinates of MEG sensors are included in the raw recordings from MEG
# systems, and are automatically stored in the ``info`` attribute of the
# :class:`~mne.io.Raw` file upon loading. EEG electrode locations are much more
# variable because of differences in head shape. Idealized montages for many
# EEG systems are included during MNE-Python installation; these files are
# stored in your ``mne-python`` directory, in the
# :file:`mne/channels/data/montages` folder:
# List the EEG montage files bundled with the installed MNE-Python package.
montage_dir = os.path.join(os.path.dirname(mne.__file__),
                           'channels', 'data', 'montages')
print('\nBUILT-IN MONTAGE FILES')
print('======================')
print(sorted(os.listdir(montage_dir)))
###############################################################################
# .. sidebar:: Computing sensor locations
#
# If you are interested in how standard ("idealized") EEG sensor positions
# are computed on a spherical head model, the `eeg_positions`_ repository
# provides code and documentation to this end.
#
# These built-in EEG montages can be loaded via
# :func:`mne.channels.make_standard_montage`. Note that when loading via
# :func:`~mne.channels.make_standard_montage`, provide the filename *without*
# its file extension:
# Load the idealized 10-20 montage by name (no file extension needed).
ten_twenty_montage = mne.channels.make_standard_montage('standard_1020')
print(ten_twenty_montage)
###############################################################################
# Once loaded, a montage can be applied to data via one of the instance methods
# such as :meth:`raw.set_montage <mne.io.Raw.set_montage>`. It is also possible
# to skip the loading step by passing the filename string directly to the
# :meth:`~mne.io.Raw.set_montage` method. This won't work with our sample
# data, because it's channel names don't match the channel names in the
# standard 10-20 montage, so these commands are not run here:
# these will be equivalent:
# raw_1020 = raw.copy().set_montage(ten_twenty_montage)
# raw_1020 = raw.copy().set_montage('standard_1020')
###############################################################################
# :class:`Montage <mne.channels.DigMontage>` objects have a
# :meth:`~mne.channels.DigMontage.plot` method for visualization of the sensor
# locations in 3D; 2D projections are also possible by passing
# ``kind='topomap'``:
# Plot the montage in 3D, then as a 2D top-down ("topomap") projection.
fig = ten_twenty_montage.plot(kind='3d')
fig.gca().view_init(azim=70, elev=15)  # pick a nicer 3D viewing angle
ten_twenty_montage.plot(kind='topomap', show_names=False)
###############################################################################
# .. _control-chan-projection:
#
# Controlling channel projection (MNE vs EEGLAB)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Channel positions in 2d space are obtained by projecting their actual 3d
# positions using a sphere as a reference. Because ``'standard_1020'`` montage
# contains realistic, not spherical, channel positions, we will use a different
# montage to demonstrate controlling how channels are projected to 2d space.
# BioSemi 64-channel cap: an idealized montage used for the sphere demos.
biosemi_montage = mne.channels.make_standard_montage('biosemi64')
biosemi_montage.plot(show_names=False)
###############################################################################
# By default a sphere with an origin in ``(0, 0, 0)`` x, y, z coordinates and
# radius of ``0.095`` meters (9.5 cm) is used. You can use a different sphere
# radius by passing a single value to ``sphere`` argument in any function that
# plots channels in 2d (like :meth:`~mne.channels.DigMontage.plot` that we use
# here, but also for example :func:`mne.viz.plot_topomap`):
biosemi_montage.plot(show_names=False, sphere=0.07)  # 7 cm reference sphere
###############################################################################
# To control not only radius, but also the sphere origin, pass a
# ``(x, y, z, radius)`` tuple to ``sphere`` argument:
# Full sphere spec: (x, y, z, radius) in meters.
biosemi_montage.plot(show_names=False, sphere=(0.03, 0.02, 0.01, 0.075))
###############################################################################
# In mne-python the head center and therefore the sphere center are calculated
# using :term:`fiducial points <fiducial>`.
# Because of this the head circle represents head
# circumference at the nasion and ear level, and not where it is commonly
# measured in 10-20 EEG system: above nasion at T4/T8, T3/T7, Oz, Fz level.
# Notice below that by default T7 and Oz channels are placed within the head
# circle, not on the head outline:
biosemi_montage.plot()  # default sphere; T7/Oz land inside the head circle
###############################################################################
# If you have previous EEGLAB experience you may prefer its convention to
# represent 10-20 head circumference with the head circle. To get EEGLAB-like
# channel layout you would have to move the sphere origin a few centimeters
# up on the z dimension:
biosemi_montage.plot(sphere=(0, 0, 0.035, 0.094))  # EEGLAB-like: origin raised in z
###############################################################################
# Instead of approximating the EEGLAB-esque sphere location as above, you can
# calculate the sphere origin from position of Oz, Fpz, T3/T7 or T4/T8
# channels. This is easier once the montage has been applied to the data and
# channel positions are in the head space - see
# :ref:`this example <ex-topomap-eeglab-style>`.
###############################################################################
# .. _reading-dig-montages:
#
# Reading sensor digitization files
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In the sample data, setting the digitized EEG montage was done prior to
# saving the :class:`~mne.io.Raw` object to disk, so the sensor positions are
# already incorporated into the ``info`` attribute of the :class:`~mne.io.Raw`
# object (see the documentation of the reading functions and
# :meth:`~mne.io.Raw.set_montage` for details on how that works). Because of
# that, we can plot sensor locations directly from the :class:`~mne.io.Raw`
# object using the :meth:`~mne.io.Raw.plot_sensors` method, which provides
# similar functionality to
# :meth:`montage.plot() <mne.channels.DigMontage.plot>`.
# :meth:`~mne.io.Raw.plot_sensors` also allows channel selection by type, can
# color-code channels in various ways (by default, channels listed in
# ``raw.info['bads']`` will be plotted in red), and allows drawing into an
# existing matplotlib ``axes`` object (so the channel positions can easily be
# made as a subplot in a multi-panel figure):
# sphinx_gallery_thumbnail_number = 8
# Plot digitized EEG sensor positions from raw.info, 2D and 3D side by side.
fig = plt.figure()
ax2d = fig.add_subplot(121)
ax3d = fig.add_subplot(122, projection='3d')
raw.plot_sensors(ch_type='eeg', axes=ax2d)
raw.plot_sensors(ch_type='eeg', axes=ax3d, kind='3d')
ax3d.view_init(azim=70, elev=15)  # match the viewing angle used earlier
###############################################################################
# It's probably evident from the 2D topomap above that there is some
# irregularity in the EEG sensor positions in the :ref:`sample dataset
# <sample-dataset>` — this is because the sensor positions in that dataset are
# digitizations of the sensor positions on an actual subject's head, rather
# than idealized sensor positions based on a spherical head model. Depending on
# what system was used to digitize the electrode positions (e.g., a Polhemus
# Fastrak digitizer), you must use different montage reading functions (see
# :ref:`dig-formats`). The resulting :class:`montage <mne.channels.DigMontage>`
# can then be added to :class:`~mne.io.Raw` objects by passing it to the
# :meth:`~mne.io.Raw.set_montage` method (just as we did above with the name of
# the idealized montage ``'standard_1020'``). Once loaded, locations can be
# plotted with :meth:`~mne.channels.DigMontage.plot` and saved with
# :meth:`~mne.channels.DigMontage.save`, like when working with a standard
# montage.
#
# .. note::
#
# When setting a montage with :meth:`~mne.io.Raw.set_montage`
# the measurement info is updated in two places (the ``chs``
# and ``dig`` entries are updated). See :ref:`tut-info-class`.
# ``dig`` may contain HPI, fiducial, or head shape points in
# addition to electrode locations.
#
#
# Rendering sensor position with mayavi
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# It is also possible to render an image of a MEG sensor helmet in 3D, using
# mayavi instead of matplotlib, by calling :func:`mne.viz.plot_alignment`
# Render the MEG helmet and sensor positions in 3D (mayavi backend).
fig = mne.viz.plot_alignment(raw.info, trans=None, dig=False, eeg=False,
                             surfaces=[], meg=['helmet', 'sensors'],
                             coord_frame='meg')
mne.viz.set_3d_view(fig, azimuth=50, elevation=90, distance=0.5)
###############################################################################
# :func:`~mne.viz.plot_alignment` requires an :class:`~mne.Info` object, and
# can also render MRI surfaces of the scalp, skull, and brain (by passing
# keywords like ``'head'``, ``'outer_skull'``, or ``'brain'`` to the
# ``surfaces`` parameter) making it useful for :ref:`assessing coordinate frame
# transformations <plot_source_alignment>`. For examples of various uses of
# :func:`~mne.viz.plot_alignment`, see :ref:`plot_montage`,
# :ref:`ex-eeg-on-scalp`, and :ref:`ex-plot-meg-sensors`.
#
#
# Working with layout files
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As with montages, many layout files are included during MNE-Python
# installation, and are stored in the :file:`mne/channels/data/layouts` folder:
# List the 2D layout files bundled with the installed MNE-Python package.
layout_dir = os.path.join(os.path.dirname(mne.__file__),
                          'channels', 'data', 'layouts')
print('\nBUILT-IN LAYOUT FILES')
print('=====================')
print(sorted(os.listdir(layout_dir)))
###############################################################################
# You may have noticed that the file formats and filename extensions of the
# built-in layout and montage files vary considerably. This reflects different
# manufacturers' conventions; to make loading easier the montage and layout
# loading functions in MNE-Python take the filename *without its extension* so
# you don't have to keep track of which file format is used by which
# manufacturer.
#
# To load a layout file, use the :func:`mne.channels.read_layout` function, and
# provide the filename *without* its file extension. You can then visualize the
# layout using its :meth:`~mne.channels.Layout.plot` method, or (equivalently)
# by passing it to :func:`mne.viz.plot_layout`:
# Read a built-in 2D layout by name (again, no file extension) and plot it.
biosemi_layout = mne.channels.read_layout('biosemi')
biosemi_layout.plot()  # same result as: mne.viz.plot_layout(biosemi_layout)
###############################################################################
# Similar to the ``picks`` argument for selecting channels from
# :class:`~mne.io.Raw` objects, the :meth:`~mne.channels.Layout.plot` method of
# :class:`~mne.channels.Layout` objects also has a ``picks`` argument. However,
# because layouts only contain information about sensor name and location (not
# sensor type), the :meth:`~mne.channels.Layout.plot` method only allows
# picking channels by index (not by name or by type). Here we find the indices
# we want using :func:`numpy.where`; selection by name or type is possible via
# :func:`mne.pick_channels` or :func:`mne.pick_types`.
# Indices of midline channels (names ending in 'z'); plot only those picks.
midline = np.where([name.endswith('z') for name in biosemi_layout.names])[0]
biosemi_layout.plot(picks=midline)
###############################################################################
# If you're working with a :class:`~mne.io.Raw` object that already has sensor
# positions incorporated, you can create a :class:`~mne.channels.Layout` object
# with either the :func:`mne.channels.make_eeg_layout` function or
# (equivalently) the :func:`mne.channels.find_layout` function.
# Build a layout from the digitized positions already stored in raw.info.
layout_from_raw = mne.channels.make_eeg_layout(raw.info)
# same result as: mne.channels.find_layout(raw.info, ch_type='eeg')
layout_from_raw.plot()
###############################################################################
# .. note::
#
# There is no corresponding ``make_meg_layout`` function because sensor
# locations are fixed in a MEG system (unlike in EEG, where the sensor caps
# deform to fit each subject's head). Thus MEG layouts are consistent for a
# given system and you can simply load them with
# :func:`mne.channels.read_layout`, or use :func:`mne.channels.find_layout`
# with the ``ch_type`` parameter, as shown above for EEG.
#
# All :class:`~mne.channels.Layout` objects have a
# :meth:`~mne.channels.Layout.save` method that allows writing layouts to disk,
# in either :file:`.lout` or :file:`.lay` format (which format gets written is
# inferred from the file extension you pass to the method's ``fname``
# parameter). The choice between :file:`.lout` and :file:`.lay` format only
# matters if you need to load the layout file in some other software
# (MNE-Python can read either format equally well).
#
#
# .. LINKS
#
# .. _`eeg_positions`: https://github.com/sappelhoff/eeg_positions
| rkmaddox/mne-python | tutorials/intro/40_sensor_locations.py | Python | bsd-3-clause | 14,370 | [
"Mayavi"
] | a482a3ac51d52a03bb3879f30c6f0f6d32afbf9b16083372878a330c629f6973 |
''' file name : simple_linear_blender.py
Discription : This sample shows how to blend two images.
This is Python version of this tutorial : http://opencv.itseez.com/doc/tutorials/core/adding_images/adding_images.html#adding-images
Level : Beginner
Benefits : 1) Learns usage of cv2.addWeighted and 2) its numpy implementation
Usage : python simple_linear_blender.py
Written by : Abid K. (abidrahman2@gmail.com) , Visit opencvpython.blogspot.com for more tutorials '''
import cv2
import numpy as np
print(''' Simple Linear Blender
------------------------------------------
Enter value of alpha [0:1] :''')
alpha = float(input())  # blending weight of the first image
if 0 <= alpha <= 1:  # alpha must be a valid convex-combination weight
    beta = 1.0 - alpha  # weight of the second image
    gamma = 0.0  # scalar added to each weighted sum
    img1 = cv2.imread('lena.jpg')
    img2 = cv2.imread('res.jpg')
    # cv2.imread returns None on failure. Comparing a loaded numpy array
    # with ``== None`` does an elementwise broadcast (and is never simply
    # True/False), so the check must use identity (``is None``).
    if img1 is None:
        print("img1 not ready")
    elif img2 is None:
        print("img2 not ready")
    else:
        # dst = alpha*img1 + beta*img2 + gamma, saturated to uint8.
        dst = cv2.addWeighted(img1, alpha, img2, beta, gamma)
        # numpy equivalent (about 2x slower than the cv2 call):
        # dst = np.uint8(alpha*(img1) + beta*(img2))
        cv2.imshow('dst', dst)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
else:
    print("value of alpha should be between 0 and 1")
| asrob-uc3m/rpc_rpi | src/python/opencv_python_tutorials/Official_Tutorial_Python_Codes/2_core/add_images.py | Python | gpl-3.0 | 1,423 | [
"VisIt"
] | 90cfe6d52f966cf9a550b825603f73610ad618c5d4d1b4c49d093fb10e115363 |
# LOFAR IMAGING PIPELINE
#
# BBS Source Catalogue List
# Bart Scheers, 2011
# L.H.A.Scheers@uva.nl
# ------------------------------------------------------------------------------
import sys, string
import numpy as np
import monetdb.sql as db
import logging
from gsm_exceptions import GSMException
def expected_fluxes_in_fov(conn, ra_central, decl_central, fov_radius,
assoc_theta, bbsfile,
storespectraplots=False, deruiter_radius=0.,
vlss_flux_cutoff=None,
patchname=''):
"""Search for VLSS, WENSS and NVSS sources that
are in the given FoV. The FoV is set by its central position
(ra_central, decl_central) out to a radius of fov_radius.
The query looks for cross-matches around the sources, out
to a radius of assoc_theta.
All units are in degrees.
deruiter_radius is a measure for the association uncertainty that takes
position errors into account (see thesis Bart Scheers). If not given
as a positive value, it is read from the TKP config file. If not
available, it defaults to 3.717.
The query returns all vlss sources (id) that are in the FoV.
If so, the counterparts from other catalogues are returned as well
(also their ids).
If patchname is given, all sources get that patch name and the center of
the patch is given central ra/dec. Its brightness is the summed flux.
"""
DERUITER_R = deruiter_radius
if DERUITER_R <= 0:
try:
from tkp.config import config
DERUITER_R = config['source_association']['deruiter_radius']
##print "DERUITER_R =",DERUITER_R
except:
DERUITER_R=3.717
#TODO: Check what happens at high decl when alpha goes to 180 degrees
if ra_central - alpha(fov_radius, decl_central) < 0:
ra_min1 = np.float(ra_central - alpha(fov_radius, decl_central) + 360.0)
ra_max1 = np.float(360.0)
ra_min2 = np.float(0.0)
ra_max2 = np.float(ra_central + alpha(fov_radius, decl_central))
q = "q_across_ra0"
elif ra_central + alpha(fov_radius, decl_central) > 360:
ra_min1 = np.float(ra_central - alpha(fov_radius, decl_central))
ra_max1 = np.float(360.0)
ra_min2 = np.float(0.0)
ra_max2 = np.float(ra_central + alpha(fov_radius, decl_central) - 360)
q = "q_across_ra0"
elif ra_central - alpha(fov_radius, decl_central) < 0 and ra_central + alpha(fov_radius, decl_central) > 360:
raise BaseException("ra = %s > 360 degrees, not implemented yet" % str(ra_central + alpha(fov_radius, decl_central)))
else:
ra_min = np.float(ra_central - alpha(fov_radius, decl_central))
ra_max = np.float(ra_central + alpha(fov_radius, decl_central))
q = "q0"
if vlss_flux_cutoff is None:
vlss_flux_cutoff = 0.
status = True
bbsrows = []
totalFlux = 0.
# This is dimensionless search radius that takes into account
# the ra and decl difference between two sources weighted by
# their positional errors.
deRuiter_reduced = DERUITER_R/3600.
q_across_ra0 = """\
SELECT t0.v_catsrcid
,t0.catsrcname
,t1.wm_catsrcid
,t2.wp_catsrcid
,t3.n_catsrcid
,t0.v_flux
,t1.wm_flux
,t2.wp_flux
,t3.n_flux
,t0.v_flux_err
,t1.wm_flux_err
,t2.wp_flux_err
,t3.n_flux_err
,t1.wm_assoc_distance_arcsec
,t1.wm_assoc_r
,t2.wp_assoc_distance_arcsec
,t2.wp_assoc_r
,t3.n_assoc_distance_arcsec
,t3.n_assoc_r
,t0.pa
,t0.major
,t0.minor
,t0.ra
,t0.decl
FROM (SELECT c1.catsrcid AS v_catsrcid
,c1.catsrcname
,c1.ra
,c1.decl
,c1.i_int_avg AS v_flux
,c1.i_int_avg_err AS v_flux_err
,c1.pa
,c1.major
,c1.minor
FROM (SELECT catsrcid
,catsrcname
,ra
,decl
,pa
,major
,minor
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
) t0
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS wm_catsrcid
,c2.i_int_avg AS wm_flux
,c2.i_int_avg_err AS wm_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS wm_assoc_distance_arcsec
,3600 * SQRT(((c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
* (c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS wm_assoc_r
FROM (SELECT catsrcid
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 5
AND (src_type = 'S' OR src_type = 'M')
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
* (c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t1
ON t0.v_catsrcid = t1.v_catsrcid
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS wp_catsrcid
,c2.i_int_avg AS wp_flux
,c2.i_int_avg_err AS wp_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS wp_assoc_distance_arcsec
,3600 * SQRT(( (c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
* (c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS wp_assoc_r
FROM (SELECT catsrcid
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 6
AND (src_type = 'S' OR src_type = 'M')
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
* (c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t2
ON t0.v_catsrcid = t2.v_catsrcid
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS n_catsrcid
,c2.i_int_avg AS n_flux
,c2.i_int_avg_err AS n_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS n_assoc_distance_arcsec
,3600 * SQRT(((c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
* (c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS n_assoc_r
FROM (SELECT catsrcid
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 3
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
* (c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t3
ON t0.v_catsrcid = t3.v_catsrcid
WHERE t0.v_flux >= %(vlss_flux_cutoff)s
ORDER BY t0.v_catsrcid
"""
q0 = """\
SELECT t0.v_catsrcid
,t0.catsrcname
,t1.wm_catsrcid
,t2.wp_catsrcid
,t3.n_catsrcid
,t0.v_flux
,t1.wm_flux
,t2.wp_flux
,t3.n_flux
,t0.v_flux_err
,t1.wm_flux_err
,t2.wp_flux_err
,t3.n_flux_err
,t1.wm_assoc_distance_arcsec
,t1.wm_assoc_r
,t2.wp_assoc_distance_arcsec
,t2.wp_assoc_r
,t3.n_assoc_distance_arcsec
,t3.n_assoc_r
,t0.pa
,t0.major
,t0.minor
,t0.ra
,t0.decl
FROM (SELECT c1.catsrcid AS v_catsrcid
,c1.catsrcname
,c1.ra
,c1.decl
,c1.i_int_avg AS v_flux
,c1.i_int_avg_err AS v_flux_err
,c1.pa
,c1.major
,c1.minor
FROM (SELECT catsrcid
,catsrcname
,ra
,decl
,pa
,major
,minor
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
) t0
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS wm_catsrcid
,c2.i_int_avg AS wm_flux
,c2.i_int_avg_err AS wm_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS wm_assoc_distance_arcsec
,SQRT(((c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
* (c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS wm_assoc_r
FROM (SELECT catsrcid
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 5
AND (src_type = 'S' OR src_type = 'M')
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.ra BETWEEN c1.ra - alpha(%(assoc_theta)s, c1.decl)
AND c1.ra + alpha(%(assoc_theta)s, c1.decl)
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
* (c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t1
ON t0.v_catsrcid = t1.v_catsrcid
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS wp_catsrcid
,c2.i_int_avg AS wp_flux
,c2.i_int_avg_err AS wp_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS wp_assoc_distance_arcsec
,SQRT(((c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
* (c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS wp_assoc_r
FROM (SELECT catsrcid
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 6
AND (src_type = 'S' OR src_type = 'M')
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.ra BETWEEN c1.ra - alpha(%(assoc_theta)s, c1.decl)
AND c1.ra + alpha(%(assoc_theta)s, c1.decl)
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
* (c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t2
ON t0.v_catsrcid = t2.v_catsrcid
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS n_catsrcid
,c2.i_int_avg AS n_flux
,c2.i_int_avg_err AS n_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS n_assoc_distance_arcsec
,SQRT(((c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
* (c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS n_assoc_r
FROM (SELECT catsrcid
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 3
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.ra BETWEEN c1.ra - alpha(%(assoc_theta)s, c1.decl)
AND c1.ra + alpha(%(assoc_theta)s, c1.decl)
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
* (c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t3
ON t0.v_catsrcid = t3.v_catsrcid
WHERE t0.v_flux >= %(vlss_flux_cutoff)s
ORDER BY t0.v_catsrcid
"""
try:
cursor = conn.cursor()
if q == "q0":
query = q0
args = {'decl_central': decl_central
,'ra_central': ra_central
,'fov_radius': fov_radius
,'assoc_theta': assoc_theta
,'deRuiter_reduced': deRuiter_reduced
,'vlss_flux_cutoff': vlss_flux_cutoff}
cursor.execute(query, args)
elif q == "q_across_ra0":
query = q_across_ra0
args = {'decl_central': decl_central
,'ra_central': ra_central
,'ra_min1': ra_min1
,'ra_max1': ra_max1
,'ra_min2': ra_min2
,'ra_max2': ra_max2
,'fov_radius': fov_radius
,'assoc_theta': assoc_theta
,'deRuiter_reduced': deRuiter_reduced
,'vlss_flux_cutoff': vlss_flux_cutoff}
cursor.execute(query, args)
else:
raise BaseException("ra = %s > 360 degrees, not implemented yet" % str(ra_central + alpha(fov_radius, decl_central)))
results = zip(*cursor.fetchall())
cursor.close()
if len(results) == 0:
raise GSMException("No sources found, so Sky Model File %s is not created" % (bbsfile,))
vlss_catsrcid = results[0]
vlss_name = results[1]
wenssm_catsrcid = results[2]
wenssp_catsrcid = results[3]
nvss_catsrcid = results[4]
v_flux = results[5]
wm_flux = results[6]
wp_flux = results[7]
n_flux = results[8]
v_flux_err = results[9]
wm_flux_err = results[10]
wp_flux_err = results[11]
n_flux_err = results[12]
wm_assoc_distance_arcsec = results[13]
wm_assoc_r = results[14]
wp_assoc_distance_arcsec = results[15]
wp_assoc_r = results[16]
n_assoc_distance_arcsec = results[17]
n_assoc_r = results[18]
pa = results[19]
major = results[20]
minor = results[21]
ra = results[22]
decl = results[23]
spectrumfiles = []
# Check for duplicate vlss_names. This may arise when a VLSS source
# is associated with one or more (genuine) counterparts.
# Eg., if two NVSS sources are seen as counterparts
# VLSS - WENSS - NVSS_1
# VLSS - WENSS - NVSS_2
# two rows will be added to the sky model, where the VLSS name
# is postfixed with _0 and _1, resp.
import collections
items = collections.defaultdict(list)
src_name = list(vlss_name)
for i, item in enumerate(src_name):
items[item].append(i)
for item, locs in items.iteritems():
if len(locs) > 1:
#print "duplicates of", item, "at", locs
for j in range(len(locs)):
src_name[locs[j]] = src_name[locs[j]] + "_" + str(j)
if len(results) != 0:
for i in range(len(vlss_catsrcid)):
##print "\ni = ", i
bbsrow = ""
# Here we check the cases for the degree of the polynomial spectral index fit
#print i, vlss_name[i],vlss_catsrcid[i], wenssm_catsrcid[i], wenssp_catsrcid[i], nvss_catsrcid[i]
# Write the vlss name of the source (either postfixed or not)
bbsrow += src_name[i] + ", "
# According to Jess, only sources that have values for all
# three are considered as GAUSSIAN
if pa[i] is not None and major[i] is not None and minor[i] is not None:
#print "Gaussian:", pa[i], major[i], minor[i]
bbsrow += "GAUSSIAN, "
else:
#print "POINT"
bbsrow += "POINT, "
#print "ra = ", ra[i], "; decl = ", decl[i]
#print "BBS ra = ", ra2bbshms(ra[i]), "; BBS decl = ", decl2bbsdms(decl[i])
bbsrow += ra2bbshms(ra[i]) + ", " + decl2bbsdms(decl[i]) + ", "
# Stokes I id default, so filed is empty
#bbsrow += ", "
lognu = []
logflux = []
lognu.append(np.log10(74.0/60.0))
logflux.append(np.log10(v_flux[i]))
if wenssm_catsrcid[i] is not None:
lognu.append(np.log10(325.0/60.0))
logflux.append(np.log10(wm_flux[i]))
if wenssp_catsrcid[i] is not None:
lognu.append(np.log10(352.0/60.0))
logflux.append(np.log10(wp_flux[i]))
if nvss_catsrcid[i] is not None:
lognu.append(np.log10(1400.0/60.0))
logflux.append(np.log10(n_flux[i]))
f = ""
for j in range(len(logflux)):
f += str(10**logflux[j]) + "; "
##print f
#print "len(lognu) = ",len(lognu), "nvss_catsrcid[",i,"] =", nvss_catsrcid[i]
# Here we write the expected flux values at 60 MHz, and the fitted spectral index and
# and curvature term
if len(lognu) == 1:
#print "Exp. flux:", 10**(np.log10(v_flux[i]) + 0.7 * np.log10(74.0/60.0))
#print "Default -0.7"
fluxrow = round(10**(np.log10(v_flux[i]) + 0.7 * np.log10(74.0/60.0)), 2)
totalFlux += fluxrow
bbsrow += str(fluxrow) + ", , , , , "
bbsrow += "[-0.7]"
elif len(lognu) == 2 or (len(lognu) == 3 and nvss_catsrcid[i] is None):
#print "Do a 1-degree polynomial fit"
# p has form : p(x) = p[0] + p[1]*x
p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 1))
#print p
if storespectraplots:
spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "spectrum_%s.eps" % vlss_name[i])
spectrumfiles.append(spectrumfile)
# Default reference frequency is reported, so we leave it empty here;
# Catalogues just report on Stokes I, so others are empty.
fluxrow = round(10**p[0], 4)
totalFlux += fluxrow
bbsrow += str(fluxrow) + ", , , , , "
bbsrow += "[" + str(round(p[1], 4)) + "]"
elif (len(lognu) == 3 and nvss_catsrcid[i] is not None) or len(lognu) == 4:
#print "Do a 2-degree polynomial fit"
# p has form : p(x) = p[0] + p[1]*x + p[2]*x**2
p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 2))
#print p
if storespectraplots:
spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "spectrum_%s.eps" % vlss_name[i])
spectrumfiles.append(spectrumfile)
# Default reference frequency is reported, so we leave it empty here
bbsrow += str(round(10**p[0], 4)) + ", , , , , "
bbsrow += "[" + str(round(p[1],4)) + ", " + str(round(p[2],4)) + "]"
if pa[i] is not None and major[i] is not None and minor[i] is not None:
# Gaussian source:
bbsrow += ", " + str(round(major[i], 2)) + ", " + str(round(minor[i], 2)) + ", " + str(round(pa[i], 2))
#print bbsrow
bbsrows.append (bbsrow)
if storespectraplots:
print "Spectra available in:", spectrumfiles
# Write the format line.
# Optionally it contains a column containing the patch name.
skymodel = open(bbsfile, 'w')
header = "FORMAT = Name, Type, Ra, Dec, I, Q, U, V, ReferenceFrequency='60e6', SpectralIndex='[0.0]', MajorAxis, MinorAxis, Orientation"
# Add fixed patch name to the header and add a line defining the patch.
if len(patchname) > 0:
header += ", patch=fixed'" + patchname + "'\n\n"
header += "# the next line defines the patch\n"
header += ',, ' + ra2bbshms(ra_central) + ', ' + decl2bbsdms(decl_central) + ', ' + str(totalFlux)
header += "\n\n# the next lines define the sources\n"
skymodel.write(header)
for bbsrow in bbsrows:
skymodel.write(bbsrow + '\n')
skymodel.close()
print "Sky model stored in source table:", bbsfile
except db.Error, e:
logging.warn("Failed on query nr %s; for reason %s" % (query, e))
raise
def plotSpectrum(x, y, p, f):
    """Plot the catalogue fluxes and the fitted spectrum in log-log space.

    :param x: log10 frequencies (in units of the 60 MHz reference)
    :param y: log10 fluxes
    :param p: np.poly1d polynomial fitted through (x, y)
    :param f: output file name for the figure
    :return: the file name f
    """
    import pylab
    expflux = "Exp. flux: " + str(round(10**p(0), 3)) + " Jy"
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    # Enlarge the tick labels on both axes.
    for ticklabel in ax.get_xticklabels():
        ticklabel.set_size('x-large')
    for ticklabel in ax.get_yticklabels():
        ticklabel.set_size('x-large')
    ax.set_xlabel(r'$\log \nu/\nu_0$', size='x-large')
    ax.set_ylabel('$\log S$', size='x-large')
    # Sampling points for the fitted curve, roughly between
    # log10(30/60) and log10(1500/60).
    curve_x = np.linspace(-0.3, 1.5, 100)
    ax.plot(x, y, 'o', label='cat fluxes')
    ax.plot(0.0, p(0), 'o', color='k', label=expflux)
    ax.plot(curve_x, p(curve_x), linestyle='--', linewidth=2, label='fit')
    pylab.legend(numpoints=1, loc='best')
    pylab.grid(True)
    pylab.savefig(f, dpi=600)
    return f
def decl2bbsdms(d):
    """Convert a declination in decimal degrees to the BBS "sdd.mm.ss" form.

    Based on function deg2dec written by Enno Middelberg 2001,
    http://www.atnf.csiro.au/people/Enno.Middelberg/python/python.html

    :param d: declination in decimal degrees (may be negative)
    :return: zero-padded string '<sign>dd.mm.ss.ssssssss'
    """
    deg = float(d)
    # If negative, store "-" in sign and continue the calculation with
    # the positive value.
    sign = "+"
    if deg < 0:
        sign = "-"
        deg = -deg
    # Declinations are only meaningful on [-90, 90]; warn (but carry on,
    # as before) for out-of-range input.  The former 'deg < -90' test was
    # removed: it could never be true after the sign flip above.
    if deg > 90:
        logging.warning("%s: inputs may not exceed 90 degrees!" % deg)
    hh = int(deg)                                         # whole degrees
    mm = int((deg - int(deg)) * 60)                       # whole arcminutes
    ss = '%10.8f' % (((deg - int(deg)) * 60 - mm) * 60)   # arcseconds
    # str.zfill replaces the long-deprecated backtick-repr plus
    # string.zfill() combination; the output is byte-identical.
    return sign + str(hh).zfill(2) + '.' + str(mm).zfill(2) + '.' + ss.zfill(11)
def ra2bbshms(a):
    """Convert a right ascension in decimal degrees to BBS "hh:mm:ss" form.

    :param a: right ascension in decimal degrees, expected in [0, 360]
    :return: zero-padded string 'hh:mm:ss.ssssssss'
    """
    deg = float(a)
    # Right ascensions live on [0, 360]; warn (but carry on) otherwise.
    # Bug fix: the warning text used to claim a 90-degree limit.
    if deg < 0 or deg > 360:
        logging.warning("%s: inputs may not exceed 360 degrees!" % deg)
    hh = int(deg / 15)                      # whole hours (15 degrees/hour)
    mm = int((deg - 15 * hh) * 4)           # whole minutes (4 minutes/degree)
    ss = '%10.8f' % ((4 * deg - 60 * hh - mm) * 60)  # seconds
    # str.zfill replaces the deprecated backtick-repr plus string.zfill();
    # the output is byte-identical.
    return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + ss.zfill(11)
def alpha(theta, decl):
    """Return the RA half-width (degrees) of a circle of radius theta
    (degrees) centred at declination decl (degrees).

    Saturates to 180 when the circle reaches (close to) a celestial pole,
    in which case every right ascension lies inside it.
    """
    if abs(decl) + theta > 89.9:
        return 180.0
    # Same formula as before, with the radians()/degrees() helper calls
    # inlined (identical arithmetic).
    numerator = np.sin(theta * np.pi / 180)
    denominator = np.sqrt(abs(np.cos((decl - theta) * np.pi / 180)
                              * np.cos((decl + theta) * np.pi / 180)))
    return abs(np.arctan(numerator / denominator)) * 180 / np.pi
def degrees(r):
    """Convert an angle in radians to degrees."""
    return 180.0 * r / np.pi
def radians(d):
    """Convert an angle in degrees to radians."""
    return np.pi * d / 180.0
| jjdmol/LOFAR | CEP/GSM/src/gsmutils.py | Python | gpl-3.0 | 48,216 | [
"Gaussian"
] | 0a4a8ed9c7d4245064b26bfb7707baf7e4dc202b27117c5691dd4af1b7c64342 |
"""Utilities to help Computing Element Queues manipulation
"""
import os
import hashlib
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import fromChar
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getDIRACPlatform
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Resources.Computing.ComputingElementFactory import ComputingElementFactory
def getQueuesResolved(
    siteDict, queueCECache, gridEnv=None, setup=None, workingDir="", checkPlatform=False, instantiateCEs=False
):
    """Get the list of relevant CEs (what is in siteDict) and their descriptions.
    The main goal of this method is to return a dictionary of queues

    :param dict siteDict: mapping site -> CE name -> CE description dict;
        each CE description must contain a "Queues" sub-dictionary
    :param dict queueCECache: cache of already instantiated CE objects keyed
        by "<ce>_<queue>"; updated in place
    :param gridEnv: grid environment setting, copied into each queue's parameters
    :param setup: DIRAC setup name, copied into each queue's parameters
    :param str workingDir: unused here; kept for interface compatibility
    :param bool checkPlatform: if True, resolve and set the queue platform
    :param bool instantiateCEs: if True, create (or reuse cached) CE objects
        and attach them to the returned queue descriptions
    :return: S_OK({queueName: queue description dict})
    """
    queueDict = {}
    ceFactory = ComputingElementFactory()
    for site in siteDict:
        for ce in siteDict[site]:
            ceDict = siteDict[site][ce]
            pilotRunDirectory = ceDict.get("PilotRunDirectory", "")
            # ceMaxRAM = ceDict.get('MaxRAM', None)
            # NOTE: pop() removes "Queues" from ceDict, so the caller's
            # siteDict is modified in place.
            qDict = ceDict.pop("Queues")
            for queue in qDict:
                queueName = "%s_%s" % (ce, queue)
                queueDict[queueName] = {}
                queueDict[queueName]["ParametersDict"] = qDict[queue]
                queueDict[queueName]["ParametersDict"]["Queue"] = queue
                queueDict[queueName]["ParametersDict"]["GridCE"] = ce
                queueDict[queueName]["ParametersDict"]["Site"] = site
                queueDict[queueName]["ParametersDict"]["GridEnv"] = gridEnv
                queueDict[queueName]["ParametersDict"]["Setup"] = setup
                # Evaluate the CPU limit of the queue according to the Glue convention
                computeQueueCPULimit(queueDict[queueName]["ParametersDict"])
                # Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
                # This also converts them from a string to a list if required.
                resolveTags(ceDict, queueDict[queueName]["ParametersDict"])
                # Some parameters can be defined on the CE level and are inherited by all Queues
                setAdditionalParams(ceDict, queueDict[queueName]["ParametersDict"])
                if pilotRunDirectory:
                    queueDict[queueName]["ParametersDict"]["JobExecDir"] = pilotRunDirectory
                # Merge CE-level and queue-level parameters; queue values win.
                ceQueueDict = dict(ceDict)
                ceQueueDict.update(queueDict[queueName]["ParametersDict"])
                if instantiateCEs:
                    # Generate the CE object for the queue or pick the already existing one
                    # if the queue definition did not change
                    queueHash = generateQueueHash(ceQueueDict)
                    if queueName in queueCECache and queueCECache[queueName]["Hash"] == queueHash:
                        queueCE = queueCECache[queueName]["CE"]
                    else:
                        result = ceFactory.getCE(ceName=ce, ceType=ceDict["CEType"], ceParametersDict=ceQueueDict)
                        if not result["OK"]:
                            # Skip queues whose CE cannot be instantiated.
                            continue
                        queueCECache.setdefault(queueName, {})
                        queueCECache[queueName]["Hash"] = queueHash
                        queueCECache[queueName]["CE"] = result["Value"]
                        queueCE = queueCECache[queueName]["CE"]
                    queueDict[queueName]["ParametersDict"].update(queueCE.ceParameters)
                    queueDict[queueName]["CE"] = queueCE
                    result = queueDict[queueName]["CE"].isValid()
                    if not result["OK"]:
                        # Invalid CE (e.g. missing proxy): drop this queue.
                        continue
                queueDict[queueName]["CEName"] = ce
                queueDict[queueName]["CEType"] = ceDict["CEType"]
                queueDict[queueName]["Site"] = site
                queueDict[queueName]["QueueName"] = queue
                # Stored as a string flag ("true"/"false"), not a bool.
                queueDict[queueName]["QueryCEFlag"] = ceDict.get("QueryCEFlag", "false")
                if checkPlatform:
                    setPlatform(ceDict, queueDict[queueName]["ParametersDict"])
                # BundleProxy may be set on the queue or inherited from the CE.
                bundleProxy = queueDict[queueName]["ParametersDict"].get("BundleProxy", ceDict.get("BundleProxy"))
                if bundleProxy and bundleProxy.lower() in ["true", "yes", "1"]:
                    queueDict[queueName]["BundleProxy"] = True
    return S_OK(queueDict)
def computeQueueCPULimit(queueDict):
    """Evaluate the CPU limit of the queue according to the Glue convention.

    Requires both "maxCPUTime" and "SI00" in *queueDict*; otherwise the
    dictionary is left untouched. Sets the integer "CPUTime" key in place.
    """
    if "maxCPUTime" not in queueDict or "SI00" not in queueDict:
        return
    # Clamp crazy CS values into [0, 12.5 days].
    wallClockLimit = min(max(float(queueDict["maxCPUTime"]), 0), 86400 * 12.5)
    benchmark = float(queueDict["SI00"])
    queueDict["CPUTime"] = int(60 / 250 * wallClockLimit * benchmark)
def resolveTags(ceDict, queueDict):
    """Merge the Tag and RequiredTag fields defined at the CE level and at the
    Queue level into a single de-duplicated list stored on the queue.

    String values are split into lists via fromChar first. *queueDict* is
    updated in place; *ceDict* is left unchanged.
    """
    for fieldName in ("Tag", "RequiredTag"):
        merged = set()
        for source in (ceDict, queueDict):
            tags = source.get(fieldName, [])
            if isinstance(tags, str):
                tags = fromChar(tags)
            merged |= set(tags)
        queueDict[fieldName] = list(merged)
def setPlatform(ceDict, queueDict):
    """Fill in queueDict["Platform"] from CE parameters when it is not already set.

    Falls back to "<architecture>_<OS>" built from the CE description, then
    translates the raw platform via getDIRACPlatform. Updates *queueDict* in
    place; an existing "Platform" key is never overwritten.
    """
    if "Platform" in queueDict:
        platform = queueDict["Platform"]
    else:
        platform = ceDict.get("Platform", "")
    if not platform and "OS" in ceDict:
        # Default the architecture when the CE does not declare one.
        platform = "_".join([ceDict.get("architecture", "x86_64"), ceDict["OS"]])
    if platform and "Platform" not in queueDict:
        result = getDIRACPlatform(platform)
        queueDict["Platform"] = result["Value"][0] if result["OK"] else platform
def setAdditionalParams(ceDict, queueDict):
    """Inherit selected CE-level parameters into the queue description.

    A queue-level value always wins over the CE-level one; falsy values
    (None, 0, "") are never written. *queueDict* is updated in place.
    """
    for name in ("MaxRAM", "NumberOfProcessors", "WholeNode"):
        value = queueDict.get(name, ceDict.get(name))
        if value:
            queueDict[name] = value
def generateQueueHash(queueDict):
    """Return the hex MD5 digest of the queue description's repr.

    Used only as a change-detection fingerprint, not for security.
    """
    return hashlib.md5(str(queueDict).encode()).hexdigest()
def matchQueue(jobJDL, queueDict, fullMatch=False):
    """
    Match the job description to the queue definition

    :param str jobJDL: JDL job description
    :param bool fullMatch: if True, evaluate all the criteria and collect
        every failure reason; if False, stop at the first mismatch
    :param dict queueDict: queue parameters dictionary

    :return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or
             S_ERROR with the reason for no match
    """
    # Check the job description validity
    job = ClassAd(jobJDL)
    if not job.isOK():
        return S_ERROR("Invalid job description")

    noMatchReasons = []

    # Check job requirements to resource
    # 1. CPUTime
    cpuTime = job.getAttributeInt("CPUTime")
    if not cpuTime:
        # NOTE(review): 84600 looks like a typo for 86400 (24h in seconds) —
        # confirm before changing, callers may rely on the current value.
        cpuTime = 84600
    if cpuTime > int(queueDict.get("CPUTime", 0)):
        noMatchReasons.append("Job CPUTime requirement not satisfied")
        if not fullMatch:
            return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    # 2. Multi-value match requirements: the job's value set must intersect
    # the queue's value set for each selector the queue defines.
    for parameter in ["Site", "GridCE", "Platform", "SubmitPool", "JobType"]:
        if parameter in queueDict:
            valueSet = set(job.getListFromExpression(parameter))
            if not valueSet:
                # Fall back to the plural attribute name (e.g. "Sites").
                valueSet = set(job.getListFromExpression("%ss" % parameter))
            queueSet = set(fromChar(queueDict[parameter]))
            if valueSet and queueSet and not valueSet.intersection(queueSet):
                valueToPrint = ",".join(valueSet)
                if len(valueToPrint) > 20:
                    valueToPrint = "%s..." % valueToPrint[:20]
                noMatchReasons.append("Job %s %s requirement not satisfied" % (parameter, valueToPrint))
                if not fullMatch:
                    return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    # 3. Banned multi-value match requirements: the job's Banned<X> list
    # must not fully contain the queue's <X> value(s).
    for par in ["Site", "GridCE", "Platform", "JobType"]:
        parameter = "Banned%s" % par
        if par in queueDict:
            valueSet = set(job.getListFromExpression(parameter))
            if not valueSet:
                valueSet = set(job.getListFromExpression("%ss" % parameter))
            queueSet = set(fromChar(queueDict[par]))
            if valueSet and queueSet and valueSet.issubset(queueSet):
                valueToPrint = ",".join(valueSet)
                if len(valueToPrint) > 20:
                    valueToPrint = "%s..." % valueToPrint[:20]
                noMatchReasons.append("Job %s %s requirement not satisfied" % (parameter, valueToPrint))
                if not fullMatch:
                    return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    # 4. Tags: implicit tags are derived from NumberOfProcessors/WholeNode,
    # and all job tags must be offered by the queue.
    tags = set(job.getListFromExpression("Tag"))
    nProc = job.getAttributeInt("NumberOfProcessors")
    if nProc and nProc > 1:
        tags.add("MultiProcessor")
    wholeNode = job.getAttributeString("WholeNode")
    if wholeNode:
        tags.add("WholeNode")
    queueTags = set(queueDict.get("Tag", []))
    if not tags.issubset(queueTags):
        noMatchReasons.append("Job Tag %s not satisfied" % ",".join(tags))
        if not fullMatch:
            return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    # 5. MultiProcessor requirements
    if nProc and nProc > int(queueDict.get("NumberOfProcessors", 1)):
        noMatchReasons.append("Job NumberOfProcessors %d requirement not satisfied" % nProc)
        if not fullMatch:
            return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    # 6. RAM (job value presumably in GB, queue MaxRAM in MB — TODO confirm)
    ram = job.getAttributeInt("RAM")
    # If MaxRAM is not specified in the queue description, assume 2GB
    if ram and ram > int(queueDict.get("MaxRAM", 2048) / 1024):
        noMatchReasons.append("Job RAM %d requirement not satisfied" % ram)
        if not fullMatch:
            return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    # Check resource requirements to job
    # 1. OwnerGroup - rare case but still
    if "OwnerGroup" in queueDict:
        result = getProxyInfo(disableVOMS=True)
        if not result["OK"]:
            return S_ERROR("No valid proxy available")
        ownerGroup = result["Value"]["group"]
        if ownerGroup != queueDict["OwnerGroup"]:
            noMatchReasons.append("Resource OwnerGroup %s requirement not satisfied" % queueDict["OwnerGroup"])
            if not fullMatch:
                return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    # 2. Required tags
    # NOTE(review): this reads "RequiredTags" (plural) while resolveTags()
    # writes "RequiredTag" (singular) — verify which key is intended.
    requiredTags = set(queueDict.get("RequiredTags", []))
    if not requiredTags.issubset(tags):
        noMatchReasons.append("Resource RequiredTags %s not satisfied" % ",".join(requiredTags))
        if not fullMatch:
            return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    # 3. RunningLimit: per-site (and per-CE) operational limits from the
    # Operations configuration; a limit of 0 forbids the job attribute value.
    site = queueDict["Site"]
    ce = queueDict.get("GridCE")
    opsHelper = Operations()
    result = opsHelper.getSections("JobScheduling/RunningLimit")
    if result["OK"] and site in result["Value"]:
        result = opsHelper.getSections("JobScheduling/RunningLimit/%s" % site)
        if result["OK"]:
            for parameter in result["Value"]:
                value = job.getAttributeString(parameter)
                if (
                    value
                    and (
                        opsHelper.getValue("JobScheduling/RunningLimit/%s/%s/%s" % (site, parameter, value), 1)
                        or opsHelper.getValue(
                            "JobScheduling/RunningLimit/%s/CEs/%s/%s/%s" % (site, ce, parameter, value), 1
                        )
                    )
                    == 0
                ):
                    noMatchReasons.append("Resource operational %s requirement not satisfied" % parameter)
                    if not fullMatch:
                        return S_OK({"Match": False, "Reason": noMatchReasons[0]})

    return S_OK({"Match": not bool(noMatchReasons), "Reason": noMatchReasons})
| ic-hep/DIRAC | src/DIRAC/WorkloadManagementSystem/Utilities/QueueUtilities.py | Python | gpl-3.0 | 12,592 | [
"DIRAC"
] | 7fa9d7af58da5d60701c3c601de925672590fbaea1de71315fc79dea9213a788 |
# author: brian dillmann
# for rscs
from context import Timer
import unittest
import time
class test_time_input(unittest.TestCase):
    """Tests for the ``Timer`` input device from the project's ``context`` module.

    NOTE(review): the unit argument ('ms'/'s'/'m') semantics are inferred from
    how these tests use it — confirm against the Timer implementation.
    The tests use real wall-clock sleeps (up to 10 s), so the suite is slow.
    """

    def test_start_on_init(self):
        # The timer should start counting immediately on construction,
        # without an explicit start call.
        input = Timer('timer')  # NOTE: local name shadows the builtin input()
        t = input.read()
        self.assertGreater(t, 0)

    def test_seconds_interval(self):
        # The default unit and an explicit 's' should both report elapsed
        # seconds: after a 10 s sleep, both readings must fall in (9, 11).
        input1 = Timer('timer')
        input2 = Timer('timer', 's')
        time.sleep(10)
        t1 = input1.read()
        t2 = input2.read()
        self.assertGreater(t1, 9)
        self.assertGreater(t2, 9)
        self.assertLess(t1, 11)
        self.assertLess(t2, 11)

    def test_reset(self):
        # start_time() should restart the count: ~5 s elapses, then the
        # reading drops back below 1 s after the reset.
        input = Timer('timer', 's')
        time.sleep(5)
        t = input.read()
        self.assertGreater(t, 4)
        input.start_time()
        t = input.read()
        self.assertLess(t, 1)

    def test_millis(self):
        # 'ms' unit: a 1 s sleep should read roughly 1000 ms (+/-10%).
        input = Timer('timer', 'ms')
        time.sleep(1)
        t = input.read()
        self.assertGreater(t, 900)
        self.assertLess(t, 1100)

    def test_mins(self):
        # 'm' unit: 3 s is 0.05 min, so the reading must stay below 1.
        # NOTE(review): no lower bound is asserted, so a timer stuck at 0
        # would still pass — consider adding assertGreater(t, 0).
        input = Timer('timer', 'm')
        time.sleep(3)
        t = input.read()
        self.assertLess(t, 1)
# Run the test suite when this file is executed directly.
# Bug fix: the original compared __name__ against 'main', which is never
# true (the correct sentinel is "__main__"), so the tests never ran.
if __name__ == "__main__":
    unittest.main()
| dillmann/rscs | test/devicetests/time_input_test.py | Python | mit | 976 | [
"Brian"
] | 4aa2258916196867ee7563a109307f5e6f0a694e2ae70ac93825d25c9dce0be8 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.